code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
Poisson matrix factorization with Batch inference and Stochastic inference
CREATED: 2014-03-25 02:06:52 by <NAME> <<EMAIL>>
"""
import sys
import numpy as np
from scipy import special
from scipy import stats
from sklearn.metrics import mean_squared_error as mse
from sklearn.decomposition import NMF
from sklearn.base import BaseEstimator, TransformerMixin
class NetworkPoissonMF(BaseEstimator, TransformerMixin):
    '''Poisson matrix factorization of a network with batch variational
    inference.

    A single latent factor matrix ``theta`` is learned (variational
    parameters ``gamma_t`` / ``rho_t``) and the adjacency matrix is
    modeled through ``theta @ theta.T``.
    '''
    def __init__(self, n_components=100, max_iter=100, tol=0.0005,
                 smoothness=100, random_state=None, verbose=False,
                 initialize_smart=False,
                 **kwargs):
        ''' Poisson matrix factorization
        Arguments
        ---------
        n_components : int
            Number of latent components
        max_iter : int
            Maximal number of iterations to perform
        tol : float
            The threshold on the increase of the objective to stop the
            iteration
        smoothness : int
            Smoothness on the initialization variational parameters
        random_state : int or RandomState
            Pseudo random number generator used for sampling
        verbose : bool
            Whether to show progress during model fitting
        initialize_smart : bool
            If True, warm-start the factors from an NMF decomposition of
            the data instead of random Gamma draws
        **kwargs: dict
            Model hyperparameters ('a', 'b', 'c', 'd')
        '''
        self.n_components = n_components
        self.max_iter = max_iter
        self.tol = tol
        self.smoothness = smoothness
        self.random_state = random_state
        self.verbose = verbose
        self.smart_init = initialize_smart
        if type(self.random_state) is int:
            np.random.seed(self.random_state)
        elif self.random_state is not None:
            # NOTE(review): np.random.setstate expects the state *tuple*
            # from np.random.get_state(), not a RandomState object --
            # confirm what callers actually pass here.
            np.random.setstate(self.random_state)
        else:
            np.random.seed(0)
        self._parse_args(**kwargs)

    def _parse_args(self, **kwargs):
        # Gamma prior hyperparameters (shape/rate pairs).
        self.a = float(kwargs.get('a', 0.5))
        self.b = float(kwargs.get('b', 0.5))
        self.c = float(kwargs.get('c', 1.))
        self.d = float(kwargs.get('d', 50.))

    def _nmf_initialize(self, A):
        '''Warm-start (E[theta], E[log theta]) from an NMF decomposition.

        Zeros are clamped to 1e-10 so the logarithm stays finite.
        '''
        nmf = NMF(n_components=self.n_components)
        z = nmf.fit_transform(A)
        z[z == 0] = 1e-10
        return z, np.log(z)

    def _init_weights(self, n_samples, A=None):
        '''Initialize the variational parameters for theta.'''
        self.gamma_t = self.smoothness \
            * np.random.gamma(self.smoothness, 1. / self.smoothness,
                              size=(n_samples, self.n_components))
        self.rho_t = self.smoothness \
            * np.random.gamma(self.smoothness, 1. / self.smoothness,
                              size=(n_samples, self.n_components))
        if self.smart_init:
            self.Et, self.Elogt = self._nmf_initialize(A)
        else:
            self.Et, self.Elogt = _compute_expectations(self.gamma_t, self.rho_t)
        # Rate hyperparameter tied to the current factor scale.
        self.c = 1. / np.mean(self.Et)

    def _init_non_identity_mat(self, N):
        # Mask with a zero diagonal: self-loops are excluded from updates.
        self.non_id_mat = 1 - np.identity(N)

    def fit(self, A):
        '''Fit the model to the data in A.
        Parameters
        ----------
        A : array-like, shape (n_samples, n_samples)
            Adjacency matrix of the network.
        Returns
        -------
        self: object
            Returns the instance itself.
        '''
        n_samples, n_feats = A.shape
        n_users = A.shape[0]
        if self.smart_init:
            self._init_weights(n_samples, A=A)
        else:
            self._init_weights(n_samples)
        self._init_non_identity_mat(n_users)
        self._update(A)
        return self

    def transform(self, X, attr=None):
        '''Encode the data as a linear combination of the latent components.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_feats)
        attr: string
            The attribute to return, default 'Et'. Can be changed to
            'Elogt' to obtain E_q[log theta] as transformed data.
        Returns
        -------
        X_new : array-like, shape(n_samples, n_components)
            Transformed data, as specified by attr.
        '''
        # NOTE(review): fit() never sets 'Eb', so this guard always
        # raises for this network model -- confirm intended behavior.
        if not hasattr(self, 'Eb'):
            raise ValueError('There are no pre-trained components.')
        n_samples, n_feats = X.shape
        if n_feats != self.Eb.shape[1]:
            raise ValueError('The dimension of the transformed data '
                             'does not match with the existing components.')
        if attr is None:
            attr = 'Et'
        self._init_weights(n_samples)
        self._update(X, update_beta=False)
        return getattr(self, attr)

    def _update(self, A, update_beta=True):
        '''Coordinate-ascent loop until the bound stops improving.

        Parameters
        ----------
        update_beta : bool
            Accepted for compatibility with transform(), which passes
            update_beta=False.  Fix: this parameter did not exist before,
            so transform() crashed with a TypeError.  This model keeps no
            separate beta factor, hence the flag is currently a no-op.
        '''
        old_bd = -np.inf
        for i in range(self.max_iter):
            self._update_theta(A)
            bound = self._bound(A)
            if i > 0:
                improvement = (bound - old_bd) / abs(old_bd)
                if self.verbose:
                    sys.stdout.write('\r\tAfter ITERATION: %d\tObjective: %.2f\t'
                                     'Old objective: %.2f\t'
                                     'Improvement: %.5f' % (i, bound, old_bd,
                                                            improvement))
                    sys.stdout.flush()
                if improvement < self.tol:
                    break
            old_bd = bound
        if self.verbose:
            sys.stdout.write('\n')

    def _update_theta(self, A):
        # Multiplicative update derived from the mean-field equations.
        ratio_adj = A / self._xexplog_adj()
        adj_term = np.multiply(np.exp(self.Elogt), np.dot(
            ratio_adj, np.exp(self.Elogt)))
        self.gamma_t = self.a + adj_term
        self.rho_t = np.dot(self.non_id_mat, self.Et)
        self.rho_t += self.c
        self.Et, self.Elogt = _compute_expectations(self.gamma_t, self.rho_t)
        self.c = 1. / np.mean(self.Et)

    def _xexplog_adj(self):
        # exp(E[log theta]) @ exp(E[log theta]).T -- reconstruction used
        # in the multiplicative updates.
        return np.dot(np.exp(self.Elogt), np.exp(self.Elogt.T))

    def _bound(self, A):
        '''Evidence lower bound (up to constants) used for monitoring.'''
        bound = np.sum(np.multiply(A, np.log(self._xexplog_adj()) - self.Et.dot(self.Et.T)))
        bound += _gamma_term(self.a, self.a * self.c,
                             self.gamma_t, self.rho_t,
                             self.Et, self.Elogt)
        bound += self.n_components * A.shape[0] * self.a * np.log(self.c)
        return bound
def _compute_expectations(alpha, beta):
'''
Given x ~ Gam(alpha, beta), compute E[x] and E[log x]
'''
return (alpha / beta , special.psi(alpha) - np.log(beta))
def _gamma_term(a, b, shape, rate, Ex, Elogx):
return np.sum(np.multiply((a - shape), Elogx) - np.multiply((b - rate), Ex) +
(special.gammaln(shape) - np.multiply(shape, np.log(rate))))
if __name__ == '__main__':
    # Smoke test: factorize a synthetic Poisson network built from
    # Gamma-distributed latent factors.
    N = 1000
    K = 20
    M = 1000  # NOTE(review): appears unused below -- confirm it can be dropped
    Z = stats.gamma.rvs(0.5, scale=0.1, size=(N,K))
    A = stats.poisson.rvs(Z.dot(Z.T))
    A = np.triu(A)
    non_id = 1 - np.identity(N)  # zero out the diagonal (no self-loops)
    A = A*non_id
    pmf = NetworkPoissonMF(n_components=K, verbose=True, initialize_smart=True)
    pmf.fit(A)
print("MSE Z:", mse(Z, pmf.Et)) | [
"sys.stdout.write",
"sklearn.decomposition.NMF",
"numpy.triu",
"numpy.random.seed",
"numpy.log",
"scipy.stats.gamma.rvs",
"numpy.multiply",
"numpy.identity",
"scipy.special.psi",
"numpy.random.gamma",
"numpy.mean",
"sys.stdout.flush",
"numpy.exp",
"scipy.special.gammaln",
"numpy.dot",
... | [((6876, 6920), 'scipy.stats.gamma.rvs', 'stats.gamma.rvs', (['(0.5)'], {'scale': '(0.1)', 'size': '(N, K)'}), '(0.5, scale=0.1, size=(N, K))\n', (6891, 6920), False, 'from scipy import stats\n'), ((6966, 6976), 'numpy.triu', 'np.triu', (['A'], {}), '(A)\n', (6973, 6976), True, 'import numpy as np\n'), ((2154, 2189), 'sklearn.decomposition.NMF', 'NMF', ([], {'n_components': 'self.n_components'}), '(n_components=self.n_components)\n', (2157, 2189), False, 'from sklearn.decomposition import NMF\n'), ((5764, 5796), 'numpy.dot', 'np.dot', (['self.non_id_mat', 'self.Et'], {}), '(self.non_id_mat, self.Et)\n', (5770, 5796), True, 'import numpy as np\n'), ((6994, 7008), 'numpy.identity', 'np.identity', (['N'], {}), '(N)\n', (7005, 7008), True, 'import numpy as np\n'), ((7147, 7161), 'sklearn.metrics.mean_squared_error', 'mse', (['Z', 'pmf.Et'], {}), '(Z, pmf.Et)\n', (7150, 7161), True, 'from sklearn.metrics import mean_squared_error as mse\n'), ((1680, 1713), 'numpy.random.seed', 'np.random.seed', (['self.random_state'], {}), '(self.random_state)\n', (1694, 1713), True, 'import numpy as np\n'), ((2263, 2272), 'numpy.log', 'np.log', (['z'], {}), '(z)\n', (2269, 2272), True, 'import numpy as np\n'), ((2450, 2546), 'numpy.random.gamma', 'np.random.gamma', (['self.smoothness', '(1.0 / self.smoothness)'], {'size': '(n_samples, self.n_components)'}), '(self.smoothness, 1.0 / self.smoothness, size=(n_samples,\n self.n_components))\n', (2465, 2546), True, 'import numpy as np\n'), ((2625, 2721), 'numpy.random.gamma', 'np.random.gamma', (['self.smoothness', '(1.0 / self.smoothness)'], {'size': '(n_samples, self.n_components)'}), '(self.smoothness, 1.0 / self.smoothness, size=(n_samples,\n self.n_components))\n', (2640, 2721), True, 'import numpy as np\n'), ((2953, 2969), 'numpy.mean', 'np.mean', (['self.Et'], {}), '(self.Et)\n', (2960, 2969), True, 'import numpy as np\n'), ((3042, 3056), 'numpy.identity', 'np.identity', (['N'], {}), '(N)\n', (3053, 3056), True, 'import numpy 
as np\n'), ((5484, 5506), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (5500, 5506), False, 'import sys\n'), ((5629, 5647), 'numpy.exp', 'np.exp', (['self.Elogt'], {}), '(self.Elogt)\n', (5635, 5647), True, 'import numpy as np\n'), ((5926, 5942), 'numpy.mean', 'np.mean', (['self.Et'], {}), '(self.Et)\n', (5933, 5942), True, 'import numpy as np\n'), ((5994, 6012), 'numpy.exp', 'np.exp', (['self.Elogt'], {}), '(self.Elogt)\n', (6000, 6012), True, 'import numpy as np\n'), ((6014, 6034), 'numpy.exp', 'np.exp', (['self.Elogt.T'], {}), '(self.Elogt.T)\n', (6020, 6034), True, 'import numpy as np\n'), ((6374, 6388), 'numpy.log', 'np.log', (['self.c'], {}), '(self.c)\n', (6380, 6388), True, 'import numpy as np\n'), ((6557, 6575), 'scipy.special.psi', 'special.psi', (['alpha'], {}), '(alpha)\n', (6568, 6575), False, 'from scipy import special\n'), ((6578, 6590), 'numpy.log', 'np.log', (['beta'], {}), '(beta)\n', (6584, 6590), True, 'import numpy as np\n'), ((1770, 1807), 'numpy.random.setstate', 'np.random.setstate', (['self.random_state'], {}), '(self.random_state)\n', (1788, 1807), True, 'import numpy as np\n'), ((1834, 1851), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1848, 1851), True, 'import numpy as np\n'), ((5680, 5698), 'numpy.exp', 'np.exp', (['self.Elogt'], {}), '(self.Elogt)\n', (5686, 5698), True, 'import numpy as np\n'), ((6659, 6688), 'numpy.multiply', 'np.multiply', (['(a - shape)', 'Elogx'], {}), '(a - shape, Elogx)\n', (6670, 6688), True, 'import numpy as np\n'), ((6693, 6718), 'numpy.multiply', 'np.multiply', (['(b - rate)', 'Ex'], {}), '(b - rate, Ex)\n', (6704, 6718), True, 'import numpy as np\n'), ((6742, 6764), 'scipy.special.gammaln', 'special.gammaln', (['shape'], {}), '(shape)\n', (6757, 6764), False, 'from scipy import special\n'), ((5037, 5181), 'sys.stdout.write', 'sys.stdout.write', (["('\\r\\tAfter ITERATION: %d\\tObjective: %.2f\\tOld objective: %.2f\\tImprovement: %.5f'\n % (i, bound, old_bd, 
improvement))"], {}), "(\n '\\r\\tAfter ITERATION: %d\\tObjective: %.2f\\tOld objective: %.2f\\tImprovement: %.5f'\n % (i, bound, old_bd, improvement))\n", (5053, 5181), False, 'import sys\n'), ((5332, 5350), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5348, 5350), False, 'import sys\n'), ((6786, 6798), 'numpy.log', 'np.log', (['rate'], {}), '(rate)\n', (6792, 6798), True, 'import numpy as np\n')] |
import subprocess
import os
import numpy as np
import cv2
import torch
# Compile the C/C++ PSE extension in this directory before it is
# imported inside pse_warpper() below.
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
if subprocess.call(['make', '-C', BASE_DIR]) != 0: # non-zero return value means `make` failed
    raise RuntimeError('Cannot compile pse: {}'.format(BASE_DIR))
def pse_warpper(kernals, min_area=5):
    '''
    reference https://github.com/liuheng92/tensorflow_PSENet/blob/feature_dev/pse
    :param kernals: stack of kernel maps, smallest kernel first
    :param min_area: connected components smaller than this are discarded
    :return: expanded label map and the list of kept label ids
    '''
    from .pse import pse_cpp
    num_kernels = len(kernals)
    if not num_kernels:
        return np.array([]), []
    kernals = np.array(kernals)
    num_labels, label = cv2.connectedComponents(
        kernals[0].astype(np.uint8), connectivity=4)
    kept_labels = []
    for idx in range(1, num_labels):
        region = label == idx
        if np.sum(region) < min_area:
            # Too small to be text: wipe it from the label map.
            label[region] = 0
        else:
            kept_labels.append(idx)
    pred = pse_cpp(label, kernals, c=num_kernels)
    return np.array(pred), kept_labels
def decode(preds, scale,
           threshold=0.7311 ,
          # threshold=0.7
          no_sigmode = False
          ):
    """
    Apply a sigmoid to the network output to obtain confidences, then use
    a threshold to separate text pixels from background.
    :param preds: network output
    :param scale: scale factor of the network
    :param threshold: sigmoid threshold
    :param no_sigmode: skip the sigmoid when the output is already a probability
    :return: final label map, text boxes, and the raw rotated rects
    """
    if not no_sigmode:
        preds = torch.sigmoid(preds)
    preds = preds.detach().cpu().numpy()
    score = preds[-1].astype(np.float32)
    preds = preds > threshold
    # preds = preds * preds[-1]  # mask smaller maps with the largest kernel; results are better without it
    pred, label_values = pse_warpper(preds, 5)
    bbox_list = []
    rects = []
    for label_value in label_values:
        # Pixel coordinates of this connected text region as (x, y) pairs.
        points = np.array(np.where(pred == label_value)).transpose((1, 0))[:, ::-1]
        if points.shape[0] < 800 / (scale * scale):
            continue
        score_i = np.mean(score[pred == label_value])
        if score_i < 0.93:
            continue
        rect = cv2.minAreaRect(points)
        bbox = cv2.boxPoints(rect)
        bbox_list.append([bbox[1], bbox[2], bbox[3], bbox[0]])
        rects.append(rect)
    return pred, np.array(bbox_list),rects
| [
"numpy.sum",
"os.path.realpath",
"torch.sigmoid",
"numpy.mean",
"numpy.array",
"subprocess.call",
"cv2.boxPoints",
"cv2.minAreaRect",
"numpy.where"
] | [((99, 125), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (115, 125), False, 'import os\n'), ((131, 172), 'subprocess.call', 'subprocess.call', (["['make', '-C', BASE_DIR]"], {}), "(['make', '-C', BASE_DIR])\n", (146, 172), False, 'import subprocess\n'), ((582, 599), 'numpy.array', 'np.array', (['kernals'], {}), '(kernals)\n', (590, 599), True, 'import numpy as np\n'), ((971, 985), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (979, 985), True, 'import numpy as np\n'), ((1334, 1354), 'torch.sigmoid', 'torch.sigmoid', (['preds'], {}), '(preds)\n', (1347, 1354), False, 'import torch\n'), ((1838, 1873), 'numpy.mean', 'np.mean', (['score[pred == label_value]'], {}), '(score[pred == label_value])\n', (1845, 1873), True, 'import numpy as np\n'), ((1938, 1961), 'cv2.minAreaRect', 'cv2.minAreaRect', (['points'], {}), '(points)\n', (1953, 1961), False, 'import cv2\n'), ((1978, 1997), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (1991, 1997), False, 'import cv2\n'), ((2107, 2126), 'numpy.array', 'np.array', (['bbox_list'], {}), '(bbox_list)\n', (2115, 2126), True, 'import numpy as np\n'), ((551, 563), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (559, 563), True, 'import numpy as np\n'), ((768, 794), 'numpy.sum', 'np.sum', (['(label == label_idx)'], {}), '(label == label_idx)\n', (774, 794), True, 'import numpy as np\n'), ((1687, 1716), 'numpy.where', 'np.where', (['(pred == label_value)'], {}), '(pred == label_value)\n', (1695, 1716), True, 'import numpy as np\n')] |
import cv2 as cv
import numpy as np
# Solve the 3x3 linear system A @ x = B and print the solution.
A = np.array([
    [2, 1, 1],
    [1, 1, 0],
    [1, 0, -3],
])
B = np.array([
    [2],
    [2],
    [1],
])
x = np.linalg.solve(A, B)
print(x)
| [
"numpy.linalg.solve",
"numpy.array"
] | [((41, 85), 'numpy.array', 'np.array', (['[[2, 1, 1], [1, 1, 0], [1, 0, -3]]'], {}), '([[2, 1, 1], [1, 1, 0], [1, 0, -3]])\n', (49, 85), True, 'import numpy as np\n'), ((111, 136), 'numpy.array', 'np.array', (['[[2], [2], [1]]'], {}), '([[2], [2], [1]])\n', (119, 136), True, 'import numpy as np\n'), ((168, 189), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'B'], {}), '(A, B)\n', (183, 189), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import time
import os
import advancedcv.hand_tracking as htm
# Virtual-painter script: draw on a webcam feed with your index finger,
# selecting colors/eraser from a header bar with two raised fingers.
brush_thickness = 15
eraser_thickness = 100
# Load the toolbar/header images shipped next to the script.
folder_path = 'headers'
my_list = os.listdir(folder_path)
my_list.pop(0)  # NOTE(review): presumably drops a hidden/system entry -- confirm
my_list.sort()
overlay_list = []
for img_path in my_list:
    image = cv2.imread(f'{folder_path}/{img_path}')
    overlay_list.append(image)
header = overlay_list[0]
draw_color = (255, 0, 255)
# 960x540 capture; the drawing canvas mirrors that size.
cap = cv2.VideoCapture(0)
cap.set(3, 960)
cap.set(4, 540)
img_canvas = np.zeros((540, 960, 3), np.uint8)
detector = htm.HandDetector(detection_confidence=0.85)
xp, yp = 0, 0  # previous fingertip position; (0, 0) marks "no stroke in progress"
p_time = 0
while True:
    # Import the image
    success, img = cap.read()
    img = cv2.flip(img, 1)
    # Find hand landmarks
    img = detector.find_hands(img, draw=False)
    lm_list = detector.get_position(img, draw=False)
    if len(lm_list) != 0:
        # tip of index and middle fingers
        x1, y1 = lm_list[8][1:]
        x2, y2 = lm_list[12][1:]
        # Check which fingers are up
        fingers = detector.fingers_up()
        # If selection mode - two finger are up
        if fingers[1] and fingers[2]:
            xp, yp = 0, 0
            if y1 < 175:
                # Header hit-testing: choose a color (or eraser) by x position.
                if 200 < x1 < 250:
                    header = overlay_list[0]
                    draw_color = (255, 0, 255)
                elif 450 < x1 < 500:
                    header = overlay_list[1]
                    draw_color = (255, 0, 0)
                elif 600 < x1 < 650:
                    header = overlay_list[2]
                    draw_color = (0, 255, 0)
                elif 800 < x1 < 850:
                    header = overlay_list[3]
                    draw_color = (0, 0, 0)
            cv2.rectangle(img, (x1, y1 - 25), (x2, y2 + 25), draw_color, cv2.FILLED)
        # If drawing mode - index finger is up
        if fingers[1] and not fingers[2]:
            cv2.circle(img, (x1, y1), 15, draw_color, cv2.FILLED)
            if xp == 0 and yp == 0:
                xp, yp = x1, y1
            if draw_color == (0, 0, 0):
                # Black acts as the eraser: thicker strokes.
                cv2.line(img, (xp, yp), (x1, y1), draw_color, eraser_thickness)
                cv2.line(img_canvas, (xp, yp), (x1, y1), draw_color, eraser_thickness)
            else:
                cv2.line(img, (xp, yp), (x1, y1), draw_color, brush_thickness)
                cv2.line(img_canvas, (xp, yp), (x1, y1), draw_color, brush_thickness)
            xp, yp = x1, y1
    c_time = time.time()
    fps = 1/(c_time - p_time)
    p_time = c_time
    # Composite the canvas onto the camera frame: threshold the canvas,
    # invert it, mask the frame, then OR the strokes back in.
    img_gray = cv2.cvtColor(img_canvas, cv2.COLOR_BGR2GRAY)
    _, img_inverse = cv2.threshold(img_gray, 50, 255, cv2.THRESH_BINARY_INV)
    img_inverse = cv2.cvtColor(img_inverse, cv2.COLOR_GRAY2BGR)
    img = cv2.bitwise_and(img, img_inverse)
    img = cv2.bitwise_or(img, img_canvas)
    # Setting the header image
    img[0:110, 0:912] = header
    img = cv2.addWeighted(img, 0.8, img_canvas, 0.2, 0)
    cv2.putText(img, f'FPS: {str(int(fps))}', (40, 50), cv2.FONT_HERSHEY_PLAIN, 5, (255, 0, 0), 3)
    cv2.imshow('Image', img)
    cv2.waitKey(1)
| [
"cv2.line",
"advancedcv.hand_tracking.HandDetector",
"cv2.circle",
"cv2.bitwise_and",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.threshold",
"numpy.zeros",
"time.time",
"cv2.VideoCapture",
"cv2.imread",
"cv2.addWeighted",
"cv2.bitwise_or",
"cv2.rectangle",
"cv2.flip",
"cv2.imshow",
"os.list... | [((172, 195), 'os.listdir', 'os.listdir', (['folder_path'], {}), '(folder_path)\n', (182, 195), False, 'import os\n'), ((414, 433), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (430, 433), False, 'import cv2\n'), ((480, 513), 'numpy.zeros', 'np.zeros', (['(540, 960, 3)', 'np.uint8'], {}), '((540, 960, 3), np.uint8)\n', (488, 513), True, 'import numpy as np\n'), ((526, 569), 'advancedcv.hand_tracking.HandDetector', 'htm.HandDetector', ([], {'detection_confidence': '(0.85)'}), '(detection_confidence=0.85)\n', (542, 569), True, 'import advancedcv.hand_tracking as htm\n'), ((283, 322), 'cv2.imread', 'cv2.imread', (['f"""{folder_path}/{img_path}"""'], {}), "(f'{folder_path}/{img_path}')\n", (293, 322), False, 'import cv2\n'), ((672, 688), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (680, 688), False, 'import cv2\n'), ((2431, 2442), 'time.time', 'time.time', ([], {}), '()\n', (2440, 2442), False, 'import time\n'), ((2521, 2565), 'cv2.cvtColor', 'cv2.cvtColor', (['img_canvas', 'cv2.COLOR_BGR2GRAY'], {}), '(img_canvas, cv2.COLOR_BGR2GRAY)\n', (2533, 2565), False, 'import cv2\n'), ((2587, 2642), 'cv2.threshold', 'cv2.threshold', (['img_gray', '(50)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(img_gray, 50, 255, cv2.THRESH_BINARY_INV)\n', (2600, 2642), False, 'import cv2\n'), ((2661, 2706), 'cv2.cvtColor', 'cv2.cvtColor', (['img_inverse', 'cv2.COLOR_GRAY2BGR'], {}), '(img_inverse, cv2.COLOR_GRAY2BGR)\n', (2673, 2706), False, 'import cv2\n'), ((2717, 2750), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img_inverse'], {}), '(img, img_inverse)\n', (2732, 2750), False, 'import cv2\n'), ((2761, 2792), 'cv2.bitwise_or', 'cv2.bitwise_or', (['img', 'img_canvas'], {}), '(img, img_canvas)\n', (2775, 2792), False, 'import cv2\n'), ((2866, 2911), 'cv2.addWeighted', 'cv2.addWeighted', (['img', '(0.8)', 'img_canvas', '(0.2)', '(0)'], {}), '(img, 0.8, img_canvas, 0.2, 0)\n', (2881, 2911), False, 'import cv2\n'), ((3015, 3039), 'cv2.imshow', 
'cv2.imshow', (['"""Image"""', 'img'], {}), "('Image', img)\n", (3025, 3039), False, 'import cv2\n'), ((3044, 3058), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3055, 3058), False, 'import cv2\n'), ((1687, 1759), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y1 - 25)', '(x2, y2 + 25)', 'draw_color', 'cv2.FILLED'], {}), '(img, (x1, y1 - 25), (x2, y2 + 25), draw_color, cv2.FILLED)\n', (1700, 1759), False, 'import cv2\n'), ((1862, 1915), 'cv2.circle', 'cv2.circle', (['img', '(x1, y1)', '(15)', 'draw_color', 'cv2.FILLED'], {}), '(img, (x1, y1), 15, draw_color, cv2.FILLED)\n', (1872, 1915), False, 'import cv2\n'), ((2042, 2105), 'cv2.line', 'cv2.line', (['img', '(xp, yp)', '(x1, y1)', 'draw_color', 'eraser_thickness'], {}), '(img, (xp, yp), (x1, y1), draw_color, eraser_thickness)\n', (2050, 2105), False, 'import cv2\n'), ((2122, 2192), 'cv2.line', 'cv2.line', (['img_canvas', '(xp, yp)', '(x1, y1)', 'draw_color', 'eraser_thickness'], {}), '(img_canvas, (xp, yp), (x1, y1), draw_color, eraser_thickness)\n', (2130, 2192), False, 'import cv2\n'), ((2227, 2289), 'cv2.line', 'cv2.line', (['img', '(xp, yp)', '(x1, y1)', 'draw_color', 'brush_thickness'], {}), '(img, (xp, yp), (x1, y1), draw_color, brush_thickness)\n', (2235, 2289), False, 'import cv2\n'), ((2306, 2375), 'cv2.line', 'cv2.line', (['img_canvas', '(xp, yp)', '(x1, y1)', 'draw_color', 'brush_thickness'], {}), '(img_canvas, (xp, yp), (x1, y1), draw_color, brush_thickness)\n', (2314, 2375), False, 'import cv2\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import cv2
import numpy as np
import sys
sys.path.append("..")
from opts_pose import opts
from detectors.detector_factory import detector_factory
class Clothes_detector():
    """Wraps a multi-pose detector and derives head / shirt / pants
    keypoints and bounding boxes from its per-person output rows."""
    def __init__(self,weights):
        # Build the detector from command-line opts, overriding the model path.
        opt = opts().init()
        opt.load_model = weights
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
        Detector = detector_factory[opt.task]
        self.detector = Detector(opt)
        self.opt = opt
    def detect(self,img):
        """Run the detector; return per-detection rows (cast to int)
        whose score (index 4) exceeds the visualization threshold."""
        results = self.detector.run(img)['results']
        for bbox in results[1]:  # NOTE(review): key 1 presumably = person class -- confirm
            if bbox[4] > self.opt.vis_thresh:
                points=[]
                for i in range(0,len(bbox)):
                    points.append(int(bbox[i]))
                det.append(points)
        return det
    def get_points(self,img):
        """Split each detection row into head/shirt/pants keypoint lists.

        NOTE(review): assumes keypoint coordinates start at index 5, with
        head at 5-14, upper body at 15-30 and lower body at 27-38 (shirt
        and pants overlap on the hip keypoints) -- confirm against the
        pose model's keypoint order.
        """
        det = self.detect(img)
        sk_out = []
        if (len(det)>0):
            for i in range(0,len(det)):
                bbox = det[i]
                ##################### HEAD #############################
                head = []
                for j in range(5,15):
                    head.append(bbox[j])
                ################### SHIRT #############################
                shirt = []
                for j in range(15,31):
                    shirt.append(bbox[j])
                ################### PANTS #############################
                pants = []
                for j in range(27,39):
                    pants.append(bbox[j])
                sk_out.append([head,shirt,pants])
        return sk_out
    def get_bbox(self,img):
        """Derive axis-aligned [x1, y1, x2, y2] boxes for head, shirt and
        pants from the keypoints, padded by a keypoint-spacing margin."""
        det = self.detect(img)
        bbox_out = []
        if (len(det)>0):
            for i in range(0,len(det)):
                bbox = det[i]
                ##################### HEAD #############################
                # Vertical half-extent estimated from facial keypoint spacing.
                diff = int(np.abs(bbox[11]-bbox[13])*0.6)+ int(np.abs(bbox[12]-bbox[14])*0.3)
                head = [bbox[11],min(bbox[12],bbox[14])-diff,bbox[13],min(bbox[12],bbox[14])+diff]
                ################### SHIRT #############################
                # Odd indices are x coordinates, even indices are y.
                xs,ys=[],[]
                for j in range(15,31):
                    if j%2==0:
                        ys.append(bbox[j])
                    else:
                        xs.append(bbox[j])
                reg=int(np.abs(bbox[15]-bbox[17])*0.2)  # padding margin
                shirt = [np.min(xs)-reg,np.min(ys)-reg,np.max(xs)+reg,np.max(ys)]
                ################### PANTS #############################
                xp,yp=[],[]
                for j in range(27,39):
                    if j%2==0:
                        yp.append(bbox[j])
                    else:
                        xp.append(bbox[j])
                pants = [np.min(xp)-reg*2,np.min(yp)-reg,np.max(xp)+reg*2,np.max(yp)]
                bbox_out.append([head,shirt,pants])
        return bbox_out
def main():
    """Webcam demo: show clothes bounding boxes and pose keypoints live.

    Press Esc to quit.
    """
    detector = Clothes_detector('../models/multi_pose_dla_3x.pth')
    cam = cv2.VideoCapture(0)
    while True:
        _, img = cam.read()
        cv2.imshow('input', img)
        # det = detector.detect(img)
        # if (len(det)>0):
        #     clothes_img = print_clothes(img,det[0])
        #     cv2.imshow('Clothes', clothes_img)
        bboxes = detector.get_bbox(img)
        clothes_img = img.copy()
        if ( len(bboxes)>0):
            for box in bboxes:
                head, shirt, pants = box[0], box[1], box[2]
                # Head in red, shirt in green, pants in blue (BGR order).
                cv2.rectangle(clothes_img,(head[0],head[1]),(head[2],head[3]),(0,0,255),3)
                cv2.rectangle(clothes_img,(shirt[0],shirt[1]),(shirt[2],shirt[3]),(0,255,0),3)
                cv2.rectangle(clothes_img,(pants[0],pants[1]),(pants[2],pants[3]),(255,0,0),3)
        cv2.imshow('Clothes', clothes_img)
        skts = detector.get_points(img)
        if ( len(skts)>0):
            for skt in skts:
                head, shirt, pants = skt[0], skt[1], skt[2]
                #print("sizes: [{},{},{}]".format(len(head),len(shirt),len(pants)))
                # Keypoints come as flat [x0, y0, x1, y1, ...] lists.
                for i in range(0,len(head)//2):
                    cv2.circle(img,(head[2*i],head[2*i+1]), 7, (0,0,255), -1)
                for i in range(0,len(shirt)//2):
                    cv2.circle(img,(shirt[2*i],shirt[2*i+1]), 7, (0,255,0), -1)
                for i in range(0,len(pants)//2):
                    cv2.circle(img,(pants[2*i],pants[2*i+1]), 7, (255,0,0), -1)
        cv2.imshow('Poses', img)
        if cv2.waitKey(1) == 27:
            return # esc to quit
if __name__ == '__main__':
    main()
| [
"sys.path.append",
"cv2.circle",
"numpy.abs",
"cv2.waitKey",
"cv2.VideoCapture",
"cv2.rectangle",
"numpy.max",
"numpy.min",
"opts_pose.opts",
"cv2.imshow"
] | [((182, 203), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (197, 203), False, 'import sys\n'), ((3170, 3189), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (3186, 3189), False, 'import cv2\n'), ((3242, 3266), 'cv2.imshow', 'cv2.imshow', (['"""input"""', 'img'], {}), "('input', img)\n", (3252, 3266), False, 'import cv2\n'), ((3938, 3972), 'cv2.imshow', 'cv2.imshow', (['"""Clothes"""', 'clothes_img'], {}), "('Clothes', clothes_img)\n", (3948, 3972), False, 'import cv2\n'), ((4628, 4652), 'cv2.imshow', 'cv2.imshow', (['"""Poses"""', 'img'], {}), "('Poses', img)\n", (4638, 4652), False, 'import cv2\n'), ((4665, 4679), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4676, 4679), False, 'import cv2\n'), ((363, 369), 'opts_pose.opts', 'opts', ([], {}), '()\n', (367, 369), False, 'from opts_pose import opts\n'), ((3647, 3734), 'cv2.rectangle', 'cv2.rectangle', (['clothes_img', '(head[0], head[1])', '(head[2], head[3])', '(0, 0, 255)', '(3)'], {}), '(clothes_img, (head[0], head[1]), (head[2], head[3]), (0, 0, \n 255), 3)\n', (3660, 3734), False, 'import cv2\n'), ((3738, 3829), 'cv2.rectangle', 'cv2.rectangle', (['clothes_img', '(shirt[0], shirt[1])', '(shirt[2], shirt[3])', '(0, 255, 0)', '(3)'], {}), '(clothes_img, (shirt[0], shirt[1]), (shirt[2], shirt[3]), (0, \n 255, 0), 3)\n', (3751, 3829), False, 'import cv2\n'), ((3833, 3923), 'cv2.rectangle', 'cv2.rectangle', (['clothes_img', '(pants[0], pants[1])', '(pants[2], pants[3])', '(255, 0, 0)', '(3)'], {}), '(clothes_img, (pants[0], pants[1]), (pants[2], pants[3]), (255,\n 0, 0), 3)\n', (3846, 3923), False, 'import cv2\n'), ((2611, 2621), 'numpy.max', 'np.max', (['ys'], {}), '(ys)\n', (2617, 2621), True, 'import numpy as np\n'), ((2986, 2996), 'numpy.max', 'np.max', (['yp'], {}), '(yp)\n', (2992, 2996), True, 'import numpy as np\n'), ((4284, 4351), 'cv2.circle', 'cv2.circle', (['img', '(head[2 * i], head[2 * i + 1])', '(7)', '(0, 0, 255)', '(-1)'], {}), '(img, (head[2 * 
i], head[2 * i + 1]), 7, (0, 0, 255), -1)\n', (4294, 4351), False, 'import cv2\n'), ((4412, 4481), 'cv2.circle', 'cv2.circle', (['img', '(shirt[2 * i], shirt[2 * i + 1])', '(7)', '(0, 255, 0)', '(-1)'], {}), '(img, (shirt[2 * i], shirt[2 * i + 1]), 7, (0, 255, 0), -1)\n', (4422, 4481), False, 'import cv2\n'), ((4542, 4611), 'cv2.circle', 'cv2.circle', (['img', '(pants[2 * i], pants[2 * i + 1])', '(7)', '(255, 0, 0)', '(-1)'], {}), '(img, (pants[2 * i], pants[2 * i + 1]), 7, (255, 0, 0), -1)\n', (4552, 4611), False, 'import cv2\n'), ((2510, 2537), 'numpy.abs', 'np.abs', (['(bbox[15] - bbox[17])'], {}), '(bbox[15] - bbox[17])\n', (2516, 2537), True, 'import numpy as np\n'), ((2566, 2576), 'numpy.min', 'np.min', (['xs'], {}), '(xs)\n', (2572, 2576), True, 'import numpy as np\n'), ((2581, 2591), 'numpy.min', 'np.min', (['ys'], {}), '(ys)\n', (2587, 2591), True, 'import numpy as np\n'), ((2596, 2606), 'numpy.max', 'np.max', (['xs'], {}), '(xs)\n', (2602, 2606), True, 'import numpy as np\n'), ((2937, 2947), 'numpy.min', 'np.min', (['xp'], {}), '(xp)\n', (2943, 2947), True, 'import numpy as np\n'), ((2954, 2964), 'numpy.min', 'np.min', (['yp'], {}), '(yp)\n', (2960, 2964), True, 'import numpy as np\n'), ((2969, 2979), 'numpy.max', 'np.max', (['xp'], {}), '(xp)\n', (2975, 2979), True, 'import numpy as np\n'), ((2030, 2057), 'numpy.abs', 'np.abs', (['(bbox[11] - bbox[13])'], {}), '(bbox[11] - bbox[13])\n', (2036, 2057), True, 'import numpy as np\n'), ((2066, 2093), 'numpy.abs', 'np.abs', (['(bbox[12] - bbox[14])'], {}), '(bbox[12] - bbox[14])\n', (2072, 2093), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import scipy.stats
def density(x, desired_length=100, bandwith=1, show=False):
    """Density estimation.

    Computes a Gaussian kernel density estimate of a sample.

    Parameters
    -----------
    x : Union[list, np.array, pd.Series]
        A vector of values.
    desired_length : int
        The amount of values in the returned density estimation.
    bandwith : float
        The bandwith of the kernel. The smaller the values, the smoother the estimation.
    show : bool
        Display the density plot.

    Returns
    -------
    x
        The x axis of the density estimation.
    y
        The y axis of the density estimation.

    Examples
    --------
    >>> import neurokit2 as nk
    >>>
    >>> signal = nk.ecg_simulate(duration=20)
    >>> x, y = nk.density(signal, bandwith=0.5, show=True)
    """
    kde = scipy.stats.gaussian_kde(x, bw_method="scott")
    # Rescale Scott's factor: dividing by `bandwith` means smaller
    # `bandwith` values widen the kernel and smooth the estimate.
    kde.set_bandwidth(bw_method=kde.factor / bandwith)
    grid = np.linspace(np.min(x), np.max(x), num=desired_length)
    estimate = kde(grid)
    if show is True:
        pd.DataFrame({"x": grid, "y": estimate}).plot(x="x")
    return grid, estimate
| [
"pandas.DataFrame",
"numpy.min",
"numpy.max"
] | [((1317, 1326), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (1323, 1326), True, 'import numpy as np\n'), ((1328, 1337), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (1334, 1337), True, 'import numpy as np\n'), ((1417, 1447), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y}"], {}), "({'x': x, 'y': y})\n", (1429, 1447), True, 'import pandas as pd\n')] |
import numpy as np
from mlutils.core.event.handler import EventHandler
from mlutils.core.engine.callback.metrics import MetricsList, Loss
class Monitor(EventHandler):
    """Event handler that accumulates metrics over batches and epochs,
    recording per-epoch results and the best value seen per
    "<phase>_<metric>" key in the engine state."""

    def __init__(self, additional_metrics=[]):
        # Loss is always monitored; extra metrics are appended after it.
        # (The mutable default is only read here, never mutated.)
        self.metrics = MetricsList([Loss()] + additional_metrics)

    def _on_batch_end(self, state):
        # Fold the just-finished batch into every metric accumulator.
        self.metrics.update_batch_data(state)

    def _on_epoch_start(self, state):
        # Reset per-batch accumulators and make sure result/best slots
        # exist for every phase/metric combination.
        self.metrics.reset_batch_data(state)
        for metric in self.metrics:
            key = f"{state.phase}_{metric.name}"
            if key not in state.epoch_results:
                state.epoch_results.update(**{key: {}})
            if key not in state.best_results:
                # Fix: np.float was deprecated in NumPy 1.20 and removed
                # in 1.24, so np.float('inf') raises AttributeError on
                # modern NumPy; the builtin float is equivalent here.
                best = -float('inf') if metric.greater_is_better else float('inf')
                best_dict = {'best': best, 'best_epoch': 0, 'prev_best': best, 'prev_best_epoch': 0}
                state.best_results.update(**{key: best_dict})

    def _on_epoch_end(self, state):
        # Finalize metric values for the epoch and refresh best-so-far.
        self.metrics.update(state)
        for metric in self.metrics:
            key = f"{state.phase}_{metric.name}"
            state.epoch_results.update(**self.metrics.get_data(state.phase))
            best_value = state.best_results[key]['best']
            current_value = metric.get_value(state.phase)
            if metric.op(current_value, best_value):
                # Keep the previous best around before overwriting it.
                state.best_results[key]['prev_best'] = state.best_results[key]['best']
                state.best_results[key]['prev_best_epoch'] = state.best_results[key]['best_epoch']
                state.best_results[key]['best'] = current_value
                state.best_results[key]['best_epoch'] = state.epoch

    # EventHandler hooks: forward each phase's callbacks to the shared
    # batch/epoch helpers above.
    def on_training_batch_end(self, state):
        self._on_batch_end(state)

    def on_validation_batch_end(self, state):
        self._on_batch_end(state)

    def on_test_batch_end(self, state):
        self._on_batch_end(state)

    def on_training_epoch_start(self, state):
        self._on_epoch_start(state)

    def on_validation_epoch_start(self, state):
        self._on_epoch_start(state)

    def on_test_epoch_start(self, state):
        self._on_epoch_start(state)

    def on_training_epoch_end(self, state):
        self._on_epoch_end(state)

    def on_validation_epoch_end(self, state):
        self._on_epoch_end(state)

    def on_test_epoch_end(self, state):
        self._on_epoch_end(state)
| [
"mlutils.core.engine.callback.metrics.Loss",
"numpy.float"
] | [((253, 259), 'mlutils.core.engine.callback.metrics.Loss', 'Loss', ([], {}), '()\n', (257, 259), False, 'from mlutils.core.engine.callback.metrics import MetricsList, Loss\n'), ((760, 775), 'numpy.float', 'np.float', (['"""inf"""'], {}), "('inf')\n", (768, 775), True, 'import numpy as np\n'), ((711, 726), 'numpy.float', 'np.float', (['"""inf"""'], {}), "('inf')\n", (719, 726), True, 'import numpy as np\n')] |
import re
import cv2
import glob
import argparse
import numpy as np
from colour import Color
def parse_args(argv=None):
    """Parse command-line options for the exploration heatmap plotter.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case argparse falls back to ``sys.argv[1:]`` — backward
            compatible with the original zero-argument call, while making
            the function testable and embeddable.

    Returns:
        argparse.Namespace with maze_size, steps, fig and
        episode_folder_pattern attributes.
    """
    arg_parser = argparse.ArgumentParser(
        description='Plot exploration_100 experiment result as heatmap')
    arg_parser.add_argument('--maze-size', type=str, default='10x10',
                            help='Size of maze, default 10x10')
    arg_parser.add_argument('--steps', type=int, default=5,
                            help='Number of steps at the beginning to record')
    arg_parser.add_argument('--fig', type=str, default='vis_exploration.png',
                            help='Filename for saved plot figure')
    arg_parser.add_argument(
        '--episode-folder-pattern', '-p', type=str, default='',
        help='Filename pattern for episode log folders')
    return arg_parser.parse_args(argv)
def collect_agent_pos(log_file_pattern, steps=None):
    """Tally agent positions recorded in episode log files.

    Scans every file matching ``log_file_pattern`` for lines of the form
    ``agent: [row, col]`` and counts how often each position occurs.

    Args:
        log_file_pattern: glob pattern matching the episode log files.
        steps: if given, only the first ``steps`` agent lines of each file
            are counted; None (default) counts them all.

    Returns:
        Tuple of (pos_dict, total_pos) where pos_dict maps a position
        string like '1_2' to its count, and total_pos is the sum of counts.
    """
    pos_dict = {}
    total_pos = 0
    # Raw string: the original '\[.*\]' relied on invalid escape sequences,
    # which emit warnings (and are errors in future Python versions).
    pos_re = re.compile(r'\[.*\]')
    for log in glob.glob(log_file_pattern):
        collect_steps = 0
        with open(log, 'r') as f:
            for line in f:
                if not line.startswith('agent:'):
                    continue
                if steps is not None and collect_steps >= steps:
                    break
                match = pos_re.search(line)
                if match is None:
                    # Malformed 'agent:' line with no [...] payload; skip
                    # instead of crashing on match.group(0).
                    continue
                # '[1, 2]' -> '1_2'
                pos_str = match.group(0).replace(', ', '_')[1:-1]
                pos_dict[pos_str] = pos_dict.get(pos_str, 0) + 1
                collect_steps += 1
                total_pos += 1
    return pos_dict, total_pos
def plot_exploration_heatmap(pos_dict, total_pos, log_file_pattern,
                             maze_size, fig):
    """Overlay a visit-count heatmap on an episode's initial observation.

    Args:
        pos_dict: mapping 'row_col' position string -> visit count, as
            produced by collect_agent_pos().
        total_pos: total number of recorded positions (sum of all counts).
        log_file_pattern: glob pattern for episode log files; the first
            match is used to locate the sibling 'init_ob.png' image.
        maze_size: maze dimensions as an 'HxW' string, e.g. '10x10'.
        fig: output filename for the rendered heatmap image.
    """
    # Reuses the parameter name for the parsed [height, width] floats.
    maze_size = [float(i) for i in maze_size.split('x')]
    # NOTE(review): raises IndexError if the pattern matches no files.
    example_log = glob.glob(log_file_pattern)[0]
    init_ob = cv2.imread(example_log.replace('log.txt', 'init_ob.png'))
    init_ob = cv2.resize(init_ob, (80, 80))  # for better offset
    # Colour ramp with one shade per recorded position; after reverse(),
    # higher counts index darker shades.
    dark_red = Color("#550000")
    colors = list(dark_red.range_to(Color("#ffaaaa"), int(total_pos)))
    colors.reverse()
    # NOTE(review): colors has total_pos entries (indices 0..total_pos-1),
    # so colors[count] raises IndexError if one cell holds every recorded
    # position (count == total_pos) — confirm or clamp upstream.
    h, w, _ = init_ob.shape
    for pos_str, count in pos_dict.items():
        pos = [int(i) for i in pos_str.split('_')]
        # Pixel bounds of this maze cell inside the resized observation.
        y = int(pos[0] * h / maze_size[0])
        x = int(pos[1] * w / maze_size[1])
        y_ = int((pos[0] + 1) * h / maze_size[0])
        x_ = int((pos[1] + 1) * w / maze_size[1])
        # rgb[::-1] converts the colour tuple to OpenCV's BGR order.
        color = np.array(colors[count].rgb[::-1]) * 255
        init_ob[y:y_, x:x_] = np.asarray(color, dtype=np.uint8)
    cv2.imwrite(fig, init_ob)
    print('Saved {}'.format(fig))
def main():
    """Entry point: gather agent positions and render the heatmap figure."""
    cli_args = parse_args()
    position_stats = collect_agent_pos(cli_args.episode_folder_pattern,
                                       steps=cli_args.steps)
    print(position_stats)
    plot_exploration_heatmap(*position_stats, cli_args.episode_folder_pattern,
                             cli_args.maze_size, cli_args.fig)


if __name__ == '__main__':
    main()
| [
"colour.Color",
"argparse.ArgumentParser",
"cv2.imwrite",
"numpy.asarray",
"numpy.array",
"glob.glob",
"re.search",
"cv2.resize"
] | [((130, 223), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot exploration_100 experiment result as heatmap"""'}), "(description=\n 'Plot exploration_100 experiment result as heatmap')\n", (153, 223), False, 'import argparse\n'), ((918, 945), 'glob.glob', 'glob.glob', (['log_file_pattern'], {}), '(log_file_pattern)\n', (927, 945), False, 'import glob\n'), ((1949, 1978), 'cv2.resize', 'cv2.resize', (['init_ob', '(80, 80)'], {}), '(init_ob, (80, 80))\n', (1959, 1978), False, 'import cv2\n'), ((2015, 2031), 'colour.Color', 'Color', (['"""#550000"""'], {}), "('#550000')\n", (2020, 2031), False, 'from colour import Color\n'), ((2558, 2583), 'cv2.imwrite', 'cv2.imwrite', (['fig', 'init_ob'], {}), '(fig, init_ob)\n', (2569, 2583), False, 'import cv2\n'), ((1832, 1859), 'glob.glob', 'glob.glob', (['log_file_pattern'], {}), '(log_file_pattern)\n', (1841, 1859), False, 'import glob\n'), ((2519, 2552), 'numpy.asarray', 'np.asarray', (['color'], {'dtype': 'np.uint8'}), '(color, dtype=np.uint8)\n', (2529, 2552), True, 'import numpy as np\n'), ((2068, 2084), 'colour.Color', 'Color', (['"""#ffaaaa"""'], {}), "('#ffaaaa')\n", (2073, 2084), False, 'from colour import Color\n'), ((2449, 2482), 'numpy.array', 'np.array', (['colors[count].rgb[::-1]'], {}), '(colors[count].rgb[::-1])\n', (2457, 2482), True, 'import numpy as np\n'), ((1272, 1296), 're.search', 're.search', (['"""\\\\[.*\\\\]"""', 'l'], {}), "('\\\\[.*\\\\]', l)\n", (1281, 1296), False, 'import re\n')] |
from processing import *
from analysis import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
# Load the per-day statistics structure from disk.
D = load_obj("dict")
# Unpack into parallel, chronologically sorted lists of days and stat dicts,
# echoing each day's dict as we go.
days = sorted(D)
stats = [D[day] for day in days]
for day in days:
    print(D[day])
# Pull one named statistic out of every day's dict, preserving day order.
def _series(stat_key):
    return [day_stats[stat_key] for day_stats in stats]

# -------------- General Analysis ---------------
ov_sent = _series('os')            # overall sentiment per day
no_tws = _series('no_tweets')      # tweet volume per day
no_brexit = _series('n Brexit')
sent_brexit = _series('ms Brexit')
# -------------- Party Analysis -----------------
no_labour = _series('n Labour party')
sent_labour = _series('ms Labour party')
no_conservat = _series('n Conservative party')
sent_conservat = _series('ms Conservative party')
no_brexitpart = _series('n Brexit party')
sent_brexitpart = _series('ms Brexit party')
# Liberal Democrats appear under two keys: merge the counts and take the
# count-weighted mean of the two sentiment series.
ld_count_a = np.array(_series('n Lib dems'))
ld_count_b = np.array(_series('n Liberal Democrats'))
no_libdem = list(ld_count_a + ld_count_b)
ld_sent_a = np.array(_series('ms Lib dems'))
ld_sent_b = np.array(_series('ms Liberal Democrats'))
sent_libdem = list((ld_sent_a * ld_count_a + ld_sent_b * ld_count_b)
                   / (ld_count_a + ld_count_b))
# -------------------- Leader Analysis ------------------
# NOTE(review): the '<NAME>' keys look like anonymisation placeholders; all
# four leaders share the same literal key, so these series may be identical
# — verify against the upstream data.
no_farage = _series('n <NAME>')
sent_farage = _series('ms <NAME>')
print(sent_farage)
no_johnson = _series('n <NAME>')
sent_johnson = _series('ms <NAME>')
no_swinson = _series('n <NAME>')
sent_swinson = _series('ms <NAME>')
# The Corbyn series was subsampled upstream due to a spelling error.
no_corbyn = _series('n <NAME>')
sent_corbyn = _series('ms <NAME>')
plot_overall = True
if plot_overall:
    # Overall mean sentiment per day.
    plt.figure()
    plt.plot(days, ov_sent, marker='x', linestyle="--", color='g',
             markeredgecolor='r', alpha=0.7, label="Overall Sentiment")
    plt.grid('on')
    axes = plt.gca()
    axes.xaxis.set_major_formatter(mdates.DateFormatter('%d/%m/%Y'))
    axes.xaxis.set_major_locator(mdates.DayLocator())
    axes.set_facecolor('#D9E6E8')
    plt.legend()
    plt.ylabel(r"Mean Sentiment")
    plt.tight_layout()
    plt.show()
plot_numbers = True
if plot_numbers:
    # Daily tweet volume (scientific notation on the y axis).
    plt.plot(days, no_tws, marker='x', linestyle="--", color='g',
             markeredgecolor='r', alpha=0.7)
    plt.grid('on')
    axes = plt.gca()
    axes.set_facecolor('#D9E6E8')
    plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    axes.xaxis.set_major_formatter(mdates.DateFormatter('%d/%m/%Y'))
    axes.xaxis.set_major_locator(mdates.DayLocator())
    plt.ylabel(r"# of Tweets")
    plt.tight_layout()
    plt.show()
plot_psents = True
if plot_psents:
    # Mean sentiment per party; plot order fixes the colour cycle.
    party_sentiments = [
        (sent_conservat, "Conservative"),
        (sent_labour, "Labour"),
        (sent_libdem, "Liberal Democrat"),
        (sent_brexitpart, "Brexit Party"),
    ]
    for series, party in party_sentiments:
        plt.plot(days, series, marker='x', linestyle="--", alpha=0.7,
                 label=party)
    plt.grid('on')
    axes = plt.gca()
    axes.xaxis.set_major_formatter(mdates.DateFormatter('%d/%m/%Y'))
    axes.xaxis.set_major_locator(mdates.DayLocator())
    axes.set_facecolor('#D9E6E8')
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.ylabel(r"Mean Sentiment")
    plt.tight_layout()
    plt.show()
plot_partynos = True
if plot_partynos:
    # Tweet counts per party; plot order fixes the colour cycle.
    party_counts = [
        (no_conservat, "Conservative"),
        (no_labour, "Labour"),
        (no_brexitpart, "Brexit Party"),
        (no_libdem, "Liberal Democrats"),
    ]
    for series, party in party_counts:
        plt.plot(days, series, marker='x', linestyle="--", alpha=0.7,
                 label=party)
    plt.grid('on')
    axes = plt.gca()
    plt.legend()
    axes.set_facecolor('#D9E6E8')
    plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    axes.xaxis.set_major_formatter(mdates.DateFormatter('%d/%m/%Y'))
    axes.xaxis.set_major_locator(mdates.DayLocator())
    plt.ylabel(r"# of Tweets")
    plt.tight_layout()
    plt.show()
# Presentation idea:
# https://www.bbc.com/news/uk-politics-50572454
plot_nleaders = True
if plot_nleaders:
    # Tweet counts per party leader; plot order fixes the colour cycle.
    leader_counts = [
        (no_johnson, "<NAME>"),
        (no_corbyn, "<NAME> -CORRUPTED"),
        (no_farage, "<NAME>"),
        (no_swinson, "<NAME>"),
    ]
    for series, leader in leader_counts:
        plt.plot(days, series, marker='x', linestyle="--", alpha=0.7,
                 label=leader)
    plt.grid('on')
    axes = plt.gca()
    plt.legend()
    axes.set_facecolor('#D9E6E8')
    plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    axes.xaxis.set_major_formatter(mdates.DateFormatter('%d/%m/%Y'))
    axes.xaxis.set_major_locator(mdates.DayLocator())
    plt.ylabel(r"# of Tweets")
    plt.tight_layout()
    plt.show()
plot_leadsents = True
if plot_leadsents:
    # Mean sentiment per party leader; plot order fixes the colour cycle.
    leader_sentiments = [
        (sent_johnson, "<NAME>"),
        (sent_corbyn, "<NAME> -CORRUPTED"),
        (sent_swinson, "<NAME>"),
        (sent_farage, "<NAME>"),
    ]
    for series, leader in leader_sentiments:
        plt.plot(days, series, marker='x', linestyle="--", alpha=0.7,
                 label=leader)
    plt.grid('on')
    axes = plt.gca()
    axes.xaxis.set_major_formatter(mdates.DateFormatter('%d/%m/%Y'))
    axes.xaxis.set_major_locator(mdates.DayLocator())
    axes.set_facecolor('#D9E6E8')
    plt.legend()
    plt.ylabel(r"Mean Sentiment")
    plt.tight_layout()
    plt.show()
| [
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"matplotlib.dates.DayLocator",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.dates.DateFormatter",
"numpy.array",
"matplotlib.pyplot.ticklabel_format",
"matplotlib.pyplot.gca",
"matplotlib... | [((1118, 1160), 'numpy.array', 'np.array', (["[a['n Lib dems'] for a in stats]"], {}), "([a['n Lib dems'] for a in stats])\n", (1126, 1160), True, 'import numpy as np\n'), ((1167, 1218), 'numpy.array', 'np.array', (["[a['n Liberal Democrats'] for a in stats]"], {}), "([a['n Liberal Democrats'] for a in stats])\n", (1175, 1218), True, 'import numpy as np\n'), ((1256, 1299), 'numpy.array', 'np.array', (["[a['ms Lib dems'] for a in stats]"], {}), "([a['ms Lib dems'] for a in stats])\n", (1264, 1299), True, 'import numpy as np\n'), ((1308, 1360), 'numpy.array', 'np.array', (["[a['ms Liberal Democrats'] for a in stats]"], {}), "([a['ms Liberal Democrats'] for a in stats])\n", (1316, 1360), True, 'import numpy as np\n'), ((1972, 1984), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1982, 1984), True, 'import matplotlib.pyplot as plt\n'), ((1986, 2111), 'matplotlib.pyplot.plot', 'plt.plot', (['days', 'ov_sent'], {'marker': '"""x"""', 'linestyle': '"""--"""', 'color': '"""g"""', 'markeredgecolor': '"""r"""', 'alpha': '(0.7)', 'label': '"""Overall Sentiment"""'}), "(days, ov_sent, marker='x', linestyle='--', color='g',\n markeredgecolor='r', alpha=0.7, label='Overall Sentiment')\n", (1994, 2111), True, 'import matplotlib.pyplot as plt\n'), ((2105, 2119), 'matplotlib.pyplot.grid', 'plt.grid', (['"""on"""'], {}), "('on')\n", (2113, 2119), True, 'import matplotlib.pyplot as plt\n'), ((2126, 2135), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2133, 2135), True, 'import matplotlib.pyplot as plt\n'), ((2293, 2305), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2303, 2305), True, 'import matplotlib.pyplot as plt\n'), ((2307, 2335), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean Sentiment"""'], {}), "('Mean Sentiment')\n", (2317, 2335), True, 'import matplotlib.pyplot as plt\n'), ((2338, 2356), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2354, 2356), True, 'import matplotlib.pyplot as 
plt\n'), ((2358, 2368), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2366, 2368), True, 'import matplotlib.pyplot as plt\n'), ((2406, 2503), 'matplotlib.pyplot.plot', 'plt.plot', (['days', 'no_tws'], {'marker': '"""x"""', 'linestyle': '"""--"""', 'color': '"""g"""', 'markeredgecolor': '"""r"""', 'alpha': '(0.7)'}), "(days, no_tws, marker='x', linestyle='--', color='g',\n markeredgecolor='r', alpha=0.7)\n", (2414, 2503), True, 'import matplotlib.pyplot as plt\n'), ((2498, 2512), 'matplotlib.pyplot.grid', 'plt.grid', (['"""on"""'], {}), "('on')\n", (2506, 2512), True, 'import matplotlib.pyplot as plt\n'), ((2519, 2528), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2526, 2528), True, 'import matplotlib.pyplot as plt\n'), ((2559, 2620), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'style': '"""sci"""', 'axis': '"""y"""', 'scilimits': '(0, 0)'}), "(style='sci', axis='y', scilimits=(0, 0))\n", (2579, 2620), True, 'import matplotlib.pyplot as plt\n'), ((2748, 2773), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# of Tweets"""'], {}), "('# of Tweets')\n", (2758, 2773), True, 'import matplotlib.pyplot as plt\n'), ((2776, 2794), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2792, 2794), True, 'import matplotlib.pyplot as plt\n'), ((2796, 2806), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2804, 2806), True, 'import matplotlib.pyplot as plt\n'), ((2872, 2968), 'matplotlib.pyplot.plot', 'plt.plot', (['days', 'sent_conservat'], {'marker': '"""x"""', 'linestyle': '"""--"""', 'alpha': '(0.7)', 'label': '"""Conservative"""'}), "(days, sent_conservat, marker='x', linestyle='--', alpha=0.7, label\n ='Conservative')\n", (2880, 2968), True, 'import matplotlib.pyplot as plt\n'), ((2963, 3050), 'matplotlib.pyplot.plot', 'plt.plot', (['days', 'sent_labour'], {'marker': '"""x"""', 'linestyle': '"""--"""', 'alpha': '(0.7)', 'label': '"""Labour"""'}), "(days, sent_labour, marker='x', linestyle='--', 
alpha=0.7, label=\n 'Labour')\n", (2971, 3050), True, 'import matplotlib.pyplot as plt\n'), ((3045, 3142), 'matplotlib.pyplot.plot', 'plt.plot', (['days', 'sent_libdem'], {'marker': '"""x"""', 'linestyle': '"""--"""', 'alpha': '(0.7)', 'label': '"""Liberal Democrat"""'}), "(days, sent_libdem, marker='x', linestyle='--', alpha=0.7, label=\n 'Liberal Democrat')\n", (3053, 3142), True, 'import matplotlib.pyplot as plt\n'), ((3137, 3233), 'matplotlib.pyplot.plot', 'plt.plot', (['days', 'sent_brexitpart'], {'marker': '"""x"""', 'linestyle': '"""--"""', 'alpha': '(0.7)', 'label': '"""Brexit Party"""'}), "(days, sent_brexitpart, marker='x', linestyle='--', alpha=0.7,\n label='Brexit Party')\n", (3145, 3233), True, 'import matplotlib.pyplot as plt\n'), ((3229, 3243), 'matplotlib.pyplot.grid', 'plt.grid', (['"""on"""'], {}), "('on')\n", (3237, 3243), True, 'import matplotlib.pyplot as plt\n'), ((3250, 3259), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3257, 3259), True, 'import matplotlib.pyplot as plt\n'), ((3417, 3490), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '"""upper left"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.0)\n", (3427, 3490), True, 'import matplotlib.pyplot as plt\n'), ((3491, 3519), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean Sentiment"""'], {}), "('Mean Sentiment')\n", (3501, 3519), True, 'import matplotlib.pyplot as plt\n'), ((3522, 3540), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3538, 3540), True, 'import matplotlib.pyplot as plt\n'), ((3542, 3552), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3550, 3552), True, 'import matplotlib.pyplot as plt\n'), ((3592, 3686), 'matplotlib.pyplot.plot', 'plt.plot', (['days', 'no_conservat'], {'marker': '"""x"""', 'linestyle': '"""--"""', 'alpha': '(0.7)', 'label': '"""Conservative"""'}), "(days, no_conservat, marker='x', linestyle='--', alpha=0.7, label=\n 
'Conservative')\n", (3600, 3686), True, 'import matplotlib.pyplot as plt\n'), ((3681, 3766), 'matplotlib.pyplot.plot', 'plt.plot', (['days', 'no_labour'], {'marker': '"""x"""', 'linestyle': '"""--"""', 'alpha': '(0.7)', 'label': '"""Labour"""'}), "(days, no_labour, marker='x', linestyle='--', alpha=0.7, label='Labour'\n )\n", (3689, 3766), True, 'import matplotlib.pyplot as plt\n'), ((3761, 3856), 'matplotlib.pyplot.plot', 'plt.plot', (['days', 'no_brexitpart'], {'marker': '"""x"""', 'linestyle': '"""--"""', 'alpha': '(0.7)', 'label': '"""Brexit Party"""'}), "(days, no_brexitpart, marker='x', linestyle='--', alpha=0.7, label=\n 'Brexit Party')\n", (3769, 3856), True, 'import matplotlib.pyplot as plt\n'), ((3851, 3947), 'matplotlib.pyplot.plot', 'plt.plot', (['days', 'no_libdem'], {'marker': '"""x"""', 'linestyle': '"""--"""', 'alpha': '(0.7)', 'label': '"""Liberal Democrats"""'}), "(days, no_libdem, marker='x', linestyle='--', alpha=0.7, label=\n 'Liberal Democrats')\n", (3859, 3947), True, 'import matplotlib.pyplot as plt\n'), ((3942, 3956), 'matplotlib.pyplot.grid', 'plt.grid', (['"""on"""'], {}), "('on')\n", (3950, 3956), True, 'import matplotlib.pyplot as plt\n'), ((3963, 3972), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3970, 3972), True, 'import matplotlib.pyplot as plt\n'), ((3974, 3986), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3984, 3986), True, 'import matplotlib.pyplot as plt\n'), ((4017, 4078), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'style': '"""sci"""', 'axis': '"""y"""', 'scilimits': '(0, 0)'}), "(style='sci', axis='y', scilimits=(0, 0))\n", (4037, 4078), True, 'import matplotlib.pyplot as plt\n'), ((4206, 4231), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# of Tweets"""'], {}), "('# of Tweets')\n", (4216, 4231), True, 'import matplotlib.pyplot as plt\n'), ((4234, 4252), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4250, 4252), True, 'import matplotlib.pyplot 
as plt\n'), ((4254, 4264), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4262, 4264), True, 'import matplotlib.pyplot as plt\n'), ((4364, 4450), 'matplotlib.pyplot.plot', 'plt.plot', (['days', 'no_johnson'], {'marker': '"""x"""', 'linestyle': '"""--"""', 'alpha': '(0.7)', 'label': '"""<NAME>"""'}), "(days, no_johnson, marker='x', linestyle='--', alpha=0.7, label=\n '<NAME>')\n", (4372, 4450), True, 'import matplotlib.pyplot as plt\n'), ((4445, 4541), 'matplotlib.pyplot.plot', 'plt.plot', (['days', 'no_corbyn'], {'marker': '"""x"""', 'linestyle': '"""--"""', 'alpha': '(0.7)', 'label': '"""<NAME> -CORRUPTED"""'}), "(days, no_corbyn, marker='x', linestyle='--', alpha=0.7, label=\n '<NAME> -CORRUPTED')\n", (4453, 4541), True, 'import matplotlib.pyplot as plt\n'), ((4536, 4621), 'matplotlib.pyplot.plot', 'plt.plot', (['days', 'no_farage'], {'marker': '"""x"""', 'linestyle': '"""--"""', 'alpha': '(0.7)', 'label': '"""<NAME>"""'}), "(days, no_farage, marker='x', linestyle='--', alpha=0.7, label='<NAME>'\n )\n", (4544, 4621), True, 'import matplotlib.pyplot as plt\n'), ((4616, 4702), 'matplotlib.pyplot.plot', 'plt.plot', (['days', 'no_swinson'], {'marker': '"""x"""', 'linestyle': '"""--"""', 'alpha': '(0.7)', 'label': '"""<NAME>"""'}), "(days, no_swinson, marker='x', linestyle='--', alpha=0.7, label=\n '<NAME>')\n", (4624, 4702), True, 'import matplotlib.pyplot as plt\n'), ((4697, 4711), 'matplotlib.pyplot.grid', 'plt.grid', (['"""on"""'], {}), "('on')\n", (4705, 4711), True, 'import matplotlib.pyplot as plt\n'), ((4718, 4727), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4725, 4727), True, 'import matplotlib.pyplot as plt\n'), ((4729, 4741), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4739, 4741), True, 'import matplotlib.pyplot as plt\n'), ((4772, 4833), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'style': '"""sci"""', 'axis': '"""y"""', 'scilimits': '(0, 0)'}), "(style='sci', axis='y', scilimits=(0, 0))\n", 
(4792, 4833), True, 'import matplotlib.pyplot as plt\n'), ((4961, 4986), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# of Tweets"""'], {}), "('# of Tweets')\n", (4971, 4986), True, 'import matplotlib.pyplot as plt\n'), ((4989, 5007), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5005, 5007), True, 'import matplotlib.pyplot as plt\n'), ((5009, 5019), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5017, 5019), True, 'import matplotlib.pyplot as plt\n'), ((5092, 5180), 'matplotlib.pyplot.plot', 'plt.plot', (['days', 'sent_johnson'], {'marker': '"""x"""', 'linestyle': '"""--"""', 'alpha': '(0.7)', 'label': '"""<NAME>"""'}), "(days, sent_johnson, marker='x', linestyle='--', alpha=0.7, label=\n '<NAME>')\n", (5100, 5180), True, 'import matplotlib.pyplot as plt\n'), ((5175, 5273), 'matplotlib.pyplot.plot', 'plt.plot', (['days', 'sent_corbyn'], {'marker': '"""x"""', 'linestyle': '"""--"""', 'alpha': '(0.7)', 'label': '"""<NAME> -CORRUPTED"""'}), "(days, sent_corbyn, marker='x', linestyle='--', alpha=0.7, label=\n '<NAME> -CORRUPTED')\n", (5183, 5273), True, 'import matplotlib.pyplot as plt\n'), ((5268, 5356), 'matplotlib.pyplot.plot', 'plt.plot', (['days', 'sent_swinson'], {'marker': '"""x"""', 'linestyle': '"""--"""', 'alpha': '(0.7)', 'label': '"""<NAME>"""'}), "(days, sent_swinson, marker='x', linestyle='--', alpha=0.7, label=\n '<NAME>')\n", (5276, 5356), True, 'import matplotlib.pyplot as plt\n'), ((5351, 5438), 'matplotlib.pyplot.plot', 'plt.plot', (['days', 'sent_farage'], {'marker': '"""x"""', 'linestyle': '"""--"""', 'alpha': '(0.7)', 'label': '"""<NAME>"""'}), "(days, sent_farage, marker='x', linestyle='--', alpha=0.7, label=\n '<NAME>')\n", (5359, 5438), True, 'import matplotlib.pyplot as plt\n'), ((5433, 5447), 'matplotlib.pyplot.grid', 'plt.grid', (['"""on"""'], {}), "('on')\n", (5441, 5447), True, 'import matplotlib.pyplot as plt\n'), ((5454, 5463), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5461, 5463), 
True, 'import matplotlib.pyplot as plt\n'), ((5697, 5709), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5707, 5709), True, 'import matplotlib.pyplot as plt\n'), ((5733, 5761), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean Sentiment"""'], {}), "('Mean Sentiment')\n", (5743, 5761), True, 'import matplotlib.pyplot as plt\n'), ((5764, 5782), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5780, 5782), True, 'import matplotlib.pyplot as plt\n'), ((5784, 5794), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5792, 5794), True, 'import matplotlib.pyplot as plt\n'), ((2173, 2205), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%d/%m/%Y"""'], {}), "('%d/%m/%Y')\n", (2193, 2205), True, 'import matplotlib.dates as mdates\n'), ((2242, 2261), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {}), '()\n', (2259, 2261), True, 'import matplotlib.dates as mdates\n'), ((2657, 2689), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%d/%m/%Y"""'], {}), "('%d/%m/%Y')\n", (2677, 2689), True, 'import matplotlib.dates as mdates\n'), ((2726, 2745), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {}), '()\n', (2743, 2745), True, 'import matplotlib.dates as mdates\n'), ((3297, 3329), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%d/%m/%Y"""'], {}), "('%d/%m/%Y')\n", (3317, 3329), True, 'import matplotlib.dates as mdates\n'), ((3366, 3385), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {}), '()\n', (3383, 3385), True, 'import matplotlib.dates as mdates\n'), ((4115, 4147), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%d/%m/%Y"""'], {}), "('%d/%m/%Y')\n", (4135, 4147), True, 'import matplotlib.dates as mdates\n'), ((4184, 4203), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {}), '()\n', (4201, 4203), True, 'import matplotlib.dates as mdates\n'), ((4870, 4902), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', 
(['"""%d/%m/%Y"""'], {}), "('%d/%m/%Y')\n", (4890, 4902), True, 'import matplotlib.dates as mdates\n'), ((4939, 4958), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {}), '()\n', (4956, 4958), True, 'import matplotlib.dates as mdates\n'), ((5501, 5533), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%d/%m/%Y"""'], {}), "('%d/%m/%Y')\n", (5521, 5533), True, 'import matplotlib.dates as mdates\n'), ((5570, 5589), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {}), '()\n', (5587, 5589), True, 'import matplotlib.dates as mdates\n'), ((2137, 2146), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2144, 2146), True, 'import matplotlib.pyplot as plt\n'), ((2208, 2217), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2215, 2217), True, 'import matplotlib.pyplot as plt\n'), ((2621, 2630), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2628, 2630), True, 'import matplotlib.pyplot as plt\n'), ((2692, 2701), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2699, 2701), True, 'import matplotlib.pyplot as plt\n'), ((3261, 3270), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3268, 3270), True, 'import matplotlib.pyplot as plt\n'), ((3332, 3341), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3339, 3341), True, 'import matplotlib.pyplot as plt\n'), ((4079, 4088), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4086, 4088), True, 'import matplotlib.pyplot as plt\n'), ((4150, 4159), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4157, 4159), True, 'import matplotlib.pyplot as plt\n'), ((4834, 4843), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4841, 4843), True, 'import matplotlib.pyplot as plt\n'), ((4905, 4914), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4912, 4914), True, 'import matplotlib.pyplot as plt\n'), ((5465, 5474), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5472, 5474), True, 'import matplotlib.pyplot as plt\n'), ((5536, 5545), 
'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5543, 5545), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np

# Element-wise arithmetic on equal-length vectors.
x = np.array([1.0, 2.0, 3.0])
y = np.array([2.0, 4.0, 6.0])
x + y
x * y
x / y
x / 2.0  # broadcasting: the scalar is applied to every element

# 2-D arrays (matrices).
A = np.array([[1, 2], [3, 4]])
print(A)
A.shape
A.dtype
B = np.array([[3, 0], [0, 6]])
A + B
A * B    # element-wise, NOT matrix multiplication
A * 10   # broadcast

# Broadcasting: the smaller operand is virtually expanded to match.
# BUG FIX: the original `np.array([1,2],[3,4])` passed [3, 4] as the
# dtype argument and raised a TypeError; a 2x2 matrix needs one nested list.
A = np.array([[1, 2], [3, 4]])
B = np.array([10, 20])
A * B    # B is broadcast across both rows of A

# Element access.
X = np.array([[51, 55], [14, 19], [0, 4]])
print(X)
print(X.shape)
X[0]
X[0][1]
for row in X:
    print(row)
"numpy.array"
] | [((69, 94), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (77, 94), True, 'import numpy as np\n'), ((97, 122), 'numpy.array', 'np.array', (['[2.0, 4.0, 6.0]'], {}), '([2.0, 4.0, 6.0])\n', (105, 122), True, 'import numpy as np\n'), ((165, 191), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (173, 191), True, 'import numpy as np\n'), ((218, 244), 'numpy.array', 'np.array', (['[[3, 0], [0, 6]]'], {}), '([[3, 0], [0, 6]])\n', (226, 244), True, 'import numpy as np\n'), ((299, 323), 'numpy.array', 'np.array', (['[1, 2]', '[3, 4]'], {}), '([1, 2], [3, 4])\n', (307, 323), True, 'import numpy as np\n'), ((325, 343), 'numpy.array', 'np.array', (['[10, 20]'], {}), '([10, 20])\n', (333, 343), True, 'import numpy as np\n'), ((363, 401), 'numpy.array', 'np.array', (['[[51, 55], [14, 19], [0, 4]]'], {}), '([[51, 55], [14, 19], [0, 4]])\n', (371, 401), True, 'import numpy as np\n')] |
from matplotlib import pyplot as plt # Pyplot for nice graphs
from matplotlib import patches as mpatches
from mpl_toolkits.mplot3d import Axes3D # Used for 3D plots
from matplotlib.widgets import Slider, Button
import matplotlib
import numpy as np # NumPy
# import seaborn
from numpy import linalg as LA
# from collections import Counter
from Functions import xyzimport, Hkay, Onsite, Hop, RecursionRoutine
import sys
# Print full arrays without truncation — useful when inspecting the matrices.
np.set_printoptions(threshold=sys.maxsize)
# Set hopping potential
Vppi = -1
# Define lattice vectors
# shiftx = 32.7862152500
# shifty = 8.6934634800
#
# # Retrieve unit cell
# xyz = xyzimport('TestCell.fdf')
# # Calculate onsite nearest neighbours
# Ham = Onsite(xyz, Vppi)
#
# # Shift unit cell
# xyz1 = xyz + np.array([shiftx, 0, 0])
# # Calculate offsite nearest neighbours
# V1 = Hop(xyz, xyz1, Vppi)
#
# # Shift unit cell
# xyz2 = xyz + np.array([0, shifty, 0])
# # Calculate offsite nearest neighbours
# V2 = Hop(xyz, xyz2, Vppi)
#
# # Shift unit cell
# xyz3 = xyz + np.array([shiftx, shifty, 0])
# # Calculate offsite nearest neighbours
# V3 = Hop(xyz, xyz3, Vppi)
# h: nearest-neighbour adjacency of a 4-site chain (1 on the off-diagonals).
h = np.array([[0, 1, 0, 0], [1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0]])
# V: coupling pattern with entries at (0,1) and (3,2) — presumably the
# inter-cell hopping between neighbouring unit cells; verify against
# RecursionRoutine's convention.
V = np.array([[0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0]])
# h = np.array([[0]])
# V = np.array([[1]])
# Scale both adjacency patterns by the hopping potential.
h = h * Vppi
V = V * Vppi
# print(np.sum(h))
Show = 0  # set to 1 to visualise the hopping matrices
if Show == 1:
    for matrix in (h, V):
        plt.imshow(matrix)
        plt.colorbar()
        plt.show()
En = np.linspace(-3, 3, 100)  # Energy grid the Green's function is evaluated on
# En = np.linspace(-1, 1, 3)
eta = 1e-6j  # small imaginary offset passed along with the energy — presumably a broadening term; confirm in RecursionRoutine
G00 = np.zeros((En.shape[0]), dtype=complex)  # Empty data matrix for Green's functions
for i in range(En.shape[0]):  # Loop iterating over energies
    G, SelfER, SelfEL = RecursionRoutine(En[i], h, V, eta)  # Invoking the RecursionRoutine
    G = np.diag(G)  # The Green's functions for each site is in the diagonal of the G matrix
    # NOTE(review): index 4 selects the FIFTH diagonal entry; h is 4x4, so if
    # RecursionRoutine returns a same-sized matrix this raises IndexError —
    # confirm the dimension of G.
    G00[i] = G[4]  # Chosen Green's function (here the 4th site)
# Split the selected Green's function into real and imaginary parts.
Y = G00
X = En
Y1 = Y.real
Y2 = Y.imag
# Real part as a dashed-free line, imaginary part as a filled curve.
real, = plt.plot(X, Y1, label='real')
imag, = plt.fill(X, Y2, c='orange', alpha=0.8, label='imag')
plt.ylim((-2, 4))
plt.grid(which='major', axis='both')
plt.legend(handles=[imag, real])
plt.title('Greens function of a simple four-atom unit cell')
plt.xlabel('Energy E arb. unit')
plt.ylabel('Re[G00(E)]/Im[G00(E)]')
plt.savefig('imrealplot.eps', bbox_inches='tight')
plt.show()
# Annotate each row index of h at the coordinates formed by its first two
# columns. NOTE(review): using Hamiltonian matrix entries as x/y positions
# looks suspicious — confirm this is the intended plot.
for idx, row in enumerate(h):
    plt.annotate(idx, (row[0], row[1]))
plt.grid(b=True, which='both', axis='both')
plt.show()
| [
"matplotlib.pyplot.title",
"numpy.diag",
"numpy.set_printoptions",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.colorbar",
"numpy.linspace",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"Functions.RecursionRoutine",
"matplotlib.pyplot.ylabel",
"matplotlib.p... | [((444, 486), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize'}), '(threshold=sys.maxsize)\n', (463, 486), True, 'import numpy as np\n'), ((1125, 1191), 'numpy.array', 'np.array', (['[[0, 1, 0, 0], [1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0]]'], {}), '([[0, 1, 0, 0], [1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0]])\n', (1133, 1191), True, 'import numpy as np\n'), ((1196, 1262), 'numpy.array', 'np.array', (['[[0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0]]'], {}), '([[0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0]])\n', (1204, 1262), True, 'import numpy as np\n'), ((1489, 1512), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(100)'], {}), '(-3, 3, 100)\n', (1500, 1512), True, 'import numpy as np\n'), ((1560, 1596), 'numpy.zeros', 'np.zeros', (['En.shape[0]'], {'dtype': 'complex'}), '(En.shape[0], dtype=complex)\n', (1568, 1596), True, 'import numpy as np\n'), ((2067, 2096), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y1'], {'label': '"""real"""'}), "(X, Y1, label='real')\n", (2075, 2096), True, 'from matplotlib import pyplot as plt\n'), ((2145, 2197), 'matplotlib.pyplot.fill', 'plt.fill', (['X', 'Y2'], {'c': '"""orange"""', 'alpha': '(0.8)', 'label': '"""imag"""'}), "(X, Y2, c='orange', alpha=0.8, label='imag')\n", (2153, 2197), True, 'from matplotlib import pyplot as plt\n'), ((2198, 2215), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-2, 4)'], {}), '((-2, 4))\n', (2206, 2215), True, 'from matplotlib import pyplot as plt\n'), ((2236, 2272), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'axis': '"""both"""'}), "(which='major', axis='both')\n", (2244, 2272), True, 'from matplotlib import pyplot as plt\n'), ((2273, 2305), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[imag, real]'}), '(handles=[imag, real])\n', (2283, 2305), True, 'from matplotlib import pyplot as plt\n'), ((2306, 2366), 'matplotlib.pyplot.title', 'plt.title', (['"""Greens function of a simple four-atom 
unit cell"""'], {}), "('Greens function of a simple four-atom unit cell')\n", (2315, 2366), True, 'from matplotlib import pyplot as plt\n'), ((2367, 2399), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Energy E arb. unit"""'], {}), "('Energy E arb. unit')\n", (2377, 2399), True, 'from matplotlib import pyplot as plt\n'), ((2400, 2435), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Re[G00(E)]/Im[G00(E)]"""'], {}), "('Re[G00(E)]/Im[G00(E)]')\n", (2410, 2435), True, 'from matplotlib import pyplot as plt\n'), ((2436, 2486), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""imrealplot.eps"""'], {'bbox_inches': '"""tight"""'}), "('imrealplot.eps', bbox_inches='tight')\n", (2447, 2486), True, 'from matplotlib import pyplot as plt\n'), ((2487, 2497), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2495, 2497), True, 'from matplotlib import pyplot as plt\n'), ((2595, 2638), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""both"""', 'axis': '"""both"""'}), "(b=True, which='both', axis='both')\n", (2603, 2638), True, 'from matplotlib import pyplot as plt\n'), ((2639, 2649), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2647, 2649), True, 'from matplotlib import pyplot as plt\n'), ((1383, 1396), 'matplotlib.pyplot.imshow', 'plt.imshow', (['h'], {}), '(h)\n', (1393, 1396), True, 'from matplotlib import pyplot as plt\n'), ((1401, 1415), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1413, 1415), True, 'from matplotlib import pyplot as plt\n'), ((1420, 1430), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1428, 1430), True, 'from matplotlib import pyplot as plt\n'), ((1435, 1448), 'matplotlib.pyplot.imshow', 'plt.imshow', (['V'], {}), '(V)\n', (1445, 1448), True, 'from matplotlib import pyplot as plt\n'), ((1453, 1467), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1465, 1467), True, 'from matplotlib import pyplot as plt\n'), ((1472, 1482), 'matplotlib.pyplot.show', 'plt.show', ([], {}), 
'()\n', (1480, 1482), True, 'from matplotlib import pyplot as plt\n'), ((1727, 1761), 'Functions.RecursionRoutine', 'RecursionRoutine', (['En[i]', 'h', 'V', 'eta'], {}), '(En[i], h, V, eta)\n', (1743, 1761), False, 'from Functions import xyzimport, Hkay, Onsite, Hop, RecursionRoutine\n'), ((1803, 1813), 'numpy.diag', 'np.diag', (['G'], {}), '(G)\n', (1810, 1813), True, 'import numpy as np\n'), ((2575, 2594), 'matplotlib.pyplot.annotate', 'plt.annotate', (['s', 'xy'], {}), '(s, xy)\n', (2587, 2594), True, 'from matplotlib import pyplot as plt\n')] |
"""
Because reinventing the wheel is so much fun, here is my own PCA implementation
using SVD in numpy.
Ok, I did read a lot of existing implementations, for instance:
http://stackoverflow.com/questions/1730600/principal-component-analysis-in-python
http://www.cs.stevens.edu/~mordohai/classes/cs559_s09/PCA_in_MATLAB.pdf
http://en.wikipedia.org/wiki/Singular_value_decomposition
http://en.wikipedia.org/wiki/Principal_component_analysis
My goal is to have a PCA when data is centered but not normalized, and be able
to use it with unseen data.
----
Author:
<NAME> (<EMAIL>)
----
License:
This code is distributed under the GNU LESSER PUBLIC LICENSE
(LGPL, see www.gnu.org).
Copyright (c) 2012-2013 MARL@NYU.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of MARL, NYU nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
import os
import sys
import pickle
import copy
import time
import numpy as np
from numpy.linalg import svd
class PCA(object):
    """
    PCA trained via singular value decomposition.

    The data is centred (but not normalised) before the SVD, and the fitted
    means/basis can be re-applied to unseen observations.  All singular
    values are kept, so any number of output dimensions can be requested
    later via ``apply_newdata``.
    """
    def __init__(self, data, inline=False):
        """
        Fit the PCA model.

        data   - M x N array, M observations of dimension N
        inline - when True, ``data`` is centred and rescaled in place;
                 otherwise a private copy is modified instead
        """
        self.original_shape = data.shape
        self.means = data.mean(axis=0)
        if not inline:
            data = data.copy()
        self.center_newdata(data)
        # rescale so the singular values relate to the unbiased variance
        data /= np.sqrt(data.shape[0] - 1)
        self.U, self.d, self.Vt = svd(data, full_matrices=False)
        # drop U when it gets big; only means/Vt are needed to project new data
        if self.U.shape[0] * self.U.shape[1] > 500000:
            del self.U
        # singular values must come back sorted in decreasing order
        assert np.all(self.d[:-1] >= self.d[1:])
        # squared singular values == variance along each principal axis
        # (meaningful when thinking in covariance-matrix terms; note the
        # eigenvalues themselves would require skipping the normalization above)
        self.variance = self.d ** 2
        # remember when the model was fitted
        self.built_time = time.ctime()
    def apply_newdata(self, data, ndims=-1):
        """
        Project (possibly unseen) data onto the principal axes.
        ndims < 1 (the default) keeps every dimension.
        """
        basis = self.Vt if ndims < 1 else self.Vt[:ndims]
        return np.dot(data - self.means, basis.T)
    def center_newdata(self, data):
        """
        Subtract the fitted column means from ``data`` in place.
        Copy beforehand if the original values must be preserved.
        data - M x N, M number of examples, N dimensions
        """
        data -= self.means
    def uncenter(self, data):
        """
        Add the fitted column means back onto ``data`` in place.
        Copy beforehand if the original values must be preserved.
        """
        data += self.means
    def __repr__(self):
        """
        Short human-readable summary of the fitted model.
        """
        return 'PCA built on data shaped: %s on %s' % (self.original_shape, self.built_time)
| [
"time.ctime",
"numpy.linalg.svd",
"numpy.dot",
"numpy.all",
"numpy.sqrt"
] | [((3030, 3056), 'numpy.sqrt', 'np.sqrt', (['(data.shape[0] - 1)'], {}), '(data.shape[0] - 1)\n', (3037, 3056), True, 'import numpy as np\n'), ((3140, 3170), 'numpy.linalg.svd', 'svd', (['data'], {'full_matrices': '(False)'}), '(data, full_matrices=False)\n', (3143, 3170), False, 'from numpy.linalg import svd\n'), ((3315, 3348), 'numpy.all', 'np.all', (['(self.d[:-1] >= self.d[1:])'], {}), '(self.d[:-1] >= self.d[1:])\n', (3321, 3348), True, 'import numpy as np\n'), ((3669, 3681), 'time.ctime', 'time.ctime', ([], {}), '()\n', (3679, 3681), False, 'import time\n'), ((3872, 3908), 'numpy.dot', 'np.dot', (['(data - self.means)', 'self.Vt.T'], {}), '(data - self.means, self.Vt.T)\n', (3878, 3908), True, 'import numpy as np\n'), ((3942, 3986), 'numpy.dot', 'np.dot', (['(data - self.means)', 'self.Vt[:ndims].T'], {}), '(data - self.means, self.Vt[:ndims].T)\n', (3948, 3986), True, 'import numpy as np\n')] |
from pyoma.browser import db
import pickle
import pandas as pd
import h5py
import itertools
import json
import random
from scipy.sparse import csr_matrix
from tables import *
import numpy as np
import random
# Pin both NumPy's and the stdlib's RNG at import time so profiling runs
# (minhash sampling, any random ordering below) are reproducible.
np.random.seed(0)
random.seed(0)
import ete3
from datasketch import WeightedMinHashGenerator
#from validation import validation_semantic_similarity
from pyprofiler.utils import hashutils, config_utils , pyhamutils , files_utils
from time import time
import multiprocessing as mp
import functools
import numpy as np
import time
import sys
import gc
import logging
class Profiler:
    """
    A profiler object allows the user to query the LSH with HOGs and get a list of result HOGs back
    """
    def __init__(self, lshforestpath=None, hashes_h5=None, mat_path=None, oma=False, tar=None, nsamples=256, mastertree=None):
        """
        Load the LSH forest and the HDF5 file of minhash values.

        :param lshforestpath: path to a pickled datasketch LSH forest
        :param hashes_h5: path to the HDF5 file containing the hash values
        :param mat_path: path to a profile matrix (currently unused; TODO read hdf5)
        :param oma: True to open the default OmaServer.h5, or a path to an OMA h5 file
        :param tar: tarball source of orthoxml files (TODO: unfinished)
        :param nsamples: number of minhash samples per hash
        :param mastertree: path to a pickled ete3 master species tree
        """
        # use the lsh forest or the lsh
        print('loading lsh')
        with open(lshforestpath, 'rb') as lshpickle:
            self.lshobj = pickle.loads(lshpickle.read())
        print('indexing lsh')
        self.lshobj.index()
        self.hashes_h5 = h5py.File(hashes_h5, mode='r')
        self.nsamples = nsamples
        if mat_path:
            ## TODO: change this to read hdf5
            #profile_matrix_file = open(profile_matrix_path, 'rb')
            #profile_matrix_unpickled = pickle.Unpickler(profile_matrix_file)
            #self.profile_matrix = profile_matrix_unpickled.load()
            pass
        if oma:
            from pyoma.browser import db
            if mastertree is None:
                mastertree = config_utils.datadir + 'mastertree.pkl'
            # open up master tree
            with open(mastertree, 'rb') as treein:
                self.tree = pickle.loads(treein.read())
            self.tree_string = self.tree.write(format=1)
            self.taxaIndex, self.ReverseTaxaIndex = files_utils.generate_taxa_index(self.tree)
            if oma == True:
                h5_oma = open_file(config_utils.omadir + 'OmaServer.h5', mode="r")
            else:
                h5_oma = open_file(oma, mode="r")
            self.db_obj = db.Database(h5_oma)
            self.treeweights = hashutils.generate_treeweights(self.tree, self.taxaIndex, None, None)
            self.READ_ORTHO = functools.partial(pyhamutils.get_orthoxml_oma, db_obj=self.db_obj)
        elif tar:
            ## TODO: finish tar function
            self.READ_ORTHO = functools.partial(pyhamutils.get_orthoxml_tar, db_obj=self.db_obj)
        if oma or tar:
            self.HAM_PIPELINE = functools.partial(pyhamutils.get_ham_treemap_from_row, tree=self.tree_string)
            self.HASH_PIPELINE = functools.partial(hashutils.row2hash, taxaIndex=self.taxaIndex, treeweights=self.treeweights, wmg=None)
        print('DONE')
    def _treemap_to_binary_vector(self, tp):
        """
        Turn an annotated pyham tree map into a 1 x (3 * n_taxa) binary event vector.

        Columns [0, n) flag presence, [n, 2n) losses and [2n, 3n) duplications,
        where n = len(self.taxaIndex).  Extracted helper: this code was
        previously triplicated in return_profile_OTF / _complements / worker.
        """
        losses = [self.taxaIndex[n.name] for n in tp.traverse() if n.lost and n.name in self.taxaIndex]
        dupl = [self.taxaIndex[n.name] for n in tp.traverse() if n.dupl and n.name in self.taxaIndex]
        presence = [self.taxaIndex[n.name] for n in tp.traverse() if n.nbr_genes > 0 and n.name in self.taxaIndex]
        indices = dict(zip(['presence', 'loss', 'dup'], [presence, losses, dupl]))
        hog_matrix_raw = np.zeros((1, 3 * len(self.taxaIndex)))
        for i, event in enumerate(indices):
            if len(indices[event]) > 0:
                # shift each event type into its own third of the vector
                hogindex = np.asarray(indices[event]) + i * len(self.taxaIndex)
                hog_matrix_raw[:, hogindex] = 1
        return hog_matrix_raw
    def return_profile_OTF(self, fam):
        """
        Returns profiles as binary vectors for use with optimisation pipelines
        """
        if type(fam) is str:
            fam = hashutils.hogid2fam(fam)
        ortho_fam = self.READ_ORTHO(fam)
        tp = self.HAM_PIPELINE([fam, ortho_fam])
        hog_matrix_raw = self._treemap_to_binary_vector(tp)
        return {fam: {'mat': hog_matrix_raw, 'tree': tp}}
    def return_profile_complements(self, fam):
        """
        Returns profiles for each loss to search for complementary hogs

        NOTE(review): the original implementation referenced undefined names
        (``presence``, ``profiler``, ``n``) and could never run.  Rewritten to
        emit, for every lost ancestral node, a complement profile where every
        taxon below the loss is marked present (no further losses or
        duplications assumed from that point).
        """
        if type(fam) is str:
            fam = hashutils.hogid2fam(fam)
        ortho_fam = self.READ_ORTHO(fam)
        tp = self.HAM_PIPELINE([fam, ortho_fam])
        losses = set([n.name for n in tp.traverse() if n.lost and n.name in self.taxaIndex])
        # these are the roots of the fams we are looking for
        # we just assume no duplications or losses from this point
        ancestral_nodes = [n for n in self.tree.traverse() if n.name in losses]
        complements = {}
        for node in ancestral_nodes:
            presence = [self.taxaIndex[leaf.name] for leaf in node.traverse() if leaf.name in self.taxaIndex]
            hog_matrix_raw = np.zeros((1, 3 * len(self.taxaIndex)))
            if presence:
                hog_matrix_raw[:, np.asarray(presence)] = 1
            complements[node.name + '_loss'] = hog_matrix_raw
        return {fam: {'complements': complements, 'hash': tp}}
    def return_profile_OTF_DCA(self, fam, lock=None):
        """
        Returns profiles as strings for use with DCA pipelines
        just concatenate the numpy arrays and use the tostring
        function to generate an input "alignment"
        """
        if type(fam) is str:
            fam = hashutils.hogid2fam(fam)
        # serialize access to the db handle when a lock is supplied; the
        # try/finally guarantees release even if READ_ORTHO raises
        if lock is not None:
            lock.acquire()
        try:
            ortho_fam = self.READ_ORTHO(fam)
        finally:
            if lock is not None:
                lock.release()
        tp = self.HAM_PIPELINE([fam, ortho_fam])
        dcastr = hashutils.tree2str_DCA(tp, self.taxaIndex)
        return {fam: {'dcastr': dcastr, 'tree': tp}}
    def worker(self, i, inq, retq):
        """
        this worker function is for parallelization of generation of binary vector for use with optimisation pipelines
        """
        print('worker start' + str(i))
        while True:
            payload = inq.get()
            if payload is None:
                # poison pill: shut this worker down
                break
            fam, ortho_fam = payload
            tp = self.HAM_PIPELINE([fam, ortho_fam])
            retq.put({fam: {'mat': self._treemap_to_binary_vector(tp), 'tree': tp}})
    def retmat_mp(self, traindf, nworkers=25, chunksize=50):
        """
        function used to create training matrix with pairs of hogs. calculate_x will return the intersetcion of
        two binary vectors generated by pyham

        Yields (X_train, y_train) per chunk of ``traindf``.
        """
        def calculate_x(row):
            # 1 wherever BOTH profiles show the event (intersection), 0 elsewhere
            matsum = row.mat_x + row.mat_y
            ret = np.zeros(row.mat_x.shape)
            ret[np.where(matsum == 2)] = 1
            return list(ret)
        retq = mp.Queue(-1)
        inq = mp.Queue(-1)
        processes = {}
        mp.log_to_stderr()
        logger = mp.get_logger()
        logger.setLevel(logging.INFO)
        for i in range(nworkers):
            processes[i] = {'time': time.time(), 'process': mp.Process(target=self.worker, args=(i, inq, retq))}
            #processes[i]['process'].daemon = True
            processes[i]['process'].start()
        for batch in range(0, len(traindf), chunksize):
            print(batch)
            slicedf = traindf.iloc[batch:batch + chunksize, :]
            fams = list(set(list(slicedf.HogFamA.unique()) + list(slicedf.HogFamB.unique())))
            total = {}
            for fam in fams:
                orthxml = self.READ_ORTHO(fam)
                if orthxml is not None:
                    inq.put((fam, orthxml))
            count = 0
            while len(fams) - 1 > count:
                try:
                    data = retq.get(False)
                    count += 1
                    total.update(data)
                except Exception:
                    # queue momentarily empty; back off briefly instead of busy-waiting
                    # (the original slept 10ms after EVERY read, throttling retrieval)
                    time.sleep(.01)
            gc.collect()
            retdf = pd.DataFrame.from_dict(total, orient='index')
            slicedf = slicedf.merge(retdf, left_on='HogFamA', right_index=True, how='left')
            slicedf = slicedf.merge(retdf, left_on='HogFamB', right_index=True, how='left')
            slicedf = slicedf.dropna(subset=['mat_y', 'mat_x'], how='any')
            slicedf['xtrain'] = slicedf.apply(calculate_x, axis=1)
            X_train = np.vstack(slicedf['xtrain'])
            y_train = slicedf.truth
            print(slicedf)
            yield (X_train, y_train)
        for i in processes:
            inq.put(None)
        for i in processes:
            processes[i]['process'].terminate()
    def retmat_mp_profiles(self, fams, nworkers=25, chunksize=50):
        """
        function used to create dataframe containing binary profiles
        and trees of fams
        """
        fams = [f for f in fams if f]
        retq = mp.Queue(-1)
        inq = mp.Queue(-1)
        processes = {}
        mp.log_to_stderr()
        logger = mp.get_logger()
        logger.setLevel(logging.INFO)
        total = {}
        for i in range(nworkers):
            processes[i] = {'time': time.time(), 'process': mp.Process(target=self.worker, args=(i, inq, retq))}
            #processes[i]['process'].daemon = True
            processes[i]['process'].start()
        for fam in fams:
            try:
                orthxml = self.READ_ORTHO(fam)
            except Exception:
                # BUGFIX: the original assigned ``orthoxml`` (typo) here, so on
                # failure ``orthxml`` stayed unbound/stale from a previous family.
                orthxml = None
            if orthxml is not None:
                inq.put((fam, orthxml))
        count = 0
        while len(fams) - 1 > count:
            try:
                data = retq.get(False)
                count += 1
                total.update(data)
                if count % 100 == 0:
                    print(count)
            except Exception:
                time.sleep(.01)
        for i in range(nworkers):
            processes[i]['process'].terminate()
        retdf = pd.DataFrame.from_dict(total, orient='index')
        return retdf
    def hog_query(self, hog_id=None, fam_id=None, k=100):
        """
        Given a hog_id or a fam_id as a query, returns a dictionary containing the results of the LSH.
        :param hog_id: query hog id
        :param fam_id: query fam id
        :param k: number of nearest neighbours to return
        :return: list containing the results of the LSH for the given query
        """
        if hog_id is not None:
            fam_id = hashutils.hogid2fam(hog_id)
        query_hash = hashutils.fam2hash_hdf5(fam_id, self.hashes_h5, nsamples=self.nsamples)
        results = self.lshobj.query(query_hash, k)
        return results
    def hog_query_sorted(self, hog_id=None, fam_id=None, k=100):
        """
        Like hog_query, but also scores every result by its jaccard similarity
        to the query hash.
        :param hog_id: query hog id
        :param fam_id: query fam id
        :param k: number of nearest neighbours to return
        :return: dict mapping result hog -> jaccard similarity with the query
        """
        if hog_id is not None:
            fam_id = hashutils.hogid2fam(hog_id)
        query_hash = hashutils.fam2hash_hdf5(fam_id, self.hashes_h5, nsamples=self.nsamples)
        results = self.lshobj.query(query_hash, k)
        hogdict = self.pull_hashes(results)
        # BUGFIX: the original referenced undefined names (``student_tuples``,
        # ``sortehogs``) and iterated the None returned by list.reverse(); the
        # similarity dict below is what was actually returned.
        hogdict = {hog: hogdict[hog].jaccard(query_hash) for hog in hogdict}
        return hogdict
    def pull_hashes(self, hoglist):
        """
        Given a list of hog_ids , returns a dictionary containing their hashes.
        This uses the hdf5 file to get the hashvalues
        :param hoglist: iterable of hog ids
        :return: a dict containing the hash values of the hogs in hoglist
        """
        return {hog: hashutils.fam2hash_hdf5(hashutils.hogid2fam(str(hog)), self.hashes_h5, nsamples=self.nsamples) for hog in hoglist}
    def pull_matrows(self, fams):
        """
        given a list of fams return the submatrix containing their profiles
        :return: fams sorted, sparse mat
        """
        return self.profile_matrix[np.asarray(fams), :]
    @staticmethod
    def sort_hashes(query_hash, hashes):
        """
        Given a dict of hogs:hashes, returns a sorted array of hogs and jaccard distances relative to query hog.
        :param query_hash: weighted minhash of the query
        :param hashes: a dict of hogs:hashes
        :return: sortedhogs, jaccard
        """
        # sort the hashes by their jaccard relative to query hash
        # BUGFIX: the original used the nonexistent np.asarry and tried to
        # index a plain list with an index array.
        jaccard = np.asarray([query_hash.jaccard(hashes[hog]) for hog in hashes])
        index = np.argsort(jaccard)
        sortedhogs = np.asarray(list(hashes.keys()))[index]
        jaccard = jaccard[index]
        return sortedhogs, jaccard
    @staticmethod
    def allvall_hashes(hashes):
        """
        Given a dict of hogs:hashes, returns generate an all v all jaccard distance matrix.
        :param hashes: a dict of hogs:hashes
        :return: hashmat
        """
        # generate an all v all jaccard distance matrix; jaccard is symmetric,
        # so only the upper triangle is computed and mirrored
        hashmat = np.zeros((len(hashes), len(hashes)))
        for i, hog1 in enumerate(hashes):
            for j, hog2 in enumerate(hashes):
                if i < j:
                    hashmat[i, j] = hashes[hog1].jaccard(hashes[hog2])
        hashmat = hashmat + hashmat.T
        np.fill_diagonal(hashmat, 1)
        return hashmat
    def hog_v_hog(self, hogs):
        """
        give two hogs returns jaccard distance.
        :param hogs: tuple of two str hog ids
        :return: jaccard score
        """
        hog1, hog2 = hogs
        hashes = self.pull_hashes([hog1, hog2])
        hashes = list(hashes.values())
        return hashes[0].jaccard(hashes[1])
    def allvall_nx(G, hashes, thresh=None):
        """
        Given a dict of hogs:hashes, returns generate an all v all jaccard distance matrix.
        NOTE(review): the first positional argument ``G`` receives the instance
        when this is called as a method, and ``thresh`` is currently unused.
        BUGFIX: the original sliced the dict (``hashes[0:i]``, a TypeError) and
        its ``j > i`` guard could never fire; rebuilt over an explicit key list.
        :param hashes: a dict of hogs:hashes
        :return: hashmat
        """
        keys = list(hashes)
        hashmat = [[hashes[k1].jaccard(hashes[k2]) if j > i else 0 for j, k2 in enumerate(keys)] for i, k1 in enumerate(keys)]
        hashmat = np.asarray(hashmat)
        hashmat += hashmat.T
        np.fill_diagonal(hashmat, 1)
        return hashmat
    def iternetwork(seedHOG):
        # TODO: unimplemented; placeholder for iterative network expansion from a seed HOG
        pass
    def rank_hashes(query_hash, hashes):
        """
        Rank hogs by jaccard similarity to the query hash.
        NOTE(review): written as an unbound helper - the first positional
        argument is the query hash, not the Profiler instance.
        :return: (hogsRanked, jaccard) sorted ascending by similarity
        """
        jaccard = []
        scores = {}
        hogsRanked = np.asarray(list(hashes.keys()))
        for i, hog in enumerate(hashes):
            score = query_hash.jaccard(hashes[hog])
            jaccard.append(score)
            scores[hog] = score
        hogsRanked = list(hogsRanked[np.argsort(jaccard)])
        jaccard = np.sort(jaccard)
        return hogsRanked, jaccard
    def get_vpairs(self, fam):
        """
        get pairwise distance matrix of OMA all v all
        #not finished
        :param fam: an oma fam
        :return sparsemat: a mat with all taxa in Oma with nonzero entries where this protein is found
        :return densemat: a mat with the taxa covered by the fam
        """
        # BUGFIX: the original lacked ``self`` (uncallable as a method) and
        # returned undefined names; it now fails loudly instead of with a NameError.
        taxa = self.db_obj.hog_levels_of_fam(fam)
        subtaxindex = {taxon: i for i, taxon in enumerate(taxa)}
        prots = self.db_obj.hog_members_from_hog_id(fam, 'LUCA')
        for prot in prots:
            taxon = prot.ncbi_taxon_id()
            pairs = self.db_obj.get_vpairs(prot)
            for EntryNr1, EntryNr2, RelType, score, distance in list(pairs):
                pass
        raise NotImplementedError('get_vpairs is unfinished: sparse/dense matrix assembly not implemented')
    def get_submatrix_form_results(self, results):
        """
        For each query in ``results`` collect the profile-matrix rows of its hits.
        :param results: dict of query -> list of row indices into self.profile_matrix
        :return: list of sparse matrices, one per query
        """
        res_mat_list = []
        for query, result in results.items():
            res_mat = csr_matrix((len(result), self.profile_matrix.shape[1]))
            for i, r in enumerate(result):
                res_mat[i, :] = self.profile_matrix[r, :]
            res_mat_list.append(res_mat)
        # (the original also built an unused np.vstack of the matrices; removed)
        return res_mat_list
| [
"pyprofiler.utils.hashutils.tree2str_DCA",
"numpy.random.seed",
"numpy.argsort",
"gc.collect",
"multiprocessing.log_to_stderr",
"multiprocessing.Queue",
"random.seed",
"numpy.fill_diagonal",
"h5py.File",
"functools.partial",
"pandas.DataFrame.from_dict",
"numpy.asarray",
"time.sleep",
"num... | [((210, 227), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (224, 227), True, 'import numpy as np\n'), ((230, 244), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (241, 244), False, 'import random\n'), ((1058, 1088), 'h5py.File', 'h5py.File', (['hashes_h5'], {'mode': '"""r"""'}), "(hashes_h5, mode='r')\n", (1067, 1088), False, 'import h5py\n'), ((4921, 4963), 'pyprofiler.utils.hashutils.tree2str_DCA', 'hashutils.tree2str_DCA', (['tp', 'self.taxaIndex'], {}), '(tp, self.taxaIndex)\n', (4943, 4963), False, 'from pyprofiler.utils import hashutils, config_utils, pyhamutils, files_utils\n'), ((6651, 6663), 'multiprocessing.Queue', 'mp.Queue', (['(-1)'], {}), '(-1)\n', (6659, 6663), True, 'import multiprocessing as mp\n'), ((6671, 6683), 'multiprocessing.Queue', 'mp.Queue', (['(-1)'], {}), '(-1)\n', (6679, 6683), True, 'import multiprocessing as mp\n'), ((6703, 6721), 'multiprocessing.log_to_stderr', 'mp.log_to_stderr', ([], {}), '()\n', (6719, 6721), True, 'import multiprocessing as mp\n'), ((6733, 6748), 'multiprocessing.get_logger', 'mp.get_logger', ([], {}), '()\n', (6746, 6748), True, 'import multiprocessing as mp\n'), ((8325, 8337), 'multiprocessing.Queue', 'mp.Queue', (['(-1)'], {}), '(-1)\n', (8333, 8337), True, 'import multiprocessing as mp\n'), ((8345, 8357), 'multiprocessing.Queue', 'mp.Queue', (['(-1)'], {}), '(-1)\n', (8353, 8357), True, 'import multiprocessing as mp\n'), ((8377, 8395), 'multiprocessing.log_to_stderr', 'mp.log_to_stderr', ([], {}), '()\n', (8393, 8395), True, 'import multiprocessing as mp\n'), ((8407, 8422), 'multiprocessing.get_logger', 'mp.get_logger', ([], {}), '()\n', (8420, 8422), True, 'import multiprocessing as mp\n'), ((9122, 9167), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['total'], {'orient': '"""index"""'}), "(total, orient='index')\n", (9144, 9167), True, 'import pandas as pd\n'), ((9567, 9638), 'pyprofiler.utils.hashutils.fam2hash_hdf5', 'hashutils.fam2hash_hdf5', (['fam_id', 
'self.hashes_h5'], {'nsamples': 'self.nsamples'}), '(fam_id, self.hashes_h5, nsamples=self.nsamples)\n', (9590, 9638), False, 'from pyprofiler.utils import hashutils, config_utils, pyhamutils, files_utils\n'), ((10127, 10198), 'pyprofiler.utils.hashutils.fam2hash_hdf5', 'hashutils.fam2hash_hdf5', (['fam_id', 'self.hashes_h5'], {'nsamples': 'self.nsamples'}), '(fam_id, self.hashes_h5, nsamples=self.nsamples)\n', (10150, 10198), False, 'from pyprofiler.utils import hashutils, config_utils, pyhamutils, files_utils\n'), ((11598, 11617), 'numpy.argsort', 'np.argsort', (['jaccard'], {}), '(jaccard)\n', (11608, 11617), True, 'import numpy as np\n'), ((12199, 12227), 'numpy.fill_diagonal', 'np.fill_diagonal', (['hashmat', '(1)'], {}), '(hashmat, 1)\n', (12215, 12227), True, 'import numpy as np\n'), ((12965, 12984), 'numpy.asarray', 'np.asarray', (['hashmat'], {}), '(hashmat)\n', (12975, 12984), True, 'import numpy as np\n'), ((13009, 13037), 'numpy.fill_diagonal', 'np.fill_diagonal', (['hashmat', '(1)'], {}), '(hashmat, 1)\n', (13025, 13037), True, 'import numpy as np\n'), ((13595, 13611), 'numpy.sort', 'np.sort', (['jaccard'], {}), '(jaccard)\n', (13602, 13611), True, 'import numpy as np\n'), ((14589, 14612), 'numpy.vstack', 'np.vstack', (['res_mat_list'], {}), '(res_mat_list)\n', (14598, 14612), True, 'import numpy as np\n'), ((1668, 1710), 'pyprofiler.utils.files_utils.generate_taxa_index', 'files_utils.generate_taxa_index', (['self.tree'], {}), '(self.tree)\n', (1699, 1710), False, 'from pyprofiler.utils import hashutils, config_utils, pyhamutils, files_utils\n'), ((1866, 1885), 'pyoma.browser.db.Database', 'db.Database', (['h5_oma'], {}), '(h5_oma)\n', (1877, 1885), False, 'from pyoma.browser import db\n'), ((1932, 2001), 'pyprofiler.utils.hashutils.generate_treeweights', 'hashutils.generate_treeweights', (['self.tree', 'self.taxaIndex', 'None', 'None'], {}), '(self.tree, self.taxaIndex, None, None)\n', (1962, 2001), False, 'from pyprofiler.utils import hashutils, 
config_utils, pyhamutils, files_utils\n'), ((2026, 2092), 'functools.partial', 'functools.partial', (['pyhamutils.get_orthoxml_oma'], {'db_obj': 'self.db_obj'}), '(pyhamutils.get_orthoxml_oma, db_obj=self.db_obj)\n', (2043, 2092), False, 'import functools\n'), ((2266, 2343), 'functools.partial', 'functools.partial', (['pyhamutils.get_ham_treemap_from_row'], {'tree': 'self.tree_string'}), '(pyhamutils.get_ham_treemap_from_row, tree=self.tree_string)\n', (2283, 2343), False, 'import functools\n'), ((2369, 2477), 'functools.partial', 'functools.partial', (['hashutils.row2hash'], {'taxaIndex': 'self.taxaIndex', 'treeweights': 'self.treeweights', 'wmg': 'None'}), '(hashutils.row2hash, taxaIndex=self.taxaIndex, treeweights\n =self.treeweights, wmg=None)\n', (2386, 2477), False, 'import functools\n'), ((2650, 2674), 'pyprofiler.utils.hashutils.hogid2fam', 'hashutils.hogid2fam', (['fam'], {}), '(fam)\n', (2669, 2674), False, 'from pyprofiler.utils import hashutils, config_utils, pyhamutils, files_utils\n'), ((3620, 3644), 'pyprofiler.utils.hashutils.hogid2fam', 'hashutils.hogid2fam', (['fam'], {}), '(fam)\n', (3639, 3644), False, 'from pyprofiler.utils import hashutils, config_utils, pyhamutils, files_utils\n'), ((4725, 4749), 'pyprofiler.utils.hashutils.hogid2fam', 'hashutils.hogid2fam', (['fam'], {}), '(fam)\n', (4744, 4749), False, 'from pyprofiler.utils import hashutils, config_utils, pyhamutils, files_utils\n'), ((6436, 6458), 'numpy.zeros', 'np.zeros', (['mat_x1.shape'], {}), '(mat_x1.shape)\n', (6444, 6458), True, 'import numpy as np\n'), ((6469, 6491), 'numpy.zeros', 'np.zeros', (['mat_x2.shape'], {}), '(mat_x2.shape)\n', (6477, 6491), True, 'import numpy as np\n'), ((7511, 7523), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7521, 7523), False, 'import gc\n'), ((7534, 7579), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['total'], {'orient': '"""index"""'}), "(total, orient='index')\n", (7556, 7579), True, 'import pandas as pd\n'), ((7911, 7939), 
'numpy.vstack', 'np.vstack', (["slicedf['xtrain']"], {}), "(slicedf['xtrain'])\n", (7920, 7939), True, 'import numpy as np\n'), ((9029, 9045), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (9039, 9045), False, 'import time\n'), ((9524, 9551), 'pyprofiler.utils.hashutils.hogid2fam', 'hashutils.hogid2fam', (['hog_id'], {}), '(hog_id)\n', (9543, 9551), False, 'from pyprofiler.utils import hashutils, config_utils, pyhamutils, files_utils\n'), ((10084, 10111), 'pyprofiler.utils.hashutils.hogid2fam', 'hashutils.hogid2fam', (['hog_id'], {}), '(hog_id)\n', (10103, 10111), False, 'from pyprofiler.utils import hashutils, config_utils, pyhamutils, files_utils\n'), ((2159, 2225), 'functools.partial', 'functools.partial', (['pyhamutils.get_orthoxml_tar'], {'db_obj': 'self.db_obj'}), '(pyhamutils.get_orthoxml_tar, db_obj=self.db_obj)\n', (2176, 2225), False, 'import functools\n'), ((3286, 3312), 'numpy.asarray', 'np.asarray', (['indices[event]'], {}), '(indices[event])\n', (3296, 3312), True, 'import numpy as np\n'), ((4287, 4313), 'numpy.asarray', 'np.asarray', (['indices[event]'], {}), '(indices[event])\n', (4297, 4313), True, 'import numpy as np\n'), ((6593, 6614), 'numpy.where', 'np.where', (['(matsum == 2)'], {}), '(matsum == 2)\n', (6601, 6614), True, 'import numpy as np\n'), ((6836, 6847), 'time.time', 'time.time', ([], {}), '()\n', (6845, 6847), False, 'import time\n'), ((6861, 6912), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'self.worker', 'args': '(i, inq, retq)'}), '(target=self.worker, args=(i, inq, retq))\n', (6871, 6912), True, 'import multiprocessing as mp\n'), ((7491, 7507), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (7501, 7507), False, 'import time\n'), ((8523, 8534), 'time.time', 'time.time', ([], {}), '()\n', (8532, 8534), False, 'import time\n'), ((8548, 8599), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'self.worker', 'args': '(i, inq, retq)'}), '(target=self.worker, args=(i, inq, retq))\n', (8558, 
8599), True, 'import multiprocessing as mp\n'), ((11152, 11168), 'numpy.asarray', 'np.asarray', (['fams'], {}), '(fams)\n', (11162, 11168), True, 'import numpy as np\n'), ((13559, 13578), 'numpy.argsort', 'np.argsort', (['jaccard'], {}), '(jaccard)\n', (13569, 13578), True, 'import numpy as np\n'), ((3328, 3354), 'numpy.asarray', 'np.asarray', (['indices[event]'], {}), '(indices[event])\n', (3338, 3354), True, 'import numpy as np\n'), ((4329, 4355), 'numpy.asarray', 'np.asarray', (['indices[event]'], {}), '(indices[event])\n', (4339, 4355), True, 'import numpy as np\n'), ((5894, 5920), 'numpy.asarray', 'np.asarray', (['indices[event]'], {}), '(indices[event])\n', (5904, 5920), True, 'import numpy as np\n'), ((5938, 5964), 'numpy.asarray', 'np.asarray', (['indices[event]'], {}), '(indices[event])\n', (5948, 5964), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
import pandas as pd
import sklearn.metrics.pairwise
from ..misc import NeuroKitWarning
from ..signal.signal_psd import signal_psd
from .complexity_embedding import complexity_embedding
from .optim_complexity_delay import complexity_delay
def complexity_lyapunov(
    signal,
    delay=1,
    dimension=2,
    method="rosenstein1993",
    len_trajectory=20,
    matrix_dim=4,
    min_neighbors="default",
    **kwargs,
):
    """(Largest) Lyapunov Exponent (LLE)
    Lyapunov exponents (LE) describe the rate of exponential separation (convergence or divergence)
    of nearby trajectories of a dynamical system. A system can have multiple LEs, equal to the
    number of the dimensionality of the phase space, and the largest LE value, `LLE` is often used to
    determine the overall predictability of the dynamical system.
    Different algorithms:
    - Rosenstein et al.'s (1993) algorithm was designed for calculating LLEs from small datasets.
    The time series is first reconstructed using a delay-embedding method, and the closest neighbour
    of each vector is computed using the euclidean distance. These two neighbouring points are then
    tracked along their distance trajectories for a number of data points. The slope of the line
    using a least-squares fit of the mean log trajectory of the distances gives the final LLE.
    - Eckmann et al. (1996) computes LEs by first reconstructing the time series using a
    delay-embedding method, and obtains the tangent that maps to the reconstructed dynamics using
    a least-squares fit, where the LEs are deduced from the tangent maps.
    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int, None
        Time delay (often denoted 'Tau', sometimes referred to as 'lag'). In practice, it is common
        to have a fixed time lag (corresponding for instance to the sampling rate; Gautama, 2003), or
        to find a suitable value using some algorithmic heuristics (see ``delay_optimal()``).
        If None for 'rosenstein1993', the delay is set to distance where the
        autocorrelation function drops below 1 - 1/e times its original value.
    dimension : int
        Embedding dimension (often denoted 'm' or 'd', sometimes referred to as 'order'). Typically
        2 or 3. It corresponds to the number of compared runs of lagged data. If 2, the embedding returns
        an array with two columns corresponding to the original signal and its delayed (by Tau) version.
        If method is 'eckmann1996', large values for dimension are recommended.
    method : str
        The method that defines the algorithm for computing LE. Can be one of 'rosenstein1993' or
        'eckmann1996'.
    len_trajectory : int
        The number of data points in which neighbouring trajectories are followed. Only relevant if
        method is 'rosenstein1993'.
    matrix_dim : int
        Correponds to the number of LEs to return for 'eckmann1996'.
    min_neighbors : int, str
        Minimum number of neighbors for 'eckmann1996'. If "default", min(2 * matrix_dim, matrix_dim + 4)
        is used.
    **kwargs : optional
        Other arguments to be passed to ``signal_psd()`` for calculating the minimum temporal
        separation of two neighbours.
    Returns
    --------
    lle : float
        An estimate of the largest Lyapunov exponent (LLE) if method is 'rosenstein1993', and
        an array of LEs if 'eckmann1996'.
    info : dict
        A dictionary containing additional information regarding the parameters used
        to compute LLE.
    Raises
    ------
    ValueError
        If ``signal`` is multidimensional, or if ``method`` is not one of the
        supported algorithms.
    Examples
    ----------
    >>> import neurokit2 as nk
    >>>
    >>> signal = nk.signal_simulate(duration=3, sampling_rate=100, frequency=[5, 8], noise=0.5)
    >>> lle, info = nk.complexity_lyapunov(signal, delay=1, dimension=2)
    >>> lle #doctest: +SKIP
    Reference
    ----------
    - <NAME>., <NAME>., & <NAME>. (1993). A practical method
    for calculating largest Lyapunov exponents from small data sets.
    Physica D: Nonlinear Phenomena, 65(1-2), 117-134.
    - <NAME>., <NAME>., <NAME>., & <NAME>. (1986). Liapunov
    exponents from time series. Physical Review A, 34(6), 4971.
    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # If default tolerance
    tolerance = _complexity_lyapunov_tolerance(signal, **kwargs)  # rosenstein's method

    # Method
    method = method.lower()
    if method in ["rosenstein", "rosenstein1993"]:
        le, parameters = _complexity_lyapunov_rosenstein(
            signal, delay, dimension, tolerance, len_trajectory, **kwargs
        )
    elif method in ["eckmann", "eckmann1996"]:
        le, parameters = _complexity_lyapunov_eckmann(
            signal, delay, dimension, tolerance, matrix_dim, min_neighbors
        )
    else:
        # BUGFIX: an unrecognized method previously fell through both branches
        # and crashed with a confusing NameError on ``le``.
        raise ValueError(
            "NeuroKit error: complexity_lyapunov(): 'method' should be one of "
            "'rosenstein1993' or 'eckmann1996'."
        )

    # Store params
    info = {
        "Dimension": dimension,
        "Delay": delay,
        "Minimum Separation": tolerance,
        "Method": method,
    }
    info.update(parameters)

    return le, info
# =============================================================================
# Methods
# =============================================================================
def _complexity_lyapunov_rosenstein(
    signal, delay=1, dimension=2, tolerance=None, len_trajectory=20, **kwargs
):
    """Largest Lyapunov exponent via the Rosenstein et al. (1993) method.

    Embeds the signal, pairs every orbit vector with its nearest (temporally
    separated) neighbour, follows both trajectories for ``len_trajectory``
    steps, and fits a line to the mean log-divergence; the slope of that fit
    is the LLE estimate.
    """
    # The tolerance is (re)derived from the signal's power spectrum; kwargs
    # may carry tolerance="default".
    tolerance = _complexity_lyapunov_tolerance(signal, **kwargs)
    # Estimate the embedding delay if none was given.
    if delay is None:
        delay = complexity_delay(signal, method="rosenstein1993", show=False)
    # Warn if the series is too short for these parameters.
    _complexity_lyapunov_checklength(
        len(signal), delay, dimension, tolerance, len_trajectory, method="rosenstein1993"
    )
    # Time-delay embedding of the signal.
    embedded = complexity_embedding(signal, delay=delay, dimension=dimension)
    n_vectors = len(embedded)
    # Pairwise distances between orbit vectors; entries within the temporal
    # exclusion window are masked so a vector cannot select itself (or a
    # near-in-time copy) as neighbour.
    pairwise = sklearn.metrics.pairwise.euclidean_distances(embedded)
    for idx in range(n_vectors):
        pairwise[idx, max(0, idx - tolerance) : idx + tolerance + 1] = np.inf
    # Nearest neighbours, excluding the last few starting points so each
    # pair can be followed for the whole trajectory.
    n_start = n_vectors - len_trajectory + 1
    nearest = np.argmin(pairwise[:n_start, :n_start], axis=1).astype(int)
    # Mean log-distance between neighbour pairs after k steps.
    mean_log_div = np.zeros(len_trajectory)
    for k in range(len_trajectory):
        step_dists = pairwise[(np.arange(n_start) + k, nearest + k)]
        nonzero = np.where(step_dists != 0)[0]
        if len(nonzero) == 0:
            mean_log_div[k] = -np.inf
        else:
            mean_log_div[k] = np.mean(np.log(step_dists[nonzero]))
    divergence_rate = mean_log_div[np.isfinite(mean_log_div)]
    # The LLE is the least-squares slope of the divergence curve.
    slope, _ = np.polyfit(np.arange(1, len(divergence_rate) + 1), divergence_rate, 1)
    return slope, {"Trajectory Length": len_trajectory}
def _complexity_lyapunov_eckmann(
    signal, delay=1, dimension=2, tolerance=None, matrix_dim=4, min_neighbors="default"
):
    """Lyapunov spectrum via the Eckmann et al. (1986) tangent-map method.

    TODO: check implementation
    From https://github.com/CSchoel/nolds

    Fits a local linear tangent map ``T_i`` from each orbit vector's
    neighbourhood and accumulates the log of the diagonal of the R factor of
    successive QR decompositions, yielding ``matrix_dim`` exponents.

    NOTE(review): with the defaults (dimension=2, matrix_dim=4) the spacing
    ``m = (dimension - 1) // (matrix_dim - 1)`` is 0, which makes
    ``signal[:-m]`` empty and the strided slices below invalid — confirm
    callers guarantee dimension > matrix_dim - 1.
    """
    # Prepare parameters
    if min_neighbors == "default":
        min_neighbors = min(2 * matrix_dim, matrix_dim + 4)
    # Temporal spacing between the matrix_dim coordinates taken from signal.
    m = (dimension - 1) // (matrix_dim - 1)
    # Check that sufficient data points are available (warns otherwise)
    _complexity_lyapunov_checklength(
        len(signal),
        delay,
        dimension,
        tolerance,
        method="eckmann1996",
        matrix_dim=matrix_dim,
        min_neighbors=min_neighbors,
    )
    # Storing of LEs: running sums plus how many increments went into each
    lexp = np.zeros(matrix_dim)
    lexp_counts = np.zeros(matrix_dim)
    old_Q = np.identity(matrix_dim)
    # Reconstruction using time-delay method
    embedded = complexity_embedding(signal[:-m], delay=delay, dimension=dimension)
    distances = sklearn.metrics.pairwise_distances(embedded, metric="chebyshev")
    for i in range(len(embedded)):
        # exclude difference of vector to itself and those too close in time
        distances[i, max(0, i - tolerance) : i + tolerance + 1] = np.inf
        # index of furthest nearest neighbour
        neighbour_furthest = np.argsort(distances[i])[min_neighbors - 1]
        # get neighbors within the radius
        r = distances[i][neighbour_furthest]
        neighbors = np.where(distances[i] <= r)[0]  # should have length = min_neighbours
        # Find matrix T_i (matrix_dim * matrix_dim) that sends points from neighbourhood of x(i) to x(i+1)
        vec_beta = signal[neighbors + matrix_dim * m] - signal[i + matrix_dim * m]
        matrix = np.array([signal[j : j + dimension : m] for j in neighbors])  # x(j)
        matrix -= signal[i : i + dimension : m]  # x(j) - x(i)
        # form matrix T_i: companion-like, only the last row is fitted
        t_i = np.zeros((matrix_dim, matrix_dim))
        t_i[:-1, 1:] = np.identity(matrix_dim - 1)
        t_i[-1] = np.linalg.lstsq(matrix, vec_beta, rcond=-1)[0]  # least squares solution
        # QR-decomposition of T * old_Q
        mat_Q, mat_R = np.linalg.qr(np.dot(t_i, old_Q))
        # force diagonal of R to be positive (QR unique only up to signs)
        sign_diag = np.sign(np.diag(mat_R))
        sign_diag[np.where(sign_diag == 0)] = 1
        sign_diag = np.diag(sign_diag)
        mat_Q = np.dot(mat_Q, sign_diag)
        mat_R = np.dot(sign_diag, mat_R)
        old_Q = mat_Q
        # successively build sum for Lyapunov exponents
        diag_R = np.diag(mat_R)
        # filter zeros in mat_R (would lead to -infs)
        positive_elements = np.where(diag_R > 0)
        lexp_i = np.zeros(len(diag_R))
        lexp_i[positive_elements] = np.log(diag_R[positive_elements])
        lexp_i[np.where(diag_R == 0)] = np.inf
        lexp[positive_elements] += lexp_i[positive_elements]
        lexp_counts[positive_elements] += 1
    # normalize exponents over number of individual mat_Rs
    idx = np.where(lexp_counts > 0)
    lexp[idx] /= lexp_counts[idx]
    lexp[np.where(lexp_counts == 0)] = np.inf
    # normalize with respect to tau
    lexp /= delay
    # take m into account
    lexp /= m
    parameters = {"Minimum Neighbors": min_neighbors}
    return lexp, parameters
# =============================================================================
# Utilities
# =============================================================================
def _complexity_lyapunov_tolerance(signal, tolerance="default", **kwargs):
"""Minimum temporal separation (tolerance) between two neighbors.
If 'default', finds a suitable value by calculating the mean period of the data,
obtained by the reciprocal of the mean frequency of the power spectrum.
https://github.com/CSchoel/nolds
"""
if isinstance(tolerance, (int, float)):
return tolerance
psd = signal_psd(signal, sampling_rate=1000, method="fft", normalize=False, **kwargs)
# actual sampling rate does not matter
mean_freq = np.sum(psd["Power"] * psd["Frequency"]) / np.sum(psd["Power"])
mean_period = 1 / mean_freq # seconds per cycle
tolerance = int(np.ceil(mean_period * 1000))
return tolerance
def _complexity_lyapunov_checklength(
n,
delay=1,
dimension=2,
tolerance="default",
len_trajectory=20,
method="rosenstein1993",
matrix_dim=4,
min_neighbors="default",
):
"""Helper function that calculates the minimum number of data points required.
"""
if method in ["rosenstein", "rosenstein1993"]:
# minimum length required to find single orbit vector
min_len = (dimension - 1) * delay + 1
# we need len_trajectory orbit vectors to follow a complete trajectory
min_len += len_trajectory - 1
# we need tolerance * 2 + 1 orbit vectors to find neighbors for each
min_len += tolerance * 2 + 1
# Sanity check
if n < min_len:
warn(
f"for dimension={dimension}, delay={delay}, tolerance={tolerance} and "
+ f"len_trajectory={len_trajectory}, you need at least {min_len} datapoints in your time series.",
category=NeuroKitWarning,
)
elif method in ["eckmann", "eckmann1996"]:
m = (dimension - 1) // (matrix_dim - 1)
# minimum length required to find single orbit vector
min_len = dimension
# we need to follow each starting point of an orbit vector for m more steps
min_len += m
# we need tolerance * 2 + 1 orbit vectors to find neighbors for each
min_len += tolerance * 2
# we need at least min_nb neighbors for each orbit vector
min_len += min_neighbors
# Sanity check
if n < min_len:
warn(
f"for dimension={dimension}, delay={delay}, tolerance={tolerance}, "
+ f"matrix_dim={matrix_dim} and min_neighbors={min_neighbors}, "
+ f"you need at least {min_len} datapoints in your time series.",
category=NeuroKitWarning,
)
| [
"numpy.sum",
"numpy.log",
"numpy.ceil",
"numpy.linalg.lstsq",
"numpy.zeros",
"numpy.identity",
"numpy.argmin",
"numpy.isfinite",
"numpy.argsort",
"numpy.where",
"numpy.array",
"numpy.arange",
"numpy.dot",
"numpy.diag",
"warnings.warn"
] | [((6535, 6575), 'numpy.argmin', 'np.argmin', (['dists[:ntraj, :ntraj]'], {'axis': '(1)'}), '(dists[:ntraj, :ntraj], axis=1)\n', (6544, 6575), True, 'import numpy as np\n'), ((6752, 6776), 'numpy.zeros', 'np.zeros', (['len_trajectory'], {}), '(len_trajectory)\n', (6760, 6776), True, 'import numpy as np\n'), ((8145, 8165), 'numpy.zeros', 'np.zeros', (['matrix_dim'], {}), '(matrix_dim)\n', (8153, 8165), True, 'import numpy as np\n'), ((8184, 8204), 'numpy.zeros', 'np.zeros', (['matrix_dim'], {}), '(matrix_dim)\n', (8192, 8204), True, 'import numpy as np\n'), ((8217, 8240), 'numpy.identity', 'np.identity', (['matrix_dim'], {}), '(matrix_dim)\n', (8228, 8240), True, 'import numpy as np\n'), ((10395, 10420), 'numpy.where', 'np.where', (['(lexp_counts > 0)'], {}), '(lexp_counts > 0)\n', (10403, 10420), True, 'import numpy as np\n'), ((7209, 7234), 'numpy.isfinite', 'np.isfinite', (['trajectories'], {}), '(trajectories)\n', (7220, 7234), True, 'import numpy as np\n'), ((9143, 9199), 'numpy.array', 'np.array', (['[signal[j:j + dimension:m] for j in neighbors]'], {}), '([signal[j:j + dimension:m] for j in neighbors])\n', (9151, 9199), True, 'import numpy as np\n'), ((9316, 9350), 'numpy.zeros', 'np.zeros', (['(matrix_dim, matrix_dim)'], {}), '((matrix_dim, matrix_dim))\n', (9324, 9350), True, 'import numpy as np\n'), ((9374, 9401), 'numpy.identity', 'np.identity', (['(matrix_dim - 1)'], {}), '(matrix_dim - 1)\n', (9385, 9401), True, 'import numpy as np\n'), ((9748, 9766), 'numpy.diag', 'np.diag', (['sign_diag'], {}), '(sign_diag)\n', (9755, 9766), True, 'import numpy as np\n'), ((9783, 9807), 'numpy.dot', 'np.dot', (['mat_Q', 'sign_diag'], {}), '(mat_Q, sign_diag)\n', (9789, 9807), True, 'import numpy as np\n'), ((9824, 9848), 'numpy.dot', 'np.dot', (['sign_diag', 'mat_R'], {}), '(sign_diag, mat_R)\n', (9830, 9848), True, 'import numpy as np\n'), ((9945, 9959), 'numpy.diag', 'np.diag', (['mat_R'], {}), '(mat_R)\n', (9952, 9959), True, 'import numpy as np\n'), ((10042, 
10062), 'numpy.where', 'np.where', (['(diag_R > 0)'], {}), '(diag_R > 0)\n', (10050, 10062), True, 'import numpy as np\n'), ((10138, 10171), 'numpy.log', 'np.log', (['diag_R[positive_elements]'], {}), '(diag_R[positive_elements])\n', (10144, 10171), True, 'import numpy as np\n'), ((10464, 10490), 'numpy.where', 'np.where', (['(lexp_counts == 0)'], {}), '(lexp_counts == 0)\n', (10472, 10490), True, 'import numpy as np\n'), ((11427, 11466), 'numpy.sum', 'np.sum', (["(psd['Power'] * psd['Frequency'])"], {}), "(psd['Power'] * psd['Frequency'])\n", (11433, 11466), True, 'import numpy as np\n'), ((11469, 11489), 'numpy.sum', 'np.sum', (["psd['Power']"], {}), "(psd['Power'])\n", (11475, 11489), True, 'import numpy as np\n'), ((11563, 11590), 'numpy.ceil', 'np.ceil', (['(mean_period * 1000)'], {}), '(mean_period * 1000)\n', (11570, 11590), True, 'import numpy as np\n'), ((6909, 6934), 'numpy.where', 'np.where', (['(divergence != 0)'], {}), '(divergence != 0)\n', (6917, 6934), True, 'import numpy as np\n'), ((8713, 8737), 'numpy.argsort', 'np.argsort', (['distances[i]'], {}), '(distances[i])\n', (8723, 8737), True, 'import numpy as np\n'), ((8865, 8892), 'numpy.where', 'np.where', (['(distances[i] <= r)'], {}), '(distances[i] <= r)\n', (8873, 8892), True, 'import numpy as np\n'), ((9420, 9463), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['matrix', 'vec_beta'], {'rcond': '(-1)'}), '(matrix, vec_beta, rcond=-1)\n', (9435, 9463), True, 'import numpy as np\n'), ((9570, 9588), 'numpy.dot', 'np.dot', (['t_i', 'old_Q'], {}), '(t_i, old_Q)\n', (9576, 9588), True, 'import numpy as np\n'), ((9664, 9678), 'numpy.diag', 'np.diag', (['mat_R'], {}), '(mat_R)\n', (9671, 9678), True, 'import numpy as np\n'), ((9698, 9722), 'numpy.where', 'np.where', (['(sign_diag == 0)'], {}), '(sign_diag == 0)\n', (9706, 9722), True, 'import numpy as np\n'), ((10187, 10208), 'numpy.where', 'np.where', (['(diag_R == 0)'], {}), '(diag_R == 0)\n', (10195, 10208), True, 'import numpy as np\n'), ((12358, 
12573), 'warnings.warn', 'warn', (["(f'for dimension={dimension}, delay={delay}, tolerance={tolerance} and ' +\n f'len_trajectory={len_trajectory}, you need at least {min_len} datapoints in your time series.'\n )"], {'category': 'NeuroKitWarning'}), "(\n f'for dimension={dimension}, delay={delay}, tolerance={tolerance} and ' +\n f'len_trajectory={len_trajectory}, you need at least {min_len} datapoints in your time series.'\n , category=NeuroKitWarning)\n", (12362, 12573), False, 'from warnings import warn\n'), ((7139, 7171), 'numpy.log', 'np.log', (['divergence[dist_nonzero]'], {}), '(divergence[dist_nonzero])\n', (7145, 7171), True, 'import numpy as np\n'), ((13182, 13424), 'warnings.warn', 'warn', (["(f'for dimension={dimension}, delay={delay}, tolerance={tolerance}, ' +\n f'matrix_dim={matrix_dim} and min_neighbors={min_neighbors}, ' +\n f'you need at least {min_len} datapoints in your time series.')"], {'category': 'NeuroKitWarning'}), "(f'for dimension={dimension}, delay={delay}, tolerance={tolerance}, ' +\n f'matrix_dim={matrix_dim} and min_neighbors={min_neighbors}, ' +\n f'you need at least {min_len} datapoints in your time series.',\n category=NeuroKitWarning)\n", (13186, 13424), False, 'from warnings import warn\n'), ((6841, 6857), 'numpy.arange', 'np.arange', (['ntraj'], {}), '(ntraj)\n', (6850, 6857), True, 'import numpy as np\n')] |
import numpy as np
import torch
from matplotlib import pyplot as plt
from torch import nn
import torch.nn.functional as F
class ResidualBlock(nn.Module):
    """Two stacked 3x3 conv/BN layers with an optional projection shortcut.

    When ``shortcut`` is None the input is added back unchanged, so the
    branch output must already match the input's shape.
    """

    def __init__(self, inchannel, outchannel, stride=1, shortcut=None):
        super().__init__()
        # Main branch: conv-BN-ReLU-conv-BN; the final ReLU is applied in
        # forward(), after the residual addition.
        self.left = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, 3, stride, 1, bias=False),
            nn.BatchNorm2d(outchannel),
            nn.ReLU(),
            nn.Conv2d(outchannel, outchannel, 3, 1, 1, bias=False),
            nn.BatchNorm2d(outchannel),
        )
        # Projection shortcut (e.g. 1x1 conv) or identity when None.
        self.right = shortcut

    def forward(self, input):
        branch = self.left(input)
        identity = input if self.right is None else self.right(input)
        return F.relu(branch + identity)
class ResNet(nn.Module):
    """ResNet-style backbone that also exposes intermediate feature maps.

    ``forward`` returns the classification logits together with the outputs
    of the last three stages, which makes the network usable as a feature
    extractor (e.g. for detection heads).
    """

    def __init__(self, num_class=1000):
        super().__init__()
        # Stem: 7x7 stride-2 convolution followed by a stride-2 max-pool.
        self.pre = nn.Sequential(
            nn.Conv2d(3, 64, 7, 2, 3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(3, 2, 1),
        )
        # Four residual stages; all but the first halve the resolution.
        self.stages = nn.ModuleList([
            self._make_layer(64, 128, 3),
            self._make_layer(128, 256, 4, stride=2),
            self._make_layer(256, 512, 6, stride=2),
            self._make_layer(512, 1024, 3, stride=2),
        ])
        self.fc = nn.Linear(1024, num_class)

    def _make_layer(self, inchannel, outchannel, block_num, stride=1):
        """Build one stage: a projecting block followed by identity blocks."""
        # The first block always projects (channel count changes here).
        shortcut = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, 1, stride, bias=False),
            nn.BatchNorm2d(outchannel),
        )
        blocks = [ResidualBlock(inchannel, outchannel, stride, shortcut)]
        blocks += [ResidualBlock(outchannel, outchannel) for _ in range(block_num - 1)]
        return nn.Sequential(*blocks)

    def forward(self, input):
        """Return (logits, stage2_out, stage3_out, stage4_out)."""
        feat = self.pre(input)
        feat = self.stages[0](feat)
        out1 = self.stages[1](feat)
        out2 = self.stages[2](out1)
        out3 = self.stages[3](out2)
        pooled = F.avg_pool2d(out3, 7)
        logits = self.fc(pooled.view(pooled.size(0), -1))
        return logits, out1, out2, out3
def resnet17(pretrained):
    """Build a 10-class ResNet, optionally loading weights.

    Args:
        pretrained: Falsy for random initialisation, or a checkpoint path
            (str) whose state dict is loaded; any other truthy value raises.
    """
    model = ResNet(num_class=10)
    if not pretrained:
        return model
    if not isinstance(pretrained, str):
        raise Exception("resnet request a pretrained path. got [{}]".format(pretrained))
    model.load_state_dict(torch.load(pretrained))
    return model
def imshow(img):
    """Display a CHW image tensor that was normalised into [-1, 1]."""
    # Undo the (x - 0.5) / 0.5 normalisation back into [0, 1].
    denormalized = img / 2 + 0.5
    # matplotlib expects channels last (HWC); tensors are channels first.
    plt.imshow(np.transpose(denormalized.numpy(), (1, 2, 0)))
if __name__ == "__main__":
    # Smoke test: run an untrained resnet17 on one 416x416 RGB input and
    # print the shapes of the logits and the three intermediate feature maps.
    model = resnet17(None)
    x = torch.randn(1, 3, 416, 416)
    _, out1, out2, out3 = model(x)
    print(_.shape, out1.shape, out2.shape, out3.shape)
| [
"torch.nn.ReLU",
"torch.nn.Sequential",
"torch.nn.functional.avg_pool2d",
"torch.load",
"torch.nn.Conv2d",
"numpy.transpose",
"torch.randn",
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.functional.relu"
] | [((2616, 2643), 'torch.randn', 'torch.randn', (['(1)', '(3)', '(416)', '(416)'], {}), '(1, 3, 416, 416)\n', (2627, 2643), False, 'import torch\n'), ((743, 754), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (749, 754), True, 'import torch.nn.functional as F\n'), ((1320, 1346), 'torch.nn.Linear', 'nn.Linear', (['(1024)', 'num_class'], {}), '(1024, num_class)\n', (1329, 1346), False, 'from torch import nn\n'), ((1790, 1812), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (1803, 1812), False, 'from torch import nn\n'), ((2020, 2041), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['out3', '(7)'], {}), '(out3, 7)\n', (2032, 2041), True, 'import torch.nn.functional as F\n'), ((2519, 2550), 'numpy.transpose', 'np.transpose', (['np_img', '(1, 2, 0)'], {}), '(np_img, (1, 2, 0))\n', (2531, 2550), True, 'import numpy as np\n'), ((302, 360), 'torch.nn.Conv2d', 'nn.Conv2d', (['inchannel', 'outchannel', '(3)', 'stride', '(1)'], {'bias': '(False)'}), '(inchannel, outchannel, 3, stride, 1, bias=False)\n', (311, 360), False, 'from torch import nn\n'), ((374, 400), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['outchannel'], {}), '(outchannel)\n', (388, 400), False, 'from torch import nn\n'), ((414, 423), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (421, 423), False, 'from torch import nn\n'), ((437, 491), 'torch.nn.Conv2d', 'nn.Conv2d', (['outchannel', 'outchannel', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(outchannel, outchannel, 3, 1, 1, bias=False)\n', (446, 491), False, 'from torch import nn\n'), ((505, 531), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['outchannel'], {}), '(outchannel)\n', (519, 531), False, 'from torch import nn\n'), ((914, 951), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)', '(7)', '(2)', '(3)'], {'bias': '(False)'}), '(3, 64, 7, 2, 3, bias=False)\n', (923, 951), False, 'from torch import nn\n'), ((965, 983), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (979, 983), False, 'from 
torch import nn\n'), ((997, 1006), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1004, 1006), False, 'from torch import nn\n'), ((1020, 1041), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(3)', '(2)', '(1)'], {}), '(3, 2, 1)\n', (1032, 1041), False, 'from torch import nn\n'), ((1465, 1520), 'torch.nn.Conv2d', 'nn.Conv2d', (['inchannel', 'outchannel', '(1)', 'stride'], {'bias': '(False)'}), '(inchannel, outchannel, 1, stride, bias=False)\n', (1474, 1520), False, 'from torch import nn\n'), ((1534, 1560), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['outchannel'], {}), '(outchannel)\n', (1548, 1560), False, 'from torch import nn\n'), ((2288, 2310), 'torch.load', 'torch.load', (['pretrained'], {}), '(pretrained)\n', (2298, 2310), False, 'import torch\n')] |
#!/bin/env python
# -*- coding: utf-8 -*-
#
# Created on 14.01.21
#
# Created for py_bacy
#
# @author: <NAME>, <EMAIL>
#
# Copyright (C) {2021} {<NAME>}
# System modules
import logging
from typing import Iterable, Tuple, List, Dict, Any, Union
import os.path
from collections import OrderedDict
# External modules
import prefect
from prefect import task
import numpy as np
import xarray as xr
import pandas as pd
from tabulate import tabulate
from distributed import Client
# Internal modules
# Public API of this module: the two prefect tasks defined below.
__all__ = [
    'info_observations',
    'info_assimilation'
]
@task
def info_observations(
    first_guess: xr.DataArray,
    observations: Union[xr.Dataset, Iterable[xr.Dataset]],
    run_dir: str,
    client: Client
):
    """Append observation-space first-guess statistics to text files.

    Args:
        first_guess: First-guess ensemble in observation-comparable form.
        observations: One dataset or an iterable of datasets exposing an
            ``obs`` accessor with an observation operator.
        run_dir: Run directory; files go into its ``output`` subdirectory.
        client: Dask client (part of the task signature, unused here).
    """
    # Normalise the input so a single dataset can be iterated as well.
    if isinstance(observations, xr.Dataset):
        observations = (observations,)
    obs_equivalent, filtered_obs = apply_obs_operator(
        first_guess=first_guess, observations=observations
    )
    obs_statistics = get_obs_mean_statistics(
        obs_equivalent=obs_equivalent, filtered_obs=filtered_obs
    )
    # One text file per statistics group, e.g. output/info_obs_innov.txt.
    for stat_name, stat_df in obs_statistics.items():
        write_df(
            stat_df, run_dir=run_dir,
            filename=f'info_obs_{stat_name:s}.txt'
        )
@task
def info_assimilation(
    analysis: xr.Dataset,
    background: xr.Dataset,
    run_dir: str,
    assim_config: Dict[str, Any],
    cycle_config: Dict[str, Any],
    client: Client
):
    """Write ensemble-mean analysis, background and increment statistics
    of the assimilated variables to text files.

    Args:
        analysis: Analysis ensemble (averaged over its ``ensemble`` dim).
        background: Background ensemble (averaged over its ``ensemble`` dim).
        run_dir: Run directory the info files are written under.
        assim_config: Assimilation config; ``assim_vars`` selects variables.
        cycle_config: Cycle configuration (unused here).
        client: Dask client (part of the task signature, unused here).
    """
    analysis_mean = analysis.mean('ensemble')
    background_mean = background.mean('ensemble')
    # Analysis increment: what the assimilation changed on the background.
    impact = analysis_mean - background_mean
    # NOTE(review): write_info_df is neither defined nor imported in this
    # module (only write_df exists, with a different signature) — confirm it
    # is provided elsewhere, otherwise these calls raise NameError at runtime.
    write_info_df(
        impact, 'info_impact.txt', assim_config['assim_vars'], run_dir
    )
    write_info_df(
        background_mean, 'info_background.txt', assim_config['assim_vars'],
        run_dir
    )
    write_info_df(
        analysis_mean, 'info_analysis.txt', assim_config['assim_vars'], run_dir
    )
def apply_obs_operator(
    first_guess: xr.DataArray,
    observations: Iterable[xr.Dataset]
) -> Tuple[xr.DataArray, xr.DataArray]:
    """Apply every observation's operator to the first guess.

    Observations whose operator is not implemented are silently skipped.
    The remaining model equivalents and observation values are concatenated
    along a new ``obs_group`` dimension.
    """
    equivalents = []
    kept_observations = []
    for obs_ds in observations:
        try:
            equivalents.append(obs_ds.obs.operator(obs_ds, first_guess))
            kept_observations.append(obs_ds['observations'])
        except NotImplementedError:
            # No operator available for this observation type -> skip it.
            pass
    obs_equivalent = xr.concat(equivalents, dim='obs_group')
    filtered_obs = xr.concat(kept_observations, dim='obs_group')
    return obs_equivalent, filtered_obs
def write_df(
    info_df: pd.DataFrame,
    run_dir: str,
    filename: str
) -> str:
    """Append *info_df* as an ASCII table to ``<run_dir>/output/<filename>``.

    Args:
        info_df: Statistics table to write.
        run_dir: Run directory containing the ``output`` subdirectory.
        filename: Name of the target file inside ``output``.

    Returns:
        The path of the file that was appended to.
    """
    # tabulate raises TypeError for unsupported inputs; let it propagate
    # directly (the previous `except TypeError as e: raise e` added nothing).
    info_text = tabulate(info_df, headers='keys', tablefmt='psql') + '\n'
    out_dir = os.path.join(run_dir, 'output')
    # NOTE(review): assumes <run_dir>/output already exists — confirm the
    # pipeline creates it before these tasks run.
    file_path_info = os.path.join(out_dir, filename)
    # Append so repeated calls within one run accumulate in the same file.
    with open(file_path_info, 'a+') as fh_info:
        fh_info.write(info_text)
    return file_path_info
def describe_diff_mean(arr):
    """Summarise an array over all dimensions except the leading one.

    Args:
        arr: DataArray; statistics are reduced over every dimension except
            the first, which becomes the index of the resulting table.

    Returns:
        pd.DataFrame: One row per element of the leading dimension with
        min / mean / max / std / mae / rmse plus the 5%, 10%, median,
        90% and 95% quantiles.
    """
    # Reduce over everything but the leading dimension.
    avail_dims = arr.dims[1:]
    abs_diff = np.abs(arr)
    diff_mean = arr.mean(dim=avail_dims).to_pandas()
    diff_min = arr.min(dim=avail_dims).to_pandas()
    diff_max = arr.max(dim=avail_dims).to_pandas()
    diff_std = arr.std(dim=avail_dims).to_pandas()
    diff_rmse = np.sqrt((arr**2).mean(dim=avail_dims)).to_pandas()
    diff_mae = abs_diff.mean(dim=avail_dims).to_pandas()
    diff_per = arr.quantile(
        [0.05, 0.1, 0.5, 0.9, 0.95], dim=avail_dims
    ).T.to_pandas()
    # Consistent column labels (previously a mix of '10 %' and '95%').
    diff_per.columns = ['5%', '10%', 'median', '90%', '95%']
    diff_df = pd.DataFrame(
        data={
            'min': diff_min, 'mean': diff_mean, 'max': diff_max,
            'std': diff_std, 'mae': diff_mae, 'rmse': diff_rmse,
        },
    )
    return pd.concat([diff_df, diff_per], axis=1)
def get_obs_mean_statistics(
    obs_equivalent: xr.DataArray,
    filtered_obs: xr.DataArray
):
    """Collect innovation / observation / first-guess statistic tables.

    Returns an ordered mapping from statistic name to a summary DataFrame,
    both per observation group and with group and time flattened together.
    """
    def _flatten_time(array):
        # Merge obs_group and time into a single leading dimension so the
        # statistics are computed per (group, time) combination.
        return array.stack(
            group_time=['obs_group', 'time']
        ).transpose('group_time', 'obs_grid_1')

    statistics = OrderedDict()
    fg_mean = obs_equivalent.mean('ensemble')
    innovation = filtered_obs - fg_mean
    statistics['innov'] = describe_diff_mean(innovation)
    statistics['innov_timed'] = describe_diff_mean(_flatten_time(innovation))
    statistics['obs_time'] = describe_diff_mean(_flatten_time(filtered_obs))
    statistics['fg_time'] = describe_diff_mean(_flatten_time(fg_mean))
    return statistics
| [
"pandas.DataFrame",
"numpy.abs",
"xarray.concat",
"tabulate.tabulate",
"collections.OrderedDict",
"pandas.concat"
] | [((2344, 2386), 'xarray.concat', 'xr.concat', (['obs_equivalent'], {'dim': '"""obs_group"""'}), "(obs_equivalent, dim='obs_group')\n", (2353, 2386), True, 'import xarray as xr\n'), ((2406, 2455), 'xarray.concat', 'xr.concat', (['filtered_observations'], {'dim': '"""obs_group"""'}), "(filtered_observations, dim='obs_group')\n", (2415, 2455), True, 'import xarray as xr\n'), ((3009, 3020), 'numpy.abs', 'np.abs', (['arr'], {}), '(arr)\n', (3015, 3020), True, 'import numpy as np\n'), ((3529, 3658), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'min': diff_min, 'mean': diff_mean, 'max': diff_max, 'std': diff_std,\n 'mae': diff_mae, 'rmse': diff_rmse}"}), "(data={'min': diff_min, 'mean': diff_mean, 'max': diff_max,\n 'std': diff_std, 'mae': diff_mae, 'rmse': diff_rmse})\n", (3541, 3658), True, 'import pandas as pd\n'), ((3719, 3757), 'pandas.concat', 'pd.concat', (['[diff_df, diff_per]'], {'axis': '(1)'}), '([diff_df, diff_per], axis=1)\n', (3728, 3757), True, 'import pandas as pd\n'), ((3901, 3914), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3912, 3914), False, 'from collections import OrderedDict\n'), ((2626, 2676), 'tabulate.tabulate', 'tabulate', (['info_df'], {'headers': '"""keys"""', 'tablefmt': '"""psql"""'}), "(info_df, headers='keys', tablefmt='psql')\n", (2634, 2676), False, 'from tabulate import tabulate\n')] |
import gc
import os
from datetime import timedelta
from typing import Dict, Any, List
import cv2
import numpy as np
import torch
import torch.distributed as dist
from fire import Fire
from omegaconf import OmegaConf
from pytorch_toolbelt.utils.distributed import is_main_process
from pytorch_toolbelt.utils import fs
from tqdm import tqdm
from xview3 import *
from xview3.centernet.visualization import create_false_color_composite, vis_detections_opencv
from xview3.constants import PIX_TO_M
from xview3.inference import (
predict_multilabel_scenes,
)
def run_multilabel_predict(config: Dict[str, Any], scenes: List[str], submission_dir: str):
    """Run the ensemble over *scenes* and write prediction CSVs (plus JPEG
    visualizations) into *submission_dir*.

    Args:
        config: Parsed config with "inference" and "thresholds" sections.
        scenes: Paths of the scenes to run inference on.
        submission_dir: Output directory; created if missing.
    """
    model, checkpoints, box_coder = ensemble_from_config(config)
    # Normalization and channel settings are taken from the first checkpoint;
    # presumably all ensemble members share them — TODO confirm.
    checkpoint = checkpoints[0]
    normalization_op = build_normalization(checkpoint["checkpoint_data"]["config"]["normalization"])
    channels = checkpoint["checkpoint_data"]["config"]["dataset"]["channels"]
    channels_last = config["inference"]["channels_last"]
    tile_size = config["inference"]["tile_size"]
    tile_step = config["inference"]["tile_step"]
    os.makedirs(submission_dir, exist_ok=True)
    if config["inference"]["use_traced_model"]:
        traced_model_path = os.path.join(submission_dir, "traced_ensemble.jit")
        if os.path.exists(traced_model_path):
            model = torch.jit.load(traced_model_path)
        else:
            # Trace once on a dummy CUDA tensor; the traced model is kept
            # in memory only (saving to disk is commented out below).
            with torch.no_grad():
                if channels_last:
                    model = model.to(memory_format=torch.channels_last)
                    print("Using channels last format")
                model = torch.jit.trace(
                    model,
                    example_inputs=torch.randn(1, len(channels), tile_size, tile_size).cuda(),
                    strict=False,
                )
                # if is_main_process():
                #     torch.jit.save(model, traced_model_path)
    # The checkpoints are no longer needed; free them before inference.
    del checkpoints
    gc.collect()
    # NOTE(review): duplicate of the makedirs call above — harmless.
    os.makedirs(submission_dir, exist_ok=True)
    multi_score_test_predictions = predict_multilabel_scenes(
        model=model,
        box_coder=box_coder,
        scenes=scenes,
        channels=channels,
        normalization=normalization_op,
        output_predictions_dir=submission_dir,
        save_raw_predictions=False,
        apply_activation=False,
        # Inference options
        accumulate_on_gpu=config["inference"]["accumulate_on_gpu"],
        tile_size=tile_size,
        tile_step=tile_step,
        batch_size=config["inference"]["batch_size"],
        fp16=config["inference"]["fp16"],
        channels_last=channels_last,
        # Thresholds
        objectness_thresholds_lower_bound=0.3,
        max_objects=2048,
    )
    # Only rank 0 writes the CSVs and visualizations.
    if is_main_process():
        multi_score_test_predictions.to_csv(os.path.join(submission_dir, "unfiltered_predictions.csv"), index=False)
        # One filtered submission per threshold triple in the config.
        for thresholds in config["thresholds"]:
            objectness_threshold = float(thresholds["objectness"])
            vessel_threshold = float(thresholds["is_vessel"])
            fishing_threshold = float(thresholds["is_fishing"])
            test_predictions = apply_thresholds(multi_score_test_predictions, objectness_threshold, vessel_threshold, fishing_threshold)
            test_predictions_fname = os.path.join(
                submission_dir,
                f"predictions_obj_{objectness_threshold:.3f}_vsl_{vessel_threshold:.3f}_fsh_{fishing_threshold:.3f}.csv",
            )
            test_predictions.to_csv(test_predictions_fname, index=False)
            # NOTE(review): vestigial always-on switch for visualizations.
            if True:
                for scene_path in tqdm(scenes, desc="Making visualizations"):
                    scene_id = fs.id_from_fname(scene_path)
                    scene_df = test_predictions[test_predictions.scene_id == scene_id]
                    image = read_multichannel_image(scene_path, ["vv", "vh"])
                    normalize = SigmoidNormalization()
                    # Visualize at quarter resolution (note (width, height)
                    # ordering for cv2.resize's dsize).
                    size_down_4 = image["vv"].shape[1] // 4, image["vv"].shape[0] // 4
                    image_rgb = create_false_color_composite(
                        normalize(image=cv2.resize(image["vv"], dsize=size_down_4, interpolation=cv2.INTER_AREA))["image"],
                        normalize(image=cv2.resize(image["vh"], dsize=size_down_4, interpolation=cv2.INTER_AREA))["image"],
                    )
                    # Blank out NaN/inf pixels before drawing.
                    image_rgb[~np.isfinite(image_rgb)] = 0
                    targets = XView3DataModule.get_multilabel_targets_from_df(scene_df)
                    # Detection centers scaled to the downsampled image.
                    centers = (targets.centers * 0.25).astype(int)
                    image_rgb = vis_detections_opencv(
                        image_rgb,
                        centers=centers,
                        lengths=XView3DataModule.decode_lengths(targets.lengths) / PIX_TO_M,
                        is_vessel_vec=targets.is_vessel,
                        is_fishing_vec=targets.is_fishing,
                        is_vessel_probs=None,
                        is_fishing_probs=None,
                        scores=targets.objectness_probs,
                        show_title=True,
                        alpha=0.1,
                    )
                    cv2.imwrite(os.path.join(submission_dir, scene_id + ".jpg"), image_rgb)
def main(
    *images: List[str],
    config: str = None,
    output_dir: str = None,
    # NOTE: these defaults are evaluated once, at import time.
    local_rank=int(os.environ.get("LOCAL_RANK", 0)),
    world_size=int(os.environ.get("WORLD_SIZE", 1))
):
    """CLI entry point (invoked via Fire) for multilabel scene prediction.

    Args:
        images: Paths of the scenes to run inference on.
        config: Path to the OmegaConf file describing ensemble/inference.
        output_dir: Directory the submission files are written into.
        local_rank: Local GPU rank (read from LOCAL_RANK).
        world_size: Number of distributed workers (read from WORLD_SIZE).

    Raises:
        ValueError: If ``config`` or ``output_dir`` is not provided.
    """
    if config is None:
        raise ValueError("--config must be set")
    if output_dir is None:
        raise ValueError("--output_dir must be set")
    if world_size > 1:
        # Long timeout: scenes are large and ranks may finish far apart.
        torch.distributed.init_process_group(backend="nccl", timeout=timedelta(hours=4))
        torch.cuda.set_device(local_rank)
        print("Initialized distributed inference", local_rank, world_size)
    run_multilabel_predict(OmegaConf.load(config), scenes=images, submission_dir=output_dir)
    if world_size > 1:
        torch.distributed.barrier()
if __name__ == "__main__":
    # Give no chance to randomness: seed both RNGs and force deterministic
    # cuDNN kernels.
    torch.manual_seed(0)
    np.random.seed(0)
    torch.backends.cudnn.deterministic = True
    # benchmark=True lets cuDNN auto-tune (and switch) convolution algorithms,
    # which undermines the determinism requested above — keep it disabled.
    torch.backends.cudnn.benchmark = False
    Fire(main)
| [
"numpy.random.seed",
"gc.collect",
"torch.no_grad",
"os.path.join",
"os.path.exists",
"xview3.inference.predict_multilabel_scenes",
"numpy.isfinite",
"datetime.timedelta",
"torch.cuda.set_device",
"cv2.resize",
"tqdm.tqdm",
"torch.manual_seed",
"pytorch_toolbelt.utils.fs.id_from_fname",
"p... | [((1091, 1133), 'os.makedirs', 'os.makedirs', (['submission_dir'], {'exist_ok': '(True)'}), '(submission_dir, exist_ok=True)\n', (1102, 1133), False, 'import os\n'), ((1917, 1929), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1927, 1929), False, 'import gc\n'), ((1935, 1977), 'os.makedirs', 'os.makedirs', (['submission_dir'], {'exist_ok': '(True)'}), '(submission_dir, exist_ok=True)\n', (1946, 1977), False, 'import os\n'), ((2014, 2529), 'xview3.inference.predict_multilabel_scenes', 'predict_multilabel_scenes', ([], {'model': 'model', 'box_coder': 'box_coder', 'scenes': 'scenes', 'channels': 'channels', 'normalization': 'normalization_op', 'output_predictions_dir': 'submission_dir', 'save_raw_predictions': '(False)', 'apply_activation': '(False)', 'accumulate_on_gpu': "config['inference']['accumulate_on_gpu']", 'tile_size': 'tile_size', 'tile_step': 'tile_step', 'batch_size': "config['inference']['batch_size']", 'fp16': "config['inference']['fp16']", 'channels_last': 'channels_last', 'objectness_thresholds_lower_bound': '(0.3)', 'max_objects': '(2048)'}), "(model=model, box_coder=box_coder, scenes=scenes,\n channels=channels, normalization=normalization_op,\n output_predictions_dir=submission_dir, save_raw_predictions=False,\n apply_activation=False, accumulate_on_gpu=config['inference'][\n 'accumulate_on_gpu'], tile_size=tile_size, tile_step=tile_step,\n batch_size=config['inference']['batch_size'], fp16=config['inference'][\n 'fp16'], channels_last=channels_last, objectness_thresholds_lower_bound\n =0.3, max_objects=2048)\n", (2039, 2529), False, 'from xview3.inference import predict_multilabel_scenes\n'), ((2691, 2708), 'pytorch_toolbelt.utils.distributed.is_main_process', 'is_main_process', ([], {}), '()\n', (2706, 2708), False, 'from pytorch_toolbelt.utils.distributed import is_main_process\n'), ((5889, 5909), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (5906, 5909), False, 'import torch\n'), ((5914, 5931), 'numpy.random.seed', 
'np.random.seed', (['(0)'], {}), '(0)\n', (5928, 5931), True, 'import numpy as np\n'), ((6025, 6035), 'fire.Fire', 'Fire', (['main'], {}), '(main)\n', (6029, 6035), False, 'from fire import Fire\n'), ((1211, 1262), 'os.path.join', 'os.path.join', (['submission_dir', '"""traced_ensemble.jit"""'], {}), "(submission_dir, 'traced_ensemble.jit')\n", (1223, 1262), False, 'import os\n'), ((1274, 1307), 'os.path.exists', 'os.path.exists', (['traced_model_path'], {}), '(traced_model_path)\n', (1288, 1307), False, 'import os\n'), ((5196, 5227), 'os.environ.get', 'os.environ.get', (['"""LOCAL_RANK"""', '(0)'], {}), "('LOCAL_RANK', 0)\n", (5210, 5227), False, 'import os\n'), ((5249, 5280), 'os.environ.get', 'os.environ.get', (['"""WORLD_SIZE"""', '(1)'], {}), "('WORLD_SIZE', 1)\n", (5263, 5280), False, 'import os\n'), ((5558, 5591), 'torch.cuda.set_device', 'torch.cuda.set_device', (['local_rank'], {}), '(local_rank)\n', (5579, 5591), False, 'import torch\n'), ((5695, 5717), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['config'], {}), '(config)\n', (5709, 5717), False, 'from omegaconf import OmegaConf\n'), ((5793, 5820), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (5818, 5820), False, 'import torch\n'), ((1329, 1362), 'torch.jit.load', 'torch.jit.load', (['traced_model_path'], {}), '(traced_model_path)\n', (1343, 1362), False, 'import torch\n'), ((2754, 2812), 'os.path.join', 'os.path.join', (['submission_dir', '"""unfiltered_predictions.csv"""'], {}), "(submission_dir, 'unfiltered_predictions.csv')\n", (2766, 2812), False, 'import os\n'), ((3245, 3388), 'os.path.join', 'os.path.join', (['submission_dir', 'f"""predictions_obj_{objectness_threshold:.3f}_vsl_{vessel_threshold:.3f}_fsh_{fishing_threshold:.3f}.csv"""'], {}), "(submission_dir,\n f'predictions_obj_{objectness_threshold:.3f}_vsl_{vessel_threshold:.3f}_fsh_{fishing_threshold:.3f}.csv'\n )\n", (3257, 3388), False, 'import os\n'), ((3549, 3591), 'tqdm.tqdm', 'tqdm', (['scenes'], 
{'desc': '"""Making visualizations"""'}), "(scenes, desc='Making visualizations')\n", (3553, 3591), False, 'from tqdm import tqdm\n'), ((1394, 1409), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1407, 1409), False, 'import torch\n'), ((3620, 3648), 'pytorch_toolbelt.utils.fs.id_from_fname', 'fs.id_from_fname', (['scene_path'], {}), '(scene_path)\n', (3636, 3648), False, 'from pytorch_toolbelt.utils import fs\n'), ((5530, 5548), 'datetime.timedelta', 'timedelta', ([], {'hours': '(4)'}), '(hours=4)\n', (5539, 5548), False, 'from datetime import timedelta\n'), ((5029, 5076), 'os.path.join', 'os.path.join', (['submission_dir', "(scene_id + '.jpg')"], {}), "(submission_dir, scene_id + '.jpg')\n", (5041, 5076), False, 'import os\n'), ((4285, 4307), 'numpy.isfinite', 'np.isfinite', (['image_rgb'], {}), '(image_rgb)\n', (4296, 4307), True, 'import numpy as np\n'), ((4036, 4108), 'cv2.resize', 'cv2.resize', (["image['vv']"], {'dsize': 'size_down_4', 'interpolation': 'cv2.INTER_AREA'}), "(image['vv'], dsize=size_down_4, interpolation=cv2.INTER_AREA)\n", (4046, 4108), False, 'import cv2\n'), ((4156, 4228), 'cv2.resize', 'cv2.resize', (["image['vh']"], {'dsize': 'size_down_4', 'interpolation': 'cv2.INTER_AREA'}), "(image['vh'], dsize=size_down_4, interpolation=cv2.INTER_AREA)\n", (4166, 4228), False, 'import cv2\n')] |
from re import L
import numpy as np
from games.maze.maze_game import MazeGame
from games.maze.maze_level import MazeLevel
from novelty_neat.generation import NeatLevelGenerator
from novelty_neat.types import LevelNeuralNet
from games.level import Level
import neat
class GenerateMazeLevelUsingOnePass(NeatLevelGenerator):
    """Generate a maze level with a single forward pass of the network.

    The network must emit exactly ``width * height`` values; each value is
    thresholded at 0.5 to decide whether the corresponding tile is filled.
    """

    def __init__(self, game: MazeGame, number_of_random_variables: int = 2):
        super().__init__(number_of_random_variables)
        self.game = game

    def generate_maze_level_using_one_pass(self, input: np.ndarray, net: LevelNeuralNet) -> Level:
        """Run `net` once on `input` and reshape the flat output into a level.

        Args:
            input (np.ndarray): The random inputs
            net (LevelNeuralNet): The network that generates the levels.

        Returns:
            Level: The generated level.
        """
        width, height = self.game.level.width, self.game.level.height
        total_number_of_elements_expected = width * height
        outputs = np.array(net.activate(input))
        assert outputs.shape == (total_number_of_elements_expected,), f"Shape of One pass output should be {total_number_of_elements_expected}"
        # Reshape to (height, width) and binarize at 0.5.
        grid = outputs.reshape((height, width)) > 0.5
        return MazeLevel.from_map(grid.astype(np.int32))

    def generate_level(self, net: LevelNeuralNet, input: np.ndarray) -> Level:
        return self.generate_maze_level_using_one_pass(input, net)
class GenerateMazeLevelsUsingTiling(NeatLevelGenerator):
    """Generates a maze level cell by cell: for each cell the network is fed
    the surrounding 3x3 neighbourhood (minus the centre), random inputs, and
    optionally the cell's normalised coordinates; its output becomes the cell.
    """
    def __init__(self, game: MazeGame, tile_size: int = 1, number_of_random_variables: int = 2,
                 should_add_coords: bool = False,
                 do_padding_randomly: bool = False,
                 should_start_with_full_level: bool = False,
                 random_perturb_size: float = 0,
                 do_empty_start_goal: bool = False,
                 reverse: int = 0
                 ):
        """Generates levels using a tiling approach, i.e. moving through all of the cells, giving the network the surrounding ones and taking the prediction as the current tile.

        Args:
            game (MazeGame): The game to generate levels for.
            tile_size (int, optional): Not used; stored as subtile_width. The context window is fixed at 3x3. Defaults to 1.
            number_of_random_variables (int, optional): The number of random variables to add to the network. Defaults to 2.
            should_add_coords (bool, optional): If this is true, we append the normalised coordinates of the current cell to the input to the network. Defaults to False.
            do_padding_randomly (bool, optional): If this is true, then we don't pad with -1s around the borders, but we instead make those random as well. Defaults to False.
            should_start_with_full_level (bool, optional): The initial level, instead of being random, is completely filled with 1s. Defaults to False.
            random_perturb_size (float, optional): If this is nonzero, all inputs to the net, including coordinates and surrounding tiles, will be randomly perturbed by a gaussian (mean 0, variance 1) multiplied by this value. Defaults to 0.
            do_empty_start_goal (bool, optional): If True, then we make the start and end positions empty, no matter what the network predicts. Defaults to False.
            reverse (int, optional): 0 -> normal iteration order;
                1 -> iterate from bottom right to top left instead of the other way around. Defaults to 0.
        """
        super().__init__(number_of_random_variables)
        self.game = game
        self.subtile_width = tile_size
        # Context window is hard-coded to 3x3 regardless of the tile_size argument.
        self.tile_size = 3 # tile_size
        self.should_add_coords = should_add_coords
        self.do_padding_randomly = do_padding_randomly
        self.should_start_with_full_level = should_start_with_full_level
        self.random_perturb_size = random_perturb_size
        self.do_empty_start_goal = do_empty_start_goal
        self.reverse = reverse
        assert self.tile_size == 3, "Not supported for different tiles sizes yet."
    def generate_level(self, net: LevelNeuralNet, input: np.ndarray) -> Level:
        return self.generate_maze_level_using_tiling(input, net)
    def generate_maze_level_using_tiling(self, input: np.ndarray, net: LevelNeuralNet) -> Level:
        """Generates the level incrementally: for every cell the network receives
        the 8 adjacent tiles plus the given random numbers, and predicts the
        current tile.

        Args:
            input (np.ndarray): The random numbers that act as input
            net (LevelNeuralNet): The network that actually generates the level.
                Must take in len(input) + self.tile_size ** 2 - 1 numbers
                (plus 2 if should_add_coords) and output a single one.

        Returns:
            Level: The generated level.
        """
        h, w = self.game.level.height, self.game.level.width
        half_tile = self.tile_size // 2
        # net = neat.nn.FeedForwardNetwork.create(genome, config)
        # `output` is the working grid, padded by half_tile on every side so
        # border cells also have a full 3x3 neighbourhood.
        if self.do_padding_randomly:
            # Pad randomly, and don't make the edges special.
            output = 1.0 * (np.random.rand(h + 2 * half_tile, w + 2 * half_tile) > 0.5)
        else:
            output = np.zeros((h + half_tile * 2, w + half_tile * 2)) - 1 # pad it
            if self.should_start_with_full_level:
                output[half_tile:-half_tile, half_tile:-half_tile] = np.ones((h, w)) # initial level
            else:
                output[half_tile:-half_tile, half_tile:-half_tile] = 1.0 * (np.random.rand(h, w) > 0.5) # initial level
        input_list = list(input)
        # assert output.sum() != 0
        row_range = range(half_tile, h + half_tile)
        col_range = range(half_tile, w + half_tile)
        if self.reverse == 1:
            row_range = reversed(row_range)
            col_range = reversed(col_range)
        # This is super important, as the reversed thing is a one use iterator!
        # You cannot iterate multiple times!!!!!!!!!!
        row_range = list(row_range)
        col_range = list(col_range)
        for row in row_range:
            for col in col_range:
                # get state
                little_slice = output[row - half_tile: row + half_tile + 1, col - half_tile: col + half_tile + 1]
                # This should be a 3x3 slice now.
                assert little_slice.shape == (self.tile_size, self.tile_size)
                total = self.tile_size * self.tile_size
                little_slice = little_slice.flatten()
                # Remove the middle element, which corresponds to the current cell.
                little_slice_list = list(little_slice)
                little_slice_list.pop(total//2)
                assert len(little_slice_list) == total - 1, f"{len(little_slice)} != {total-1}"
                # Add in random input.
                little_slice_list.extend(input_list)
                if self.should_add_coords:
                    # Normalised coords between 0 and 1.
                    little_slice_list.extend([
                        (row - half_tile) / (h - 1),
                        (col - half_tile) / (w - 1)
                    ])
                input_to_net = little_slice_list
                assert len(input_to_net) == total -1 + self.number_of_random_variables + self.should_add_coords * 2
                if self.random_perturb_size != 0:
                    # Perturb input randomly.
                    input_to_net = np.add(input_to_net, np.random.randn(len(input_to_net)) * self.random_perturb_size)
                output_tile = net.activate(input_to_net)[0]
                # Threshold
                output[row, col] = (output_tile > 0.5) * 1.0
                if self.do_empty_start_goal:
                    # If we empty the start and end, and we are either at the goal or start, make this tile 0.
                    if row == half_tile and col == half_tile or row == h + half_tile - 1 and col == w + half_tile - 1:
                        output[row, col] = 0
        thresh = 0.5
        # if np.any(output < -0.1): thresh = 0
        # Take only relevant parts.
        output = output[half_tile:-half_tile, half_tile:-half_tile]
        assert output.shape == (h, w)
        return MazeLevel.from_map((output > thresh).astype(np.int32))
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(tile_size={self.tile_size}, number_of_random_variables={self.number_of_random_variables}, should_add_coords={self.should_add_coords}, do_padding_randomly={self.do_padding_randomly}, random_perturb_size={self.random_perturb_size}, do_empty_start_goal={self.do_empty_start_goal}, reverse={self.reverse})"
class GenerateMazeLevelsUsingTilingVariableTileSize(NeatLevelGenerator):
    """Tiling generator that predicts a whole ``tile_size`` x ``tile_size``
    subtile per network call, given a 3x3 block of subtiles as context.
    """
    def __init__(self, game: MazeGame, tile_size: int = 1, number_of_random_variables: int = 2,
                 do_padding_randomly: bool = False,
                 random_perturb_size: float = 0):
        """
        Args:
            game (MazeGame): The game to generate levels for.
            tile_size (int, optional): Width of the subtile predicted per step
                (stored as subtile_width). Defaults to 1.
            number_of_random_variables (int, optional): Number of random inputs
                appended to the network input. Defaults to 2.
            do_padding_randomly (bool, optional): Pad the border randomly
                instead of with -1s. Defaults to False.
            random_perturb_size (float, optional): Gaussian perturbation scale
                applied to all net inputs if nonzero. Defaults to 0.
        """
        super().__init__(number_of_random_variables)
        self.game = game
        self.subtile_width = tile_size
        # Context window is fixed at 3x3 subtiles.
        self.tile_size = 3 # tile_size
        assert self.tile_size == 3, "Not supported for different tiles sizes yet."
        self.do_padding_randomly = do_padding_randomly
        self.random_perturb_size = random_perturb_size
    def generate_level(self, net: LevelNeuralNet, input: np.ndarray) -> Level:
        return self.generate_maze_level_using_tiling_bigger_sizes(input, net)
    def generate_maze_level_using_tiling_bigger_sizes(self, input: np.ndarray, net: LevelNeuralNet) -> Level:
        """This should generate levels on the same principles, but we should take bigger tiles. Thus, to predict the following:
            a b e f i j
            c d g h k l
            m n x y q r
            o p z w s t
            1 2 5 6 9 A
            3 4 7 8 B C
        If we want to predict the 4 tiles x, y, z, w, we give the network all the shown tiles as context (even x, y, z, w).

        Args:
            input (np.ndarray): The random numbers that act as input.
            net (LevelNeuralNet): The network that generates the level; must
                output tile_size ** 2 values per activation.

        Returns:
            Level: The generated level.
        """
        size = self.subtile_width
        h, w = self.game.level.height, self.game.level.width
        # Padding is half the 3x3-subtile context, in cells.
        half_tile = self.tile_size // 2 * size
        if self.do_padding_randomly:
            # Pad randomly, and don't make the edges special.
            output = 1.0 * (np.random.rand(h + 2 * half_tile, w + 2 * half_tile) > 0.5)
        else:
            output = np.zeros((h + half_tile * 2, w + half_tile * 2)) - 1 # pad it
        output[half_tile:-half_tile, half_tile:-half_tile] = 1.0 * (np.random.rand(h, w) > 0.5) # initial level
        input_list = list(input)
        assert output[half_tile:-half_tile, half_tile:-half_tile].sum() != 0
        # Binarize the interior before iterating.
        output[half_tile:-half_tile, half_tile:-half_tile] = 1*(output[half_tile:-half_tile, half_tile:-half_tile] > 0.5)
        for row in range(half_tile, h + half_tile, size):
            for col in range(half_tile, w + half_tile, size):
                # little_slice = output[row - half_tile: row + half_tile + 1, col - half_tile: col + half_tile + 1]
                # Context: the current subtile plus one subtile in each direction.
                little_slice = output[row - size: row + size * 2, col - size: col + size * 2]
                assert little_slice.shape == (3 * size, 3 * size)
                little_slice_list = list(little_slice.flatten())
                little_slice_list.extend(input_list)
                input_to_net = little_slice_list
                if self.random_perturb_size != 0:
                    # Perturb input randomly.
                    input_to_net = np.add(input_to_net, np.random.randn(len(input_to_net)) * self.random_perturb_size)
                # The net predicts the whole size x size subtile at once.
                output_tiles = np.array(net.activate(input_to_net)).reshape(size, size)
                output[row: row + size, col: col + size] = output_tiles > 0.5
        # Strip the padding before building the level.
        output = output[half_tile:-half_tile, half_tile:-half_tile]
        assert output.shape == (h, w)
        return MazeLevel.from_map((output > 0.5).astype(np.int32))
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(tile_size={self.tile_size}, number_of_random_variables={self.number_of_random_variables}, do_padding_randomly={self.do_padding_randomly}, random_perturb_size={self.random_perturb_size})"
class GenerateMazeLevelsUsingCPPNCoordinates(NeatLevelGenerator):
    """Generates a level from a CPPN using the coordinates of the cells as well as a random input.
        Thus, we input (x, y, r1, r2, ..., rn) and get out a tile type.
    """
    def __init__(self, game: MazeGame, number_of_random_variables: int, new_random_at_each_step:bool = False):
        """
        Args:
            game (MazeGame): The game to generate levels for
            number_of_random_variables (int): How many random numbers need to be generated for one pass through the network.
            new_random_at_each_step (bool, optional): If this is true, then we generate a new random number (or numbers)
                for each cell we generate. Defaults to False.
        """
        super().__init__(number_of_random_variables=number_of_random_variables)
        self.game = game
        self.new_random_at_each_step = new_random_at_each_step
    def generate_level(self, net: LevelNeuralNet, input: np.ndarray) -> Level:
        height, width = self.game.level.height, self.game.level.width
        grid = np.zeros((height, width))
        randoms = list(input)
        for row in range(height):
            for col in range(width):
                if self.new_random_at_each_step:
                    # Fresh random inputs for every cell.
                    randoms = list(np.random.randn(self.number_of_random_variables))
                # Normalise the coordinates to [0, 1], then rescale to [-1, 1].
                norm_row = (row / (height - 1) - 0.5) * 2
                norm_col = (col / (width - 1) - 0.5) * 2
                grid[row, col] = net.activate([norm_row, norm_col] + randoms)[0]
        # If the net emits negative values, threshold at 0 instead of 0.5.
        threshold = 0 if np.any(grid < -0.01) else 0.5
        return MazeLevel.from_map((grid > threshold).astype(np.int32))
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(number_of_random_variables={self.number_of_random_variables}, new_random_at_each_step={self.new_random_at_each_step})"
class GenerateMazeLevelsUsingMoreContext(NeatLevelGenerator):
def __init__(self, game: MazeGame, context_size: int = 1, number_of_random_variables: int = 2,
do_padding_randomly: bool = False,
random_perturb_size: float = 0):
super().__init__(number_of_random_variables)
self.game = game
self.context_size = context_size
self.do_padding_randomly = do_padding_randomly
self.random_perturb_size = random_perturb_size
self.tile_size = 2 * context_size + 1
# assert self.tile_size == 5
def generate_level(self, net: LevelNeuralNet, input: np.ndarray) -> Level:
return self.generate_maze_level_using_tiling_bigger_sizes(input, net)
def generate_maze_level_using_tiling_bigger_sizes(self, input: np.ndarray, net: LevelNeuralNet) -> Level:
"""
"""
h, w = self.game.level.height, self.game.level.width
half_tile = self.tile_size // 2
if self.do_padding_randomly:
# Pad randomly, and don't make the edges special.
output = 1.0 * (np.random.rand(h + 2 * half_tile, w + 2 * half_tile) > 0.5)
else:
output = np.zeros((h + half_tile * 2, w + half_tile * 2)) - 1 # pad it
output[half_tile:-half_tile, half_tile:-half_tile] = 1.0 * (np.random.rand(h, w) > 0.5) # initial level
input_list = list(input)
assert output[half_tile:-half_tile, half_tile:-half_tile].sum() != 0
output[half_tile:-half_tile, half_tile:-half_tile] = 1*(output[half_tile:-half_tile, half_tile:-half_tile] > 0.5)
for row in range(half_tile, h + half_tile):
for col in range(half_tile, w + half_tile):
# get state
little_slice = output[row - half_tile: row + half_tile + 1, col - half_tile: col + half_tile + 1]
# This should be a 3x3 slice now.
assert little_slice.shape == (self.tile_size, self.tile_size)
total = self.tile_size * self.tile_size
little_slice = little_slice.flatten()
# Remove the middle element, which corresponds to the current cell.
little_slice_list = list(little_slice)
little_slice_list.pop(total//2)
assert len(little_slice_list) == total - 1, f"{len(little_slice)} != {total-1}"
# Add in random input.
little_slice_list.extend(input_list)
input_to_net = little_slice_list
assert len(input_to_net) == total -1 + self.number_of_random_variables
if self.random_perturb_size != 0:
# Perturb input randomly.
input_to_net = np.add(input_to_net, np.random.randn(len(input_to_net)) * self.random_perturb_size)
output_tile = net.activate(input_to_net)[0]
# Threshold
output[row, col] = (output_tile > 0.5) * 1.0
thresh = 0.5
# if np.any(output < -0.1): thresh = 0
# Take only relevant parts.
output = output[half_tile:-half_tile, half_tile:-half_tile]
assert output.shape == (h, w)
return MazeLevel.from_map((output > thresh).astype(np.int32))
def __repr__(self) -> str:
return f"{self.__class__.__name__}(tile_size={self.tile_size}, number_of_random_variables={self.number_of_random_variables}, do_padding_randomly={self.do_padding_randomly}, random_perturb_size={self.random_perturb_size}, context_size={self.context_size})"
if __name__ == "__main__":
    # Smoke test of the variable-tile-size generator.
    # BUG FIX: the original instantiated GenerateMazeLevelsUsingTiling, which
    # does not define generate_maze_level_using_tiling_bigger_sizes (that
    # method belongs to GenerateMazeLevelsUsingTilingVariableTileSize), so it
    # raised AttributeError at runtime.
    # NOTE(review): passing net=None still fails at net.activate - supply a
    # real network to actually exercise generation.
    g = GenerateMazeLevelsUsingTilingVariableTileSize(MazeGame(MazeLevel()), tile_size=2)
    g.generate_maze_level_using_tiling_bigger_sizes(np.random.randn(2), None)
pass | [
"numpy.random.randn",
"games.maze.maze_level.MazeLevel",
"numpy.zeros",
"numpy.ones",
"numpy.any",
"numpy.random.rand"
] | [((13722, 13738), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (13730, 13738), True, 'import numpy as np\n'), ((14334, 14356), 'numpy.any', 'np.any', (['(output < -0.01)'], {}), '(output < -0.01)\n', (14340, 14356), True, 'import numpy as np\n'), ((18416, 18434), 'numpy.random.randn', 'np.random.randn', (['(2)'], {}), '(2)\n', (18431, 18434), True, 'import numpy as np\n'), ((18337, 18348), 'games.maze.maze_level.MazeLevel', 'MazeLevel', ([], {}), '()\n', (18346, 18348), False, 'from games.maze.maze_level import MazeLevel\n'), ((5355, 5403), 'numpy.zeros', 'np.zeros', (['(h + half_tile * 2, w + half_tile * 2)'], {}), '((h + half_tile * 2, w + half_tile * 2))\n', (5363, 5403), True, 'import numpy as np\n'), ((5536, 5551), 'numpy.ones', 'np.ones', (['(h, w)'], {}), '((h, w))\n', (5543, 5551), True, 'import numpy as np\n'), ((10796, 10844), 'numpy.zeros', 'np.zeros', (['(h + half_tile * 2, w + half_tile * 2)'], {}), '((h + half_tile * 2, w + half_tile * 2))\n', (10804, 10844), True, 'import numpy as np\n'), ((15843, 15891), 'numpy.zeros', 'np.zeros', (['(h + half_tile * 2, w + half_tile * 2)'], {}), '((h + half_tile * 2, w + half_tile * 2))\n', (15851, 15891), True, 'import numpy as np\n'), ((5260, 5312), 'numpy.random.rand', 'np.random.rand', (['(h + 2 * half_tile)', '(w + 2 * half_tile)'], {}), '(h + 2 * half_tile, w + 2 * half_tile)\n', (5274, 5312), True, 'import numpy as np\n'), ((10701, 10753), 'numpy.random.rand', 'np.random.rand', (['(h + 2 * half_tile)', '(w + 2 * half_tile)'], {}), '(h + 2 * half_tile, w + 2 * half_tile)\n', (10715, 10753), True, 'import numpy as np\n'), ((10930, 10950), 'numpy.random.rand', 'np.random.rand', (['h', 'w'], {}), '(h, w)\n', (10944, 10950), True, 'import numpy as np\n'), ((15748, 15800), 'numpy.random.rand', 'np.random.rand', (['(h + 2 * half_tile)', '(w + 2 * half_tile)'], {}), '(h + 2 * half_tile, w + 2 * half_tile)\n', (15762, 15800), True, 'import numpy as np\n'), ((15977, 15997), 'numpy.random.rand', 
'np.random.rand', (['h', 'w'], {}), '(h, w)\n', (15991, 15997), True, 'import numpy as np\n'), ((5662, 5682), 'numpy.random.rand', 'np.random.rand', (['h', 'w'], {}), '(h, w)\n', (5676, 5682), True, 'import numpy as np\n'), ((13923, 13971), 'numpy.random.randn', 'np.random.randn', (['self.number_of_random_variables'], {}), '(self.number_of_random_variables)\n', (13938, 13971), True, 'import numpy as np\n')] |
import numpy as np
from opytimizer.optimizers.evolutionary import iwo
from opytimizer.spaces import search
np.random.seed(0)
def test_iwo_params():
    """IWO must expose every hyperparameter passed in via `params`."""
    params = {
        'min_seeds': 0,
        'max_seeds': 5,
        'e': 2,
        'final_sigma': 0.001,
        'init_sigma': 3
    }
    optimizer = iwo.IWO(params=params)
    # Each supplied hyperparameter should be stored verbatim.
    for name, expected in params.items():
        assert getattr(optimizer, name) == expected
def test_iwo_params_setter():
    """Invalid hyperparameter values must be rejected by the setters."""
    new_iwo = iwo.IWO()

    def assign_with_fallback(attr, bad_value, fallback):
        # Mirrors the original try/except pattern: an invalid value should
        # raise inside the property setter, after which the fallback is stored.
        try:
            setattr(new_iwo, attr, bad_value)
        except:
            setattr(new_iwo, attr, fallback)

    # min_seeds: rejects non-numeric and negative values.
    assign_with_fallback('min_seeds', 'a', 0)
    assign_with_fallback('min_seeds', -1, 0)
    assert new_iwo.min_seeds == 0

    # max_seeds: rejects non-numeric and negative values.
    assign_with_fallback('max_seeds', 'b', 2)
    assign_with_fallback('max_seeds', -1, 2)
    assert new_iwo.max_seeds == 2

    # e: rejects non-numeric and negative values.
    assign_with_fallback('e', 'c', 1.5)
    assign_with_fallback('e', -1, 1.5)
    assert new_iwo.e == 1.5

    # final_sigma: rejects non-numeric and negative values.
    assign_with_fallback('final_sigma', 'd', 1.5)
    assign_with_fallback('final_sigma', -1, 1.5)
    assert new_iwo.final_sigma == 1.5

    # init_sigma: rejects non-numeric, negative, and too-small values.
    assign_with_fallback('init_sigma', 'e', 2.0)
    assign_with_fallback('init_sigma', -1, 2.0)
    assign_with_fallback('init_sigma', 1.3, 2.0)
    assert new_iwo.init_sigma == 2.0

    # sigma: rejects non-numeric values.
    assign_with_fallback('sigma', 'f', 1)
    assert new_iwo.sigma == 1
def test_iwo_spatial_dispersal():
    """_spatial_dispersal must update sigma as expected at iteration 1 of 10."""
    optimizer = iwo.IWO()
    optimizer._spatial_dispersal(1, 10)
    assert optimizer.sigma == 2.43019
def test_iwo_produce_offspring():
    """_produce_offspring must return an Agent for a valid parent."""
    def fitness(x):
        return np.sum(x ** 2)

    space = search.SearchSpace(n_agents=2, n_variables=2,
                               lower_bound=[1, 1], upper_bound=[10, 10])
    optimizer = iwo.IWO()
    offspring = optimizer._produce_offspring(space.agents[0], fitness)
    assert type(offspring).__name__ == 'Agent'
def test_iwo_update():
    """update() must run one iteration over a search space without raising."""
    def fitness(x):
        return np.sum(x ** 2)

    optimizer = iwo.IWO()
    optimizer.min_seeds = 5
    optimizer.max_seeds = 20
    space = search.SearchSpace(n_agents=5, n_variables=2,
                               lower_bound=[1, 1], upper_bound=[10, 10])
    optimizer.update(space, fitness, 1, 10)
| [
"opytimizer.optimizers.evolutionary.iwo.IWO",
"numpy.sum",
"numpy.random.seed",
"opytimizer.spaces.search.SearchSpace"
] | [((109, 126), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (123, 126), True, 'import numpy as np\n'), ((306, 328), 'opytimizer.optimizers.evolutionary.iwo.IWO', 'iwo.IWO', ([], {'params': 'params'}), '(params=params)\n', (313, 328), False, 'from opytimizer.optimizers.evolutionary import iwo\n'), ((549, 558), 'opytimizer.optimizers.evolutionary.iwo.IWO', 'iwo.IWO', ([], {}), '()\n', (556, 558), False, 'from opytimizer.optimizers.evolutionary import iwo\n'), ((1807, 1816), 'opytimizer.optimizers.evolutionary.iwo.IWO', 'iwo.IWO', ([], {}), '()\n', (1814, 1816), False, 'from opytimizer.optimizers.evolutionary import iwo\n'), ((1996, 2087), 'opytimizer.spaces.search.SearchSpace', 'search.SearchSpace', ([], {'n_agents': '(2)', 'n_variables': '(2)', 'lower_bound': '[1, 1]', 'upper_bound': '[10, 10]'}), '(n_agents=2, n_variables=2, lower_bound=[1, 1],\n upper_bound=[10, 10])\n', (2014, 2087), False, 'from opytimizer.spaces import search\n'), ((2137, 2146), 'opytimizer.optimizers.evolutionary.iwo.IWO', 'iwo.IWO', ([], {}), '()\n', (2144, 2146), False, 'from opytimizer.optimizers.evolutionary import iwo\n'), ((2350, 2359), 'opytimizer.optimizers.evolutionary.iwo.IWO', 'iwo.IWO', ([], {}), '()\n', (2357, 2359), False, 'from opytimizer.optimizers.evolutionary import iwo\n'), ((2433, 2524), 'opytimizer.spaces.search.SearchSpace', 'search.SearchSpace', ([], {'n_agents': '(5)', 'n_variables': '(2)', 'lower_bound': '[1, 1]', 'upper_bound': '[10, 10]'}), '(n_agents=5, n_variables=2, lower_bound=[1, 1],\n upper_bound=[10, 10])\n', (2451, 2524), False, 'from opytimizer.spaces import search\n'), ((1963, 1977), 'numpy.sum', 'np.sum', (['(x ** 2)'], {}), '(x ** 2)\n', (1969, 1977), True, 'import numpy as np\n'), ((2322, 2336), 'numpy.sum', 'np.sum', (['(x ** 2)'], {}), '(x ** 2)\n', (2328, 2336), True, 'import numpy as np\n')] |
# NEI.py (stewi)
# !/usr/bin/env python3
# coding=utf-8
"""
Imports NEI data and processes to Standardized EPA output format.
Uses the NEI data exports from EIS. Must contain locally downloaded data for
options A:C.
This file requires parameters be passed like:
Option -Y Year
Options:
A - for downloading NEI Point data and
generating inventory files for StEWI:
flowbyfacility
flowbyprocess
flows
facilities
B - for downloading national totals for validation
Year:
2018
2017
2016
2015
2014
2013
2012
2011
"""
import pandas as pd
import numpy as np
import os
import argparse
import requests
import zipfile
import io
from esupy.processed_data_mgmt import download_from_remote
from esupy.util import strip_file_extension
from stewi.globals import data_dir,write_metadata, USton_kg,lb_kg,\
log, store_inventory, config, read_source_metadata,\
paths, aggregate, get_reliability_table_for_source, set_stewi_meta
from stewi.validate import update_validationsets_sources, validate_inventory,\
write_validation_result
# NEI-specific settings loaded from the stewi configuration file.
_config = config()['databases']['NEI']
# Folder name (under the local stewi path) holding downloaded NEI data files.
ext_folder = 'NEI Data Files'
nei_external_dir = paths.local_path + '/' + ext_folder + '/'
# Packaged data directory containing the NEI field-name mapping.
nei_data_dir = data_dir + 'NEI/'
def read_data(year, file):
    """
    Read a single NEI parquet file and return a dataframe with
    standardized EPA column names.

    :param year : str, Year of NEI dataset for identifying field names
    :param file : str, File path containing NEI data (parquet).
    :returns df : DataFrame of NEI data from a single file
        with standardized column names.
    """
    # The mapping file pairs year-specific source field names with the
    # standardized EPA names.
    fields = pd.read_csv(nei_data_dir + 'NEI_required_fields.csv')
    fields = fields[[year, 'StandardizedEPA']]
    source_columns = list(fields[year].dropna())
    df = pd.read_parquet(file, columns=source_columns, engine='pyarrow')
    # Rename year-specific columns to the standardized EPA names.
    rename_map = dict(zip(list(fields[year]), list(fields['StandardizedEPA'])))
    return df.rename(columns=rename_map)
def standardize_output(year, source='Point'):
    """
    Read and parse NEI data for a year into a single dataframe.

    Downloads any source files missing locally, concatenates all files,
    converts emissions from US tons to kg, and attaches data quality
    (reliability) information.

    :param year : str, Year of NEI dataset
    :param source : str, data source type; only 'Point' records carry
        reliability scores - all other sources get DataReliability = 3
    :returns nei: DataFrame of parsed NEI data.
    """
    nei = pd.DataFrame()
    # read in nei files and concatenate all nei files into one dataframe
    nei_file_path = _config[year]['file_name']
    for file in nei_file_path:
        if not os.path.exists(nei_external_dir + file):
            log.info('%s not found in %s, downloading source data',
                     file, nei_external_dir)
            # download source file and metadata
            file_meta = set_stewi_meta(strip_file_extension(file))
            file_meta.category = ext_folder
            file_meta.tool = file_meta.tool.lower()
            download_from_remote(file_meta, paths)
        # concatenate all other files
        log.info('reading NEI data from '+ nei_external_dir + file)
        nei = pd.concat([nei, read_data(year, nei_external_dir + file)])
        log.debug(str(len(nei)) + ' records')
    # convert TON to KG
    nei['FlowAmount'] = nei['FlowAmount'] * USton_kg
    log.info('adding Data Quality information')
    if source == 'Point':
        nei_reliability_table = get_reliability_table_for_source('NEI')
        nei_reliability_table['Code'] = nei_reliability_table['Code'].astype(float)
        nei['ReliabilityScore'] = nei['ReliabilityScore'].astype(float)
        nei = nei.merge(nei_reliability_table, left_on='ReliabilityScore',
                        right_on='Code', how='left')
        nei['DataReliability'] = nei['DQI Reliability Score']
        # drop merge helper columns; keyword form - positional axis was
        # deprecated in pandas 1.x and removed in 2.0
        nei = nei.drop(columns=['Code', 'DQI Reliability Score',
                                'ReliabilityScore'])
        nei['Compartment'] = 'air'
        # NOTE: stack-height based compartment assignment (air/ground,
        # air/low, air/high, air/very high) was previously considered here
        # but is intentionally disabled.
    else:
        nei['DataReliability'] = 3
    # add Source column
    nei['Source'] = source
    # BUG FIX: reset_index returns a new frame; the original discarded the
    # result, leaving duplicate indices from the per-file concat.
    nei = nei.reset_index(drop=True)
    return nei
def generate_national_totals(year):
    """
    Download and parse pollutant national totals from 'Facility-level by
    Pollutant' data downloaded from the EPA website. Used for validation.
    Creates the NEI_<year>_NationalTotals.csv file and records the source
    in validationSets_Sources.csv.

    :param year : str, Year of NEI data for comparison.
    """
    log.info('Downloading national totals')

    # generate url based on data year
    build_url = _config['national_url']
    version = _config['national_version'][year]
    url = build_url.replace('__year__', year)
    url = url.replace('__version__', version)

    # make http request
    # BUG FIX: `r` was previously initialized to a list, so a failed request
    # crashed on r.raise_for_status() with AttributeError instead of logging.
    r = None
    try:
        # NOTE(review): verify=False disables TLS certificate checks -
        # confirm this is intentional for the EPA host.
        r = requests.Session().get(url, verify=False)
    except requests.exceptions.ConnectionError:
        log.error("URL Connection Error for " + url)
        return
    try:
        r.raise_for_status()
    except requests.exceptions.HTTPError:
        log.error('Error in URL request!')
        return

    # extract data from zip archive
    z = zipfile.ZipFile(io.BytesIO(r.content))
    # retain only those archive members that are in .csv format
    znames = [s for s in z.namelist() if '.csv' in s]
    # column headings vary across years; accept any of these
    usecols = ['pollutant code', 'pollutant_cd',
               'pollutant desc', 'pollutant_desc', 'description',
               'total emissions', 'total_emissions',
               'emissions uom', 'uom'
               ]
    # read each .csv member, keeping only the recognized columns, and
    # concatenate once at the end (avoids repeated O(n) concat)
    frames = []
    for name in znames:
        headers = pd.read_csv(z.open(name), nrows=0)
        cols = [x for x in headers.columns if x in usecols]
        frames.append(pd.read_csv(z.open(name), usecols=cols))
    df = pd.concat(frames)

    # rename columns to match standard format
    df.columns = ['FlowID', 'FlowName', 'FlowAmount', 'UOM']
    # convert LB/TON to KG
    df['FlowAmount'] = np.where(df['UOM'] == 'LB',
                              df['FlowAmount'] * lb_kg,
                              df['FlowAmount'] * USton_kg)
    # keyword form - positional axis was removed in pandas 2.0
    df = df.drop(columns=['UOM'])
    # sum across all facilities to create national totals
    df = df.groupby(['FlowID', 'FlowName'])['FlowAmount'].sum().reset_index()
    # save national totals to .csv
    df.rename(columns={'FlowAmount': 'FlowAmount[kg]'}, inplace=True)
    log.info('saving NEI_%s_NationalTotals.csv to %s', year, data_dir)
    df.to_csv(data_dir + 'NEI_' + year + '_NationalTotals.csv', index=False)

    # Update validationSets_Sources.csv
    validation_dict = {'Inventory': 'NEI',
                       'Version': version,
                       'Year': year,
                       'Name': '<NAME>',
                       'URL': url,
                       'Criteria': 'Data Summaries tab, Facility-level by '
                       'Pollutant zip file download, summed to national level',
                       }
    update_validationsets_sources(validation_dict)
def validate_national_totals(nei_flowbyfacility, year):
    """Validate the flowbyfacility inventory against pollutant national
    totals, generating the totals file first if it is not already present.

    :param nei_flowbyfacility: DataFrame, flow-by-facility inventory
    :param year: str, NEI year being validated
    """
    log.info('validating flow by facility against national totals')
    totals_path = data_dir + 'NEI_' + year + '_NationalTotals.csv'
    if os.path.exists(totals_path):
        log.info('using already processed national totals validation file')
    else:
        generate_national_totals(year)
    totals = pd.read_csv(totals_path, header=0,
                         dtype={"FlowAmount[kg]": float})
    totals.rename(columns={'FlowAmount[kg]': 'FlowAmount'}, inplace=True)
    result = validate_inventory(nei_flowbyfacility, totals,
                                group_by='flow', tolerance=5.0)
    write_validation_result('NEI', year, result)
def generate_metadata(year, datatype='inventory'):
    """Collect source-file metadata for the given year and write it to .json.

    Only the 'inventory' datatype is handled; any other value is a no-op.
    """
    nei_file_path = _config[year]['file_name']
    if datatype != 'inventory':
        return
    source_meta = []
    for file in nei_file_path:
        file_meta = set_stewi_meta(strip_file_extension(file), ext_folder)
        source_meta.append(read_source_metadata(paths, file_meta, force_JSON=True))
    write_metadata('NEI_' + year, source_meta, datatype=datatype)
def main(**kwargs):
    """Command-line driver.

    Option A: download NEI point data and generate StEWI inventory outputs
    (flowbyfacility, flowbyprocess, flow, facility) plus metadata, then
    validate against national totals for years where totals exist.
    Option B: download national totals only.

    :param kwargs: optional 'Option' and 'Year' keys; when empty, values
        are parsed from the command line via argparse.
    """
    parser = argparse.ArgumentParser(argument_default = argparse.SUPPRESS)
    parser.add_argument('Option',
                        help = 'What do you want to do:\
                        [A] Download NEI data and \
                        generate StEWI inventory outputs and validate \
                        to national totals\
                        [B] Download national totals',
                        type = str)
    parser.add_argument('-Y', '--Year', nargs = '+',
                        help = 'What NEI year(s) you want to retrieve',
                        type = str)
    if len(kwargs) == 0:
        kwargs = vars(parser.parse_args())
    for year in kwargs['Year']:
        if kwargs['Option'] == 'A':
            nei_point = standardize_output(year)
            log.info('generating flow by facility output')
            nei_flowbyfacility = aggregate(nei_point, ['FacilityID','FlowName'])
            store_inventory(nei_flowbyfacility,'NEI_'+year,'flowbyfacility')
            log.debug(len(nei_flowbyfacility))
            # record counts from previous runs, for reference:
            #2017: 2184786
            #2016: 1965918
            #2014: 2057249
            #2011: 1840866
            log.info('generating flow by SCC output')
            nei_flowbyprocess = aggregate(nei_point, ['FacilityID',
                                                     'FlowName','Process'])
            nei_flowbyprocess['ProcessType'] = 'SCC'
            store_inventory(nei_flowbyprocess, 'NEI_'+year, 'flowbyprocess')
            log.debug(len(nei_flowbyprocess))
            #2017: 4055707
            log.info('generating flows output')
            nei_flows = nei_point[['FlowName', 'FlowID', 'Compartment']]
            nei_flows = nei_flows.drop_duplicates()
            nei_flows['Unit']='kg'
            nei_flows = nei_flows.sort_values(by='FlowName',axis=0)
            store_inventory(nei_flows, 'NEI_'+year, 'flow')
            log.debug(len(nei_flows))
            #2017: 293
            #2016: 282
            #2014: 279
            #2011: 277
            log.info('generating facility output')
            facility = nei_point[['FacilityID', 'FacilityName', 'Address',
                                  'City', 'State', 'Zip', 'Latitude',
                                  'Longitude', 'NAICS', 'County']]
            facility = facility.drop_duplicates('FacilityID')
            facility = facility.astype({'Zip':'str'})
            store_inventory(facility, 'NEI_'+year, 'facility')
            log.debug(len(facility))
            #2017: 87162
            #2016: 85802
            #2014: 85125
            #2011: 95565
            generate_metadata(year, datatype='inventory')
            # national totals are only published for these years
            if year in ['2011','2014','2017']:
                validate_national_totals(nei_flowbyfacility, year)
            else:
                log.info('no validation performed')
        elif kwargs['Option'] == 'B':
            if year in ['2011','2014','2017']:
                generate_national_totals(year)
            else:
                log.info('national totals do not exist for year %s' % year)
if __name__ == '__main__':
    # CLI entry point; Option/Year arguments are parsed inside main().
    main()
| [
"stewi.globals.write_metadata",
"stewi.globals.config",
"argparse.ArgumentParser",
"pandas.read_csv",
"stewi.validate.write_validation_result",
"esupy.util.strip_file_extension",
"pandas.read_table",
"pandas.DataFrame",
"stewi.globals.aggregate",
"requests.Session",
"os.path.exists",
"stewi.va... | [((1683, 1747), 'pandas.read_table', 'pd.read_table', (["(nei_data_dir + 'NEI_required_fields.csv')"], {'sep': '""","""'}), "(nei_data_dir + 'NEI_required_fields.csv', sep=',')\n", (1696, 1747), True, 'import pandas as pd\n'), ((1892, 1948), 'pandas.read_parquet', 'pd.read_parquet', (['file'], {'columns': 'usecols', 'engine': '"""pyarrow"""'}), "(file, columns=usecols, engine='pyarrow')\n", (1907, 1948), True, 'import pandas as pd\n'), ((2387, 2401), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2399, 2401), True, 'import pandas as pd\n'), ((3288, 3331), 'stewi.globals.log.info', 'log.info', (['"""adding Data Quality information"""'], {}), "('adding Data Quality information')\n", (3296, 3331), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((4874, 4913), 'stewi.globals.log.info', 'log.info', (['"""Downloading national totals"""'], {}), "('Downloading national totals')\n", (4882, 4913), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((5781, 5795), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5793, 5795), True, 'import pandas as pd\n'), ((6674, 6760), 'numpy.where', 'np.where', (["(df['UOM'] == 'LB')", "(df['FlowAmount'] * lb_kg)", "(df['FlowAmount'] * USton_kg)"], {}), "(df['UOM'] == 'LB', df['FlowAmount'] * lb_kg, df['FlowAmount'] *\n USton_kg)\n", (6682, 6760), True, 'import numpy as np\n'), ((7053, 7119), 'stewi.globals.log.info', 'log.info', (['"""saving NEI_%s_NationalTotals.csv to %s"""', 'year', 'data_dir'], {}), "('saving NEI_%s_NationalTotals.csv to %s', year, data_dir)\n", (7061, 7119), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, 
aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((7613, 7659), 'stewi.validate.update_validationsets_sources', 'update_validationsets_sources', (['validation_dict'], {}), '(validation_dict)\n', (7642, 7659), False, 'from stewi.validate import update_validationsets_sources, validate_inventory, write_validation_result\n'), ((7752, 7815), 'stewi.globals.log.info', 'log.info', (['"""validating flow by facility against national totals"""'], {}), "('validating flow by facility against national totals')\n", (7760, 7815), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((8044, 8152), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + 'NEI_' + year + '_NationalTotals.csv')"], {'header': '(0)', 'dtype': "{'FlowAmount[kg]': float}"}), "(data_dir + 'NEI_' + year + '_NationalTotals.csv', header=0,\n dtype={'FlowAmount[kg]': float})\n", (8055, 8152), True, 'import pandas as pd\n'), ((8365, 8460), 'stewi.validate.validate_inventory', 'validate_inventory', (['nei_flowbyfacility', 'nei_national_totals'], {'group_by': '"""flow"""', 'tolerance': '(5.0)'}), "(nei_flowbyfacility, nei_national_totals, group_by='flow',\n tolerance=5.0)\n", (8383, 8460), False, 'from stewi.validate import update_validationsets_sources, validate_inventory, write_validation_result\n'), ((8547, 8602), 'stewi.validate.write_validation_result', 'write_validation_result', (['"""NEI"""', 'year', 'validation_result'], {}), "('NEI', year, validation_result)\n", (8570, 8602), False, 'from stewi.validate import update_validationsets_sources, validate_inventory, write_validation_result\n'), ((9118, 9177), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'argument_default': 'argparse.SUPPRESS'}), '(argument_default=argparse.SUPPRESS)\n', (9141, 9177), False, 'import argparse\n'), ((1125, 1133), 'stewi.globals.config', 'config', ([], {}), 
'()\n', (1131, 1133), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((3032, 3092), 'stewi.globals.log.info', 'log.info', (["('reading NEI data from ' + nei_external_dir + file)"], {}), "('reading NEI data from ' + nei_external_dir + file)\n", (3040, 3092), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((3390, 3429), 'stewi.globals.get_reliability_table_for_source', 'get_reliability_table_for_source', (['"""NEI"""'], {}), "('NEI')\n", (3422, 3429), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((5532, 5553), 'io.BytesIO', 'io.BytesIO', (['r.content'], {}), '(r.content)\n', (5542, 5553), False, 'import io\n'), ((7827, 7891), 'os.path.exists', 'os.path.exists', (["(data_dir + 'NEI_' + year + '_NationalTotals.csv')"], {}), "(data_dir + 'NEI_' + year + '_NationalTotals.csv')\n", (7841, 7891), False, 'import os\n'), ((7950, 8017), 'stewi.globals.log.info', 'log.info', (['"""using already processed national totals validation file"""'], {}), "('using already processed national totals validation file')\n", (7958, 8017), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((9014, 9075), 'stewi.globals.write_metadata', 'write_metadata', (["('NEI_' + year)", 'source_meta'], {'datatype': 'datatype'}), "('NEI_' + year, source_meta, datatype=datatype)\n", (9028, 9075), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, 
read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((2568, 2607), 'os.path.exists', 'os.path.exists', (['(nei_external_dir + file)'], {}), '(nei_external_dir + file)\n', (2582, 2607), False, 'import os\n'), ((2623, 2702), 'stewi.globals.log.info', 'log.info', (['"""%s not found in %s, downloading source data"""', 'file', 'nei_external_dir'], {}), "('%s not found in %s, downloading source data', file, nei_external_dir)\n", (2631, 2702), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((2947, 2985), 'esupy.processed_data_mgmt.download_from_remote', 'download_from_remote', (['file_meta', 'paths'], {}), '(file_meta, paths)\n', (2967, 2985), False, 'from esupy.processed_data_mgmt import download_from_remote\n'), ((5298, 5342), 'stewi.globals.log.error', 'log.error', (["('URL Connection Error for ' + url)"], {}), "('URL Connection Error for ' + url)\n", (5307, 5342), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((5431, 5465), 'stewi.globals.log.error', 'log.error', (['"""Error in URL request!"""'], {}), "('Error in URL request!')\n", (5440, 5465), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((9910, 9956), 'stewi.globals.log.info', 'log.info', (['"""generating flow by facility output"""'], {}), "('generating flow by facility output')\n", (9918, 9956), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((9990, 10038), 
'stewi.globals.aggregate', 'aggregate', (['nei_point', "['FacilityID', 'FlowName']"], {}), "(nei_point, ['FacilityID', 'FlowName'])\n", (9999, 10038), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((10050, 10118), 'stewi.globals.store_inventory', 'store_inventory', (['nei_flowbyfacility', "('NEI_' + year)", '"""flowbyfacility"""'], {}), "(nei_flowbyfacility, 'NEI_' + year, 'flowbyfacility')\n", (10065, 10118), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((10283, 10324), 'stewi.globals.log.info', 'log.info', (['"""generating flow by SCC output"""'], {}), "('generating flow by SCC output')\n", (10291, 10324), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((10357, 10416), 'stewi.globals.aggregate', 'aggregate', (['nei_point', "['FacilityID', 'FlowName', 'Process']"], {}), "(nei_point, ['FacilityID', 'FlowName', 'Process'])\n", (10366, 10416), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((10535, 10601), 'stewi.globals.store_inventory', 'store_inventory', (['nei_flowbyprocess', "('NEI_' + year)", '"""flowbyprocess"""'], {}), "(nei_flowbyprocess, 'NEI_' + year, 'flowbyprocess')\n", (10550, 10601), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((10686, 10721), 'stewi.globals.log.info', 'log.info', 
(['"""generating flows output"""'], {}), "('generating flows output')\n", (10694, 10721), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((10962, 11011), 'stewi.globals.store_inventory', 'store_inventory', (['nei_flows', "('NEI_' + year)", '"""flow"""'], {}), "(nei_flows, 'NEI_' + year, 'flow')\n", (10977, 11011), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((11153, 11191), 'stewi.globals.log.info', 'log.info', (['"""generating facility output"""'], {}), "('generating facility output')\n", (11161, 11191), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((11532, 11584), 'stewi.globals.store_inventory', 'store_inventory', (['facility', "('NEI_' + year)", '"""facility"""'], {}), "(facility, 'NEI_' + year, 'facility')\n", (11547, 11584), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((2811, 2837), 'esupy.util.strip_file_extension', 'strip_file_extension', (['file'], {}), '(file)\n', (2831, 2837), False, 'from esupy.util import strip_file_extension\n'), ((5200, 5218), 'requests.Session', 'requests.Session', ([], {}), '()\n', (5216, 5218), False, 'import requests\n'), ((8883, 8909), 'esupy.util.strip_file_extension', 'strip_file_extension', (['file'], {}), '(file)\n', (8903, 8909), False, 'from esupy.util import strip_file_extension\n'), ((8954, 9004), 'stewi.globals.read_source_metadata', 'read_source_metadata', (['paths', 'meta'], {'force_JSON': '(True)'}), 
'(paths, meta, force_JSON=True)\n', (8974, 9004), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((11941, 11976), 'stewi.globals.log.info', 'log.info', (['"""no validation performed"""'], {}), "('no validation performed')\n", (11949, 11976), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n'), ((12164, 12223), 'stewi.globals.log.info', 'log.info', (["('national totals do not exist for year %s' % year)"], {}), "('national totals do not exist for year %s' % year)\n", (12172, 12223), False, 'from stewi.globals import data_dir, write_metadata, USton_kg, lb_kg, log, store_inventory, config, read_source_metadata, paths, aggregate, get_reliability_table_for_source, set_stewi_meta\n')] |
import numpy as np
from argparse import ArgumentParser
from distutils.util import strtobool
from tpp.models import ENCODER_NAMES, DECODER_NAMES
def parse_args(allow_unknown=False):
    """Build and parse the command-line arguments for TPP training/evaluation.

    Args:
        allow_unknown: If `True`, unrecognised command-line options are
            ignored (`parse_known_args`); otherwise they raise an error.

    Returns:
        An `argparse.Namespace`. After parsing, `mu` is a numpy array and
        `alpha`/`beta` are numpy arrays reshaped to `[marks, marks]`; if
        `--marks` is given, all three are instead drawn uniformly at random
        (seeded by `--hawkes-seed`). `mu` and `alpha` are always rescaled by
        the number of marks.
    """
    def _bool(x):
        # argparse-friendly boolean parser: accepts 'true'/'false', '1'/'0',
        # 'yes'/'no', etc.  Used for every boolean-valued option below.
        return bool(strtobool(x))

    parser = ArgumentParser(allow_abbrev=False)
    # Run configuration
    parser.add_argument("--seed", type=int, default=0, help="The random seed.")
    parser.add_argument("--padding-id", type=float, default=-1.,
                        help="The value used in the temporal sequences to "
                             "indicate a non-event.")
    parser.add_argument('--disable-cuda', action='store_true',
                        help='Disable CUDA')
    parser.add_argument('--verbose', action='store_true',
                        help='If `True`, prints all the things.')
    # Simulator configuration
    parser.add_argument("--mu", type=float, default=[0.05, 0.05],
                        nargs="+", metavar='N',
                        help="The baseline intensity for the data generator.")
    parser.add_argument("--alpha", type=float, default=[0.1, 0.2, 0.2, 0.1],
                        nargs="+", metavar='N',
                        help="The event parameter for the data generator. "
                             "This will be reshaped into a matrix the size of "
                             "[mu,mu].")
    parser.add_argument("--beta", type=float, default=[1.0, 1.0, 1.0, 1.0],
                        nargs="+", metavar='N',
                        help="The decay parameter for the data generator. "
                             "This will be reshaped into a matrix the size of "
                             "[mu,mu].")
    parser.add_argument("--marks", type=int, default=None,
                        help="Generate a process with this many marks. "
                             "Defaults to `None`. If this is set to an "
                             "integer, it will override `alpha`, `beta` and "
                             "`mu` with randomly generated values "
                             "corresponding to the number of requested marks.")
    parser.add_argument("--hawkes-seed", type=int, default=0,
                        help="The random seed for generating the `alpha, "
                             "`beta` and `mu` if `marks` is not `None`.")
    parser.add_argument("--window", type=int, default=100,
                        help="The window of the simulated process.py. Also "
                             "taken as the window of any parametric Hawkes "
                             "model if chosen.")
    parser.add_argument("--train-size", type=int, default=128,
                        help="The number of unique sequences in each of the "
                             "train dataset.")
    parser.add_argument("--val-size", type=int, default=128,
                        help="The number of unique sequences in each of the "
                             "validation dataset.")
    parser.add_argument("--test-size", type=int, default=128,
                        help="The number of unique sequences in each of the "
                             "test dataset.")
    # Common model hyperparameters
    parser.add_argument("--include-poisson", type=_bool, default=True,
                        help="Include base intensity (where appropriate).")
    parser.add_argument("--batch-size", type=int, default=32,
                        help="The batch size to use for parametric model"
                             " training and evaluation.")
    parser.add_argument("--train-epochs", type=int, default=501,
                        help="The number of training epochs.")
    parser.add_argument("--use-coefficients", type=_bool, default=True,
                        help="If true, the modular process will be trained "
                             "with coefficients")
    parser.add_argument("--multi-labels", type=_bool, default=False,
                        help="Whether the likelihood is computed on "
                             "multi-labels events or not")
    parser.add_argument("--time-scale", type=float, default=1.,
                        help='Time scale used to prevent overflow')
    # Learning rate and patience parameters
    parser.add_argument("--lr-rate-init", type=float, default=0.01,
                        help='initial learning rate for optimization')
    parser.add_argument("--lr-poisson-rate-init", type=float, default=0.01,
                        help='initial poisson learning rate for optimization')
    parser.add_argument("--lr-scheduler",
                        choices=['plateau', 'step', 'milestones', 'cos',
                                 'findlr', 'noam', 'clr', 'calr'],
                        default='noam',
                        help='method to adjust learning rate')
    parser.add_argument("--lr-scheduler-patience", type=int, default=10,
                        help='lr scheduler plateau: Number of epochs with no '
                             'improvement after which learning rate will be '
                             'reduced')
    parser.add_argument("--lr-scheduler-step-size", type=int, default=10,
                        help='lr scheduler step: number of epochs of '
                             'learning rate decay.')
    parser.add_argument("--lr-scheduler-gamma", type=float, default=0.5,
                        help='learning rate is multiplied by the gamma to '
                             'decrease it')
    parser.add_argument("--lr-scheduler-warmup", type=int, default=10,
                        help='The number of epochs to linearly increase the '
                             'learning rate. (noam only)')
    parser.add_argument("--patience", type=int, default=501,
                        help="The patience for early stopping.")
    parser.add_argument("--loss-relative-tolerance", type=float,
                        default=None,
                        help="The relative factor that the loss needs to "
                             "decrease by in order to not contribute to "
                             "patience. If `None`, will not use numerical "
                             "convergence to control early stopping. Defaults "
                             "to `None`.")
    parser.add_argument("--mu-cheat", type=_bool, default=False,
                        help="If True, the starting mu value will be the "
                             "actual mu value. Defaults to False.")
    # Encoder specific hyperparameters
    parser.add_argument("--encoder", type=str, default="stub",
                        choices=ENCODER_NAMES,
                        help="The type of encoder to use.")
    # Encoder - Fixed history
    parser.add_argument("--encoder-history-size", type=int, default=3,
                        help="The (fixed) history length to use for fixed "
                             "history size parametric models.")
    # Encoder - Variable history
    parser.add_argument("--encoder-emb-dim", type=int, default=4,
                        help="Size of the embeddings. This is the size of the "
                             "temporal encoding and/or the label embedding if "
                             "either is used.")
    parser.add_argument("--encoder-encoding", type=str, default="times_only",
                        choices=["times_only", "marks_only", "concatenate",
                                 "temporal", "learnable",
                                 "temporal_with_labels",
                                 "learnable_with_labels"],
                        help="Type of the event encoding.")
    parser.add_argument("--encoder-time-encoding", type=str,
                        default="relative", choices=["absolute", "relative"])
    parser.add_argument("--encoder-temporal-scaling", type=float, default=1.,
                        help="Rescale of times when using temporal encoding")
    parser.add_argument("--encoder-embedding-constraint", type=str,
                        default=None,
                        help="Constraint on the embeddings. Either `None`, "
                             "'nonneg', 'sigmoid', 'softplus'. "
                             "Defaults to `None`.")
    # Encoder - MLP
    parser.add_argument("--encoder-units-mlp", type=int, default=[],
                        nargs="+", metavar='N',
                        help="Size of hidden layers in the encoder MLP. "
                             "This will have the decoder input size appended "
                             "to it during model build.")
    parser.add_argument("--encoder-dropout-mlp", type=float, default=0.,
                        help="Dropout rate of the MLP")
    parser.add_argument("--encoder-activation-mlp", type=str, default="relu",
                        help="Activation function of the MLP")
    parser.add_argument("--encoder-constraint-mlp", type=str, default=None,
                        help="Constraint on the mlp weights. Either `None`, "
                             "'nonneg', 'sigmoid', 'softplus'. "
                             "Defaults to `None`.")
    parser.add_argument("--encoder-activation-final-mlp",
                        type=str, default=None,
                        help="Final activation function of the MLP.")
    # Encoder - RNN/Transformer
    parser.add_argument("--encoder-attn-activation",
                        type=str, default="softmax",
                        choices=["identity", "sigmoid", "softmax"],
                        help="Activation function of the attention "
                             "coefficients")
    parser.add_argument("--encoder-dropout-rnn", type=float, default=0.,
                        help="Dropout rate of the RNN.")
    parser.add_argument("--encoder-layers-rnn", type=int, default=1,
                        help="Number of layers for RNN and self-attention "
                             "encoder.")
    parser.add_argument("--encoder-units-rnn", type=int, default=32,
                        help="Hidden size for RNN and self attention encoder.")
    parser.add_argument("--encoder-n-heads", type=int, default=1,
                        help="Number of heads for the transformer")
    parser.add_argument("--encoder-constraint-rnn", type=str, default=None,
                        help="Constraint on the rnn/sa weights. Either `None`,"
                             "'nonneg', 'sigmoid', 'softplus'. "
                             "Defaults to `None`.")
    # Decoder specific hyperparameters
    parser.add_argument("--decoder", type=str, default="hawkes",
                        choices=DECODER_NAMES,
                        help="The type of decoder to use.")
    parser.add_argument("--decoder-mc-prop-est", type=float, default=1.,
                        help="Proportion of MC samples, "
                             "compared to dataset size")
    parser.add_argument("--decoder-model-log-cm", type=_bool, default=False,
                        help="Whether the cumulative models the log integral"
                             "or the integral")
    parser.add_argument("--decoder-do-zero-subtraction", type=_bool,
                        default=True,
                        help="For cumulative estimation. If `True` the class "
                             "computes Lambda(tau) = f(tau) - f(0) "
                             "in order to enforce Lambda(0) = 0. Defaults to "
                             "`true`, where instead Lambda(tau) = f(tau).")
    # Decoder - Variable history
    parser.add_argument("--decoder-emb-dim", type=int, default=4,
                        help="Size of the embeddings. This is the size of the "
                             "temporal encoding and/or the label embedding if "
                             "either is used.")
    parser.add_argument("--decoder-encoding", type=str, default="times_only",
                        choices=["times_only", "marks_only", "concatenate",
                                 "temporal", "learnable",
                                 "temporal_with_labels",
                                 "learnable_with_labels"],
                        help="Type of the event decoding.")
    parser.add_argument("--decoder-time-encoding", type=str,
                        default="relative", choices=["absolute", "relative"])
    parser.add_argument("--decoder-temporal-scaling", type=float, default=1.,
                        help="Rescale of times when using temporal encoding")
    parser.add_argument("--decoder-embedding-constraint", type=str,
                        default=None,
                        help="Constraint on the embeddings. Either `None`, "
                             "'nonneg', 'sigmoid', 'softplus'. "
                             "Defaults to `None`.")
    # Decoder - MLP
    parser.add_argument("--decoder-units-mlp", type=int, default=[],
                        nargs="+", metavar='N',
                        help="Size of hidden layers in the decoder MLP. "
                             "This will have the number of marks appended "
                             "to it during model build.")
    parser.add_argument("--decoder-dropout-mlp", type=float, default=0.,
                        help="Dropout rate of the MLP")
    parser.add_argument("--decoder-activation-mlp", type=str, default="relu",
                        help="Activation function of the MLP")
    parser.add_argument("--decoder-activation-final-mlp",
                        type=str, default=None,
                        help="Final activation function of the MLP.")
    parser.add_argument("--decoder-constraint-mlp", type=str, default=None,
                        help="Constraint on the mlp weights. Either `None`, "
                             "'nonneg', 'sigmoid', 'softplus'. "
                             "Defaults to `None`.")
    # Decoder - RNN/Transformer
    parser.add_argument("--decoder-attn-activation", type=str,
                        default="softmax",
                        choices=["identity", "sigmoid", "softmax"],
                        help="Activation function of the attention "
                             "coefficients")
    parser.add_argument("--decoder-activation-rnn", type=str, default="relu",
                        help="Activation for the rnn.")
    parser.add_argument("--decoder-dropout-rnn", type=float, default=0.,
                        help="Dropout rate of the RNN")
    parser.add_argument("--decoder-layers-rnn", type=int, default=1,
                        help="Number of layers for self attention decoder.")
    parser.add_argument("--decoder-units-rnn", type=int, default=32,
                        help="Hidden size for self attention decoder.")
    parser.add_argument("--decoder-n-heads", type=int, default=1,
                        help="Number of heads for the transformer")
    parser.add_argument("--decoder-constraint-rnn", type=str, default=None,
                        help="Constraint on the rnn/sa weights. Either `None`,"
                             "'nonneg', 'sigmoid', 'softplus'. "
                             "Defaults to `None`.")
    # Decoder - MM
    parser.add_argument("--decoder-n-mixture", type=int, default=32,
                        help="Number of mixtures for the log normal mixture"
                             "model")
    # MLFlow
    parser.add_argument("--no-mlflow", dest="use_mlflow", action="store_false",
                        help="Do not use MLflow (default=False)")
    parser.add_argument("--experiment-name",
                        type=str,
                        default="Default",
                        help="Name of MLflow experiment")
    parser.add_argument("--run-name", type=str, default="Default",
                        help="Name of MLflow run")
    parser.add_argument("--remote-server-uri",
                        type=str,
                        default="http://192.168.4.94:1234/",
                        help="Remote MLflow server URI")
    # NOTE: default was the string "1"; argparse only accepted it because it
    # re-parses string defaults through `type`.  An int default is direct.
    parser.add_argument("--logging-frequency",
                        type=int,
                        default=1,
                        help="The frequency to log values to MLFlow.")
    # Load and save
    parser.add_argument("--load-from-dir", type=str, default=None,
                        help="If not None, load data from a directory")
    parser.add_argument("--save-model-freq", type=int, default=25,
                        help="The best model is saved every nth epoch")
    parser.add_argument("--eval-metrics", type=_bool, default=False,
                        help="The model is evaluated using several metrics")
    parser.add_argument("--eval-metrics-per-class", type=_bool, default=False,
                        help="The model is evaluated using several metrics "
                             "per class")
    parser.add_argument("--plots-dir", type=str,
                        default="~/neural-tpps/plots",
                        help="Directory to save the plots")
    parser.add_argument("--data-dir", type=str, default="~/neural-tpps/data",
                        help="Directory to save the preprocessed data")

    if allow_unknown:
        args = parser.parse_known_args()[0]
    else:
        args = parser.parse_args()

    if args.marks is None:
        # Use the explicit mu/alpha/beta values; alpha and beta are flattened
        # on the command line and reshaped to square [marks, marks] matrices.
        args.mu = np.array(args.mu, dtype=np.float32)
        args.alpha = np.array(args.alpha, dtype=np.float32).reshape(
            args.mu.shape * 2)
        args.beta = np.array(args.beta, dtype=np.float32).reshape(
            args.mu.shape * 2)
        args.marks = len(args.mu)
    else:
        # Randomly generate Hawkes parameters for the requested mark count.
        np.random.seed(args.hawkes_seed)
        args.mu = np.random.uniform(
            low=0.01, high=0.2, size=[args.marks]).astype(dtype=np.float32)
        args.alpha = np.random.uniform(
            low=0.01, high=0.2, size=[args.marks] * 2).astype(dtype=np.float32)
        args.beta = np.random.uniform(
            low=1.01, high=1.3, size=[args.marks] * 2).astype(dtype=np.float32)
    # Normalise intensities by the number of marks (applies to both branches).
    args.mu /= float(args.marks)
    args.alpha /= float(args.marks)

    return args
| [
"numpy.random.uniform",
"numpy.random.seed",
"argparse.ArgumentParser",
"distutils.util.strtobool",
"numpy.array"
] | [((198, 232), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'allow_abbrev': '(False)'}), '(allow_abbrev=False)\n', (212, 232), False, 'from argparse import ArgumentParser\n'), ((17560, 17595), 'numpy.array', 'np.array', (['args.mu'], {'dtype': 'np.float32'}), '(args.mu, dtype=np.float32)\n', (17568, 17595), True, 'import numpy as np\n'), ((17846, 17878), 'numpy.random.seed', 'np.random.seed', (['args.hawkes_seed'], {}), '(args.hawkes_seed)\n', (17860, 17878), True, 'import numpy as np\n'), ((17617, 17655), 'numpy.array', 'np.array', (['args.alpha'], {'dtype': 'np.float32'}), '(args.alpha, dtype=np.float32)\n', (17625, 17655), True, 'import numpy as np\n'), ((17716, 17753), 'numpy.array', 'np.array', (['args.beta'], {'dtype': 'np.float32'}), '(args.beta, dtype=np.float32)\n', (17724, 17753), True, 'import numpy as np\n'), ((17897, 17953), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.01)', 'high': '(0.2)', 'size': '[args.marks]'}), '(low=0.01, high=0.2, size=[args.marks])\n', (17914, 17953), True, 'import numpy as np\n'), ((18013, 18073), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.01)', 'high': '(0.2)', 'size': '([args.marks] * 2)'}), '(low=0.01, high=0.2, size=[args.marks] * 2)\n', (18030, 18073), True, 'import numpy as np\n'), ((18132, 18192), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(1.01)', 'high': '(1.3)', 'size': '([args.marks] * 2)'}), '(low=1.01, high=1.3, size=[args.marks] * 2)\n', (18149, 18192), True, 'import numpy as np\n'), ((3223, 3235), 'distutils.util.strtobool', 'strtobool', (['x'], {}), '(x)\n', (3232, 3235), False, 'from distutils.util import strtobool\n'), ((3740, 3752), 'distutils.util.strtobool', 'strtobool', (['x'], {}), '(x)\n', (3749, 3752), False, 'from distutils.util import strtobool\n'), ((3982, 3994), 'distutils.util.strtobool', 'strtobool', (['x'], {}), '(x)\n', (3991, 3994), False, 'from distutils.util import strtobool\n'), ((6424, 6436), 'distutils.util.strtobool', 
'strtobool', (['x'], {}), '(x)\n', (6433, 6436), False, 'from distutils.util import strtobool\n'), ((11087, 11099), 'distutils.util.strtobool', 'strtobool', (['x'], {}), '(x)\n', (11096, 11099), False, 'from distutils.util import strtobool\n'), ((11344, 11356), 'distutils.util.strtobool', 'strtobool', (['x'], {}), '(x)\n', (11353, 11356), False, 'from distutils.util import strtobool\n'), ((16737, 16749), 'distutils.util.strtobool', 'strtobool', (['x'], {}), '(x)\n', (16746, 16749), False, 'from distutils.util import strtobool\n'), ((16940, 16952), 'distutils.util.strtobool', 'strtobool', (['x'], {}), '(x)\n', (16949, 16952), False, 'from distutils.util import strtobool\n')] |
#! /usr/bin/env python
import argparse,os,subprocess
from wpipe import *
from stips.observation_module import ObservationModule
import numpy as np
# Map single-letter catalogue filter codes to full WFI filter names.
filtdict = dict(zip("RZYJHF",
                    ("R062", "Z087", "Y106", "J129", "H158", "F184")))
def register(PID, task_name):
    """Register this task with pipeline `PID` and attach its event masks."""
    pipeline = Pipeline.get(PID)
    task = Task(task_name, pipeline).create()
    # Fire on both 'start' and any new STIPS catalogue event.
    Task.add_mask(task, '*', 'start', task_name)
    Task.add_mask(task, '*', 'new_stips_catalog', '*')
    return
def hyak_stips(job_id, event_id, dp_id, stips_script):
    """Write a SLURM batch file for `stips_script` and submit it via sbatch."""
    myJob = Job.get(job_id)
    myPipe = Pipeline.get(int(myJob.pipeline_id))
    catalogID = Options.get('event', event_id)['dp_id']
    catalogDP = DataProduct.get(int(catalogID))
    myTarget = Target.get(int(catalogDP.target_id))
    myConfig = Configuration.get(int(catalogDP.config_id))
    myParams = Parameters.getParam(int(myConfig.config_id))
    fileroot = str(catalogDP.relativepath)
    filename = str(catalogDP.filename)  # for example, Mixed_h15_shell_3Mpc_Z.tbl
    filtroot = filename.split('_')[-1].split('.')[0]
    filtername = filtdict[filtroot]
    slurmfile = stips_script + '.slurm'
    batch_lines = [
        '#!/bin/bash',
        '## Job Name',
        '#SBATCH --job-name=stips' + str(dp_id),
        '## Allocation Definition ',
        '#SBATCH --account=astro',
        '#SBATCH --partition=astro',
        '## Resources',
        '## Nodes',
        '#SBATCH --ntasks=1',
        '## Walltime (3 hours)',
        '#SBATCH --time=10:00:00',
        '## Memory per node',
        '#SBATCH --mem=10G',
        '## Specify the working directory for this job',
        '#SBATCH --workdir=' + myConfig.procpath,
        '##turn on e-mail notification',
        '#SBATCH --mail-type=ALL',
        '#SBATCH --mail-user=<EMAIL>',
        'source activate forSTIPS',
        'python2.7 ' + stips_script,
    ]
    with open(slurmfile, 'w') as f:
        f.write('\n'.join(batch_lines))
    subprocess.run(['sbatch', slurmfile], cwd=myConfig.procpath)
def run_stips(job_id, event_id, dp_id, run_id):
    """Run one STIPS observation for the catalogue data product `dp_id`."""
    myJob = Job.get(job_id)
    myPipe = Pipeline.get(int(myJob.pipeline_id))
    catalogDP = DataProduct.get(int(dp_id))
    myTarget = Target.get(int(catalogDP.target_id))
    myConfig = Configuration.get(int(catalogDP.config_id))
    myParams = Parameters.getParam(int(myConfig.config_id))
    fileroot = str(catalogDP.relativepath)
    catname = str(catalogDP.filename)  # for example, Mixed_h15_shell_3Mpc_Z.tbl
    filtername = filtdict[catname.split('_')[-1].split('.')[0]]
    filename = fileroot + '/' + catname
    seed = np.random.randint(9999) + 1000
    # Read the three header lines; the third holds "(ra ... dec)" coordinates.
    with open(filename) as myfile:
        head = [next(myfile) for _ in range(3)]
    pos = head[2].split(' ')
    _, ra = pos[2].split('(')
    dec, _ = pos[4].split(')')
    print("Running ", filename, ra, dec)
    print("SEED ", seed)
    scene_general = {'ra': myParams['racent'], 'dec': myParams['deccent'],
                     'pa': 0.0, 'seed': seed}
    obs = {'instrument': 'WFI',
           'filters': [filtername],
           'detectors': 1,
           'distortion': False,
           'oversample': myParams['oversample'],
           'pupil_mask': '',
           'background': myParams['background_val'],
           'observations_id': str(dp_id),
           'exptime': myParams['exptime'],
           'offsets': [{'offset_id': str(run_id),
                        'offset_centre': False,
                        'offset_ra': 0.0,
                        'offset_dec': 0.0,
                        'offset_pa': 0.0}]}
    obm = ObservationModule(obs, scene_general=scene_general)
    obm.nextObservation()
    source_count_catalogues = obm.addCatalogue(str(filename))
    psf_file = obm.addError()
    fits_file, mosaic_file, params = obm.finalize(mosaic=False)
    # Register the simulated image as a new data product in the pipeline.
    _dp = DataProduct(filename=fits_file, relativepath=fileroot, group='proc',
                      subtype='stips_image', filtername=filtername,
                      ra=myParams['racent'], dec=myParams['deccent'],
                      configuration=myConfig).create()
def parse_all():
    """Parse the command-line options shared by registration and task runs."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--R', '-R', dest='REG', action='store_true',
                        help='Specify to Register')
    # (flags, dest, type, help) for the value-taking options.
    specs = [
        (('--P', '-p'), 'PID', int, 'Pipeline ID'),
        (('--N', '-n'), 'task_name', str, 'Name of Task to be Registered'),
        (('--E', '-e'), 'event_id', int, 'Event ID'),
        (('--J', '-j'), 'job_id', int, 'Job ID'),
        (('--DP', '-dp'), 'dp_id', int, 'Dataproduct ID'),
    ]
    for flags, dest, argtype, helptext in specs:
        parser.add_argument(*flags, dest=dest, type=argtype, help=helptext)
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_all()
    if args.REG:
        # Registration mode: install the task and its event masks, then exit.
        _t = register(int(args.PID),str(args.task_name))
    else:
        # Run mode: execute one STIPS simulation for the triggering event.
        job_id = int(args.job_id)
        event_id = int(args.event_id)
        event = Event.get(event_id)
        dp_id = Options.get('event',event_id)['dp_id']
        parent_job_id = int(event.job_id)
        compname = Options.get('event',event_id)['name']
        # Counter option on the parent job tracks how many runs are done.
        update_option = int(Options.get('job',parent_job_id)[compname])
        run_stips(job_id,event_id,dp_id,update_option)
        # Increment and persist the completion counter.
        update_option = update_option+1
        _update = Options.addOption('job',parent_job_id,compname,update_option)
        to_run = int(Options.get('event',event_id)['to_run'])
        completed = update_option
        thisjob = Job.get(job_id)
        catalogID = Options.get('event',event_id)['dp_id']
        catalogDP = DataProduct.get(int(catalogID))
        thisconf = Configuration.get(int(catalogDP.config_id))
        myTarget = Target.get(int(thisconf.target_id))
        print(''.join(["Completed ",str(completed)," of ",str(to_run)]))
        logprint(thisconf,thisjob,''.join(["Completed ",str(completed)," of ",str(to_run),"\n"]))
        if (completed>=to_run):
            # All sibling runs finished: fire a 'stips_done' event for every
            # simulated image of this target.
            logprint(thisconf,thisjob,''.join(["Completed ",str(completed)," and to run is ",str(to_run)," firing event\n"]))
            DP = DataProduct.get(int(dp_id))
            tid = int(DP.target_id)
            #image_dps = DataProduct.get({relativepath==config.procpath,subtype=='stips_image'})
            path = thisconf.procpath
            # NOTE(review): Store().select appears to return a pandas
            # DataFrame (iterrows below) — confirm against wpipe.Store.
            image_dps=Store().select('data_products', where='target_id=='+str(tid)+' & subtype=='+'"stips_image"')
            comp_name = 'completed'+myTarget['name']
            options = {comp_name:0}
            # Fresh per-target completion counter for the downstream task.
            _opt = Options(options).create('job',job_id)
            total = len(image_dps)
            #print(image_dps(0))
            for index, dps in image_dps.iterrows():
                print(dps)
                dpid = int(dps.dp_id)
                newevent = Job.getEvent(thisjob,'stips_done',options={'target_id':tid,'dp_id':dpid,'name':comp_name,'to_run':total})
                fire(newevent)
            logprint(thisconf,thisjob,'stips_done\n')
            logprint(thisconf,thisjob,''.join(["Event= ",str(event.event_id)]))
| [
"subprocess.run",
"stips.observation_module.ObservationModule",
"numpy.random.randint",
"argparse.ArgumentParser"
] | [((2151, 2211), 'subprocess.run', 'subprocess.run', (["['sbatch', slurmfile]"], {'cwd': 'myConfig.procpath'}), "(['sbatch', slurmfile], cwd=myConfig.procpath)\n", (2165, 2211), False, 'import argparse, os, subprocess\n'), ((3571, 3622), 'stips.observation_module.ObservationModule', 'ObservationModule', (['obs'], {'scene_general': 'scene_general'}), '(obs, scene_general=scene_general)\n', (3588, 3622), False, 'from stips.observation_module import ObservationModule\n'), ((4137, 4162), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4160, 4162), False, 'import argparse, os, subprocess\n'), ((2831, 2854), 'numpy.random.randint', 'np.random.randint', (['(9999)'], {}), '(9999)\n', (2848, 2854), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pylab as plt
class KalmanFilter(object):
    """Constant-velocity Kalman filter tracking a 2-D position.

    State vector: ``[x, y, vx, vy]^T``.  All covariances (P, Q, R) are
    initialized to identity matrices and the time step is fixed at
    ``delta_t = 1``.
    """

    def __init__(self, initial_position=(0, 0)):
        self.delta_t = 1
        self.P = np.identity(4)   # state estimate covariance
        self.Q = np.identity(4)   # process noise covariance
        self.R = np.identity(2)   # measurement noise covariance
        # Observation matrix: only the position components are measured
        self.H = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
        # Full-step state transition matrix
        self.A = np.identity(4)
        self.A[0, 2] = self.delta_t
        self.A[1, 3] = self.delta_t
        # Half-step transition matrix used by predict(mode=0)
        self.A_predict = np.identity(4)
        self.A_predict[0, 2] = self.delta_t / 2
        self.A_predict[1, 3] = self.delta_t / 2
        self.x_before = np.array([[initial_position[0], initial_position[1], 0, 0]])
        self.x_before = self.x_before.transpose()
        # Initial current priori state estimate and posteriori state estimate
        self.pre_before = None
        self.pre_current = None
        self.cor_current = None
        self.save_list = (0, 1)  # state components reported to callers (x, y)

    def run(self, new_data, show_priori=False, show_posteriori=False):
        """Advance the filter one step with measurement ``new_data``.

        :param new_data: [x, y] measurement for this time step
        :param show_priori: print the priori (predicted) state
        :param show_posteriori: print the posteriori (corrected) state
        :return: (priori [x, y], posteriori [x, y]) for this step
        """
        # Predict
        x_pre = self.predict(mode=1)
        P_pre = np.dot(np.dot(self.A, self.P), self.A.transpose()) + self.Q
        # Correct: K = P_pre H^T (H P_pre H^T + R)^-1
        k1 = np.dot(P_pre, self.H.transpose())
        k2 = np.dot(self.H, k1) + self.R
        K = np.dot(k1, np.linalg.inv(k2))
        z = np.array([new_data]).transpose()
        x_cor = x_pre + np.dot(K, z - np.dot(self.H, x_pre))
        self.P = np.dot(np.identity(self.P.shape[0]) - np.dot(K, self.H), P_pre)
        self.x_before = x_cor
        if show_priori:
            print(x_pre.transpose())
        if show_posteriori:
            print(x_cor)
        self.cor_current = [x_cor[i, 0] for i in self.save_list]
        return self.pre_current, self.cor_current

    def predict(self, mode=0, replace=False):
        """Predict the next state.

        :param mode: 0 -> half-step extrapolation (A_predict), otherwise
            full-step transition (A)
        :param replace: if True, commit the prediction as the new state
        :return: the predicted state column vector (4x1)
        """
        if mode == 0:
            x_pre = np.dot(self.A_predict, self.x_before)
        else:
            x_pre = np.dot(self.A, self.x_before)
        # replace x_before by predict value
        if replace:
            self.x_before = x_pre
        self.pre_before = self.pre_current
        self.pre_current = [x_pre[i, 0] for i in self.save_list]
        return x_pre

    def get_priori_estimate(self, reset=True, t=0):
        """Return the priori estimate at t=0 (current) or t=-1 (previous).

        :param reset: if True, clear the returned slot after reading
        :raises ValueError: for any ``t`` other than 0 or -1 (the previous
            implementation raised an UnboundLocalError in that case)
        """
        if t == 0:
            z_pri = self.pre_current
            if reset:
                self.pre_current = None
        elif t == -1:
            z_pri = self.pre_before
            if reset:
                self.pre_before = None
        else:
            raise ValueError('t must be 0 (current) or -1 (previous)')
        return z_pri

    def get_posteriori_estimate(self):
        """Return and clear the current posteriori estimate."""
        z_cor = self.cor_current
        self.cor_current = None
        return z_cor
def show(samples, pre_list, cor_list, show_priori=True, show_posteriori=True):
    """Plot the true track together with the filter's estimates.

    :param samples: sequence of [x, y] ground-truth points
    :param pre_list: priori state estimates, one [x, y] pair per sample
    :param cor_list: posteriori state estimates, one [x, y] pair per sample
    :param show_priori: overlay the priori estimates in red
    :param show_posteriori: overlay the posteriori estimates in green
    """
    xs = [point[0] for point in samples]
    ys = [point[1] for point in samples]
    plt.plot(xs, ys, label='Actual Track')
    plt.scatter(xs, ys)
    if show_priori:
        xs = [round(point[0], 3) for point in pre_list]
        ys = [round(point[1], 3) for point in pre_list]
        plt.plot(xs, ys, 'r', label='Priori state estimate')
        plt.scatter(xs, ys, c='r')
    if show_posteriori:
        xs = [round(point[0], 3) for point in cor_list]
        ys = [round(point[1], 3) for point in cor_list]
        plt.plot(xs, ys, 'g', label='Posteriori state estimate')
        plt.scatter(xs, ys, c='g')
    plt.legend()
    plt.show()
if __name__ == "__main__":
t = np.linspace(0, 10, 100)
x = np.sin(t)
x_pre_list = []
x_cor_list = []
x_samples = []
my_filter = KalmanFilter(initial_position=[t[0], x[0]])
import time
t0 = time.time()
for i in range(len(t)):
a, b = my_filter.run([t[i], x[i]])
x_samples.append([t[i], x[i]])
x_pre_list.append(a)
x_cor_list.append(b)
#my_filter.predict()
for i in range(10):
pass
#my_filter.run(my_filter.x_cor_list[-1])
t = time.time() - t0
print(t)
show(x_samples, x_pre_list, x_cor_list, show_posteriori=False)
| [
"matplotlib.pylab.scatter",
"matplotlib.pylab.legend",
"numpy.identity",
"time.time",
"matplotlib.pylab.plot",
"numpy.sin",
"numpy.array",
"numpy.linalg.inv",
"numpy.linspace",
"numpy.dot",
"matplotlib.pylab.show"
] | [((2840, 2876), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'y'], {'label': '"""Actual Track"""'}), "(x, y, label='Actual Track')\n", (2848, 2876), True, 'import matplotlib.pylab as plt\n'), ((2882, 2899), 'matplotlib.pylab.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (2893, 2899), True, 'import matplotlib.pylab as plt\n'), ((3429, 3441), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (3439, 3441), True, 'import matplotlib.pylab as plt\n'), ((3447, 3457), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (3455, 3457), True, 'import matplotlib.pylab as plt\n'), ((3499, 3522), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (3510, 3522), True, 'import numpy as np\n'), ((3532, 3541), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (3538, 3541), True, 'import numpy as np\n'), ((3692, 3703), 'time.time', 'time.time', ([], {}), '()\n', (3701, 3703), False, 'import time\n'), ((178, 192), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (189, 192), True, 'import numpy as np\n'), ((211, 225), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (222, 225), True, 'import numpy as np\n'), ((244, 258), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (255, 258), True, 'import numpy as np\n'), ((277, 315), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0]])\n', (285, 315), True, 'import numpy as np\n'), ((334, 348), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (345, 348), True, 'import numpy as np\n'), ((449, 463), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (460, 463), True, 'import numpy as np\n'), ((589, 649), 'numpy.array', 'np.array', (['[[initial_position[0], initial_position[1], 0, 0]]'], {}), '([[initial_position[0], initial_position[1], 0, 0]])\n', (597, 649), True, 'import numpy as np\n'), ((3072, 3122), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'y', '"""r"""'], {'label': '"""Priori state estimate"""'}), "(x, y, 
'r', label='Priori state estimate')\n", (3080, 3122), True, 'import matplotlib.pylab as plt\n'), ((3132, 3156), 'matplotlib.pylab.scatter', 'plt.scatter', (['x', 'y'], {'c': '"""r"""'}), "(x, y, c='r')\n", (3143, 3156), True, 'import matplotlib.pylab as plt\n'), ((3333, 3387), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'y', '"""g"""'], {'label': '"""Posteriori state estimate"""'}), "(x, y, 'g', label='Posteriori state estimate')\n", (3341, 3387), True, 'import matplotlib.pylab as plt\n'), ((3397, 3421), 'matplotlib.pylab.scatter', 'plt.scatter', (['x', 'y'], {'c': '"""g"""'}), "(x, y, c='g')\n", (3408, 3421), True, 'import matplotlib.pylab as plt\n'), ((4005, 4016), 'time.time', 'time.time', ([], {}), '()\n', (4014, 4016), False, 'import time\n'), ((1204, 1222), 'numpy.dot', 'np.dot', (['self.H', 'k1'], {}), '(self.H, k1)\n', (1210, 1222), True, 'import numpy as np\n'), ((1256, 1273), 'numpy.linalg.inv', 'np.linalg.inv', (['k2'], {}), '(k2)\n', (1269, 1273), True, 'import numpy as np\n'), ((1849, 1886), 'numpy.dot', 'np.dot', (['self.A_predict', 'self.x_before'], {}), '(self.A_predict, self.x_before)\n', (1855, 1886), True, 'import numpy as np\n'), ((1923, 1952), 'numpy.dot', 'np.dot', (['self.A', 'self.x_before'], {}), '(self.A, self.x_before)\n', (1929, 1952), True, 'import numpy as np\n'), ((1068, 1090), 'numpy.dot', 'np.dot', (['self.A', 'self.P'], {}), '(self.A, self.P)\n', (1074, 1090), True, 'import numpy as np\n'), ((1290, 1310), 'numpy.array', 'np.array', (['[new_data]'], {}), '([new_data])\n', (1298, 1310), True, 'import numpy as np\n'), ((1410, 1438), 'numpy.identity', 'np.identity', (['self.P.shape[0]'], {}), '(self.P.shape[0])\n', (1421, 1438), True, 'import numpy as np\n'), ((1441, 1458), 'numpy.dot', 'np.dot', (['K', 'self.H'], {}), '(K, self.H)\n', (1447, 1458), True, 'import numpy as np\n'), ((1362, 1383), 'numpy.dot', 'np.dot', (['self.H', 'x_pre'], {}), '(self.H, x_pre)\n', (1368, 1383), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Tutorial: Creating Visuals
==========================
02. Making physical measurements
--------------------------------
In the last tutorial we created a simple Visual subclass that draws a
rectangle. In this tutorial, we will make two additions:
1. Draw a rectangular border instead of a solid rectangle
2. Make the border a fixed pixel width, even when displayed inside a
user-zoomable ViewBox.
The border is made by drawing a line_strip with 10 vertices::
1--------------3
| |
| 2------4 | [ note that points 9 and 10 are
| | | | the same as points 1 and 2 ]
| 8------6 |
| |
7--------------5
In order to ensure that the border has a fixed width in pixels, we need to
adjust the spacing between the inner and outer rectangles whenever the user
changes the zoom of the ViewBox.
How? Recall that each
time the visual is drawn, it is given a TransformSystem instance that carries
information about the size of logical and physical pixels relative to the
visual [link to TransformSystem documentation]. Essentially, we have 4
coordinate systems:
Visual -> Document -> Framebuffer -> Render
The user specifies the position and size of the rectangle in Visual
coordinates, and in [tutorial 1] we used the vertex shader to convert directly
from Visual coordinates to render coordinates. In this tutorial we will
convert first to document coordinates, then make the adjustment for the border
width, then convert the remainder of the way to render coordinates.
Let's say, for example that the user specifies the box width to be 20, and the
border width to be 5. To draw the border correctly, we cannot simply
add/subtract 5 from the inner rectangle coordinates; if the user zooms
in by a factor of 2 then the border would become 10 px wide.
Another way to say this is that a vector with length=1 in Visual coordinates
does not _necessarily_ have a length of 1 pixel on the canvas. Instead, we must
make use of the Document coordinate system, in which a vector of length=1
does correspond to 1 pixel.
There are a few ways we could make this measurement of pixel length. Here's
how we'll do it in this tutorial:
1. Begin with vertices for a rectangle with border width 0 (that is, vertex
1 is the same as vertex 2, 3=4, and so on).
2. In the vertex shader, first map the vertices to the document coordinate
system using the visual->document transform.
3. Add/subtract the line width from the mapped vertices.
4. Map the rest of the way to render coordinates with a second transform:
document->framebuffer->render.
Note that this problem _cannot_ be solved using a simple scale factor! It is
necessary to use these transformations in order to draw correctly when there
is rotation or anisotropic scaling involved.
"""
from vispy import app, gloo, visuals, scene
import numpy as np
vertex_shader = """
void main() {
// First map the vertex to document coordinates
vec4 doc_pos = $visual_to_doc(vec4($position, 0, 1));
// Also need to map the adjustment direction vector, but this is tricky!
// We need to adjust separately for each component of the vector:
vec4 adjusted;
if ( $adjust_dir.x == 0. ) {
// If this is an outer vertex, no adjustment for line weight is needed.
// (In fact, trying to make the adjustment would result in no
// triangles being drawn, hence the if/else block)
adjusted = doc_pos;
}
else {
// Inner vertexes must be adjusted for line width, but this is
// surprisingly tricky given that the rectangle may have been scaled
// and rotated!
vec4 doc_x = $visual_to_doc(vec4($adjust_dir.x, 0, 0, 0)) -
$visual_to_doc(vec4(0, 0, 0, 0));
vec4 doc_y = $visual_to_doc(vec4(0, $adjust_dir.y, 0, 0)) -
$visual_to_doc(vec4(0, 0, 0, 0));
doc_x = normalize(doc_x);
doc_y = normalize(doc_y);
// Now doc_x + doc_y points in the direction we need in order to
// correct the line weight of _both_ segments, but the magnitude of
// that correction is wrong. To correct it we first need to
// measure the width that would result from using doc_x + doc_y:
vec4 proj_y_x = dot(doc_x, doc_y) * doc_x; // project y onto x
float cur_width = length(doc_y - proj_y_x); // measure current weight
// And now we can adjust vertex position for line width:
adjusted = doc_pos + ($line_width / cur_width) * (doc_x + doc_y);
}
// Finally map the remainder of the way to render coordinates
gl_Position = $doc_to_render(adjusted);
}
"""
fragment_shader = """
void main() {
gl_FragColor = $color;
}
"""
class MyRectVisual(visuals.Visual):
    """Visual that draws a rectangular outline with a fixed pixel border.

    Parameters
    ----------
    x : float
        x coordinate of rectangle origin
    y : float
        y coordinate of rectangle origin
    w : float
        width of rectangle
    h : float
        height of rectangle
    weight : float
        width of border (in px)
    """

    def __init__(self, x, y, w, h, weight=4.0):
        visuals.Visual.__init__(self, vertex_shader, fragment_shader)
        # The outline is drawn as a triangle_strip over 10 vertices: the four
        # corners plus the start corner repeated, each listed twice (an outer
        # copy and an inner copy that the shader shifts by the line width).
        corners = np.array([[x, y],
                            [x + w, y],
                            [x + w, y + h],
                            [x, y + h],
                            [x, y]], dtype=np.float32)
        self.vert_buffer = gloo.VertexBuffer(np.repeat(corners, 2, axis=0))
        # Direction each inner vertex should move to correct for line width;
        # outer vertices get (0, 0) so the shader leaves them untouched.
        # (The length of this vector will be corrected in the shader.)
        inward = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1], [1, 1]],
                          dtype=np.float32)
        adjust = np.zeros((10, 2), dtype=np.float32)
        adjust[1::2] = inward
        self.adj_buffer = gloo.VertexBuffer(adjust)
        self.shared_program.vert['position'] = self.vert_buffer
        self.shared_program.vert['adjust_dir'] = self.adj_buffer
        self.shared_program.vert['line_width'] = weight
        self.shared_program.frag['color'] = (1, 0, 0, 1)
        self.set_gl_state(cull_face=False)
        self._draw_mode = 'triangle_strip'

    def _prepare_transforms(self, view):
        # The vertex shader maps visual -> document (to measure pixel
        # lengths), then document -> render after adjusting for line width.
        transforms = view.transforms
        vert = view.view_program.vert
        vert['visual_to_doc'] = transforms.get_transform('visual', 'document')
        vert['doc_to_render'] = transforms.get_transform('document', 'render')
# As in the previous tutorial, we auto-generate a Visual+Node class for use
# in the scenegraph.
MyRect = scene.visuals.create_visual_node(MyRectVisual)
# Finally we will test the visual by displaying in a scene.
canvas = scene.SceneCanvas(keys='interactive', show=True)
# This time we add a ViewBox to let the user zoom/pan
view = canvas.central_widget.add_view()
view.camera = 'panzoom'
view.camera.rect = (0, 0, 800, 800)
# ..and add the rects to the view instead of canvas.scene
rects = [MyRect(100, 100, 200, 300, parent=view.scene),
         MyRect(500, 100, 200, 300, parent=view.scene)]
# Again, rotate one rectangle to ensure the transforms are working as we
# expect (the border must keep its pixel width under rotation).
tr = visuals.transforms.MatrixTransform()
tr.rotate(25, (0, 0, 1))
rects[1].transform = tr
# Add some text instructions
text = scene.visuals.Text("Drag right mouse button to zoom.", color='w',
                          anchor_x='left', parent=view, pos=(20, 30))
# ..and optionally start the event loop (skipped in interactive sessions)
if __name__ == '__main__':
    import sys
    if sys.flags.interactive != 1:
        app.run()
| [
"vispy.scene.visuals.Text",
"vispy.app.run",
"vispy.visuals.Visual.__init__",
"vispy.visuals.transforms.MatrixTransform",
"vispy.scene.visuals.create_visual_node",
"numpy.array",
"vispy.scene.SceneCanvas"
] | [((7262, 7308), 'vispy.scene.visuals.create_visual_node', 'scene.visuals.create_visual_node', (['MyRectVisual'], {}), '(MyRectVisual)\n', (7294, 7308), False, 'from vispy import app, gloo, visuals, scene\n'), ((7381, 7429), 'vispy.scene.SceneCanvas', 'scene.SceneCanvas', ([], {'keys': '"""interactive"""', 'show': '(True)'}), "(keys='interactive', show=True)\n", (7398, 7429), False, 'from vispy import app, gloo, visuals, scene\n'), ((7845, 7881), 'vispy.visuals.transforms.MatrixTransform', 'visuals.transforms.MatrixTransform', ([], {}), '()\n', (7879, 7881), False, 'from vispy import app, gloo, visuals, scene\n'), ((7968, 8082), 'vispy.scene.visuals.Text', 'scene.visuals.Text', (['"""Drag right mouse button to zoom."""'], {'color': '"""w"""', 'anchor_x': '"""left"""', 'parent': 'view', 'pos': '(20, 30)'}), "('Drag right mouse button to zoom.', color='w', anchor_x=\n 'left', parent=view, pos=(20, 30))\n", (7986, 8082), False, 'from vispy import app, gloo, visuals, scene\n'), ((5592, 5653), 'vispy.visuals.Visual.__init__', 'visuals.Visual.__init__', (['self', 'vertex_shader', 'fragment_shader'], {}), '(self, vertex_shader, fragment_shader)\n', (5615, 5653), False, 'from vispy import app, gloo, visuals, scene\n'), ((8230, 8239), 'vispy.app.run', 'app.run', ([], {}), '()\n', (8237, 8239), False, 'from vispy import app, gloo, visuals, scene\n'), ((5802, 5946), 'numpy.array', 'np.array', (['[[x, y], [x, y], [x + w, y], [x + w, y], [x + w, y + h], [x + w, y + h], [x,\n y + h], [x, y + h], [x, y], [x, y]]'], {'dtype': 'np.float32'}), '([[x, y], [x, y], [x + w, y], [x + w, y], [x + w, y + h], [x + w, y +\n h], [x, y + h], [x, y + h], [x, y], [x, y]], dtype=np.float32)\n', (5810, 5946), True, 'import numpy as np\n'), ((6250, 6367), 'numpy.array', 'np.array', (['[[0, 0], [1, 1], [0, 0], [-1, 1], [0, 0], [-1, -1], [0, 0], [1, -1], [0, 0],\n [1, 1]]'], {'dtype': 'np.float32'}), '([[0, 0], [1, 1], [0, 0], [-1, 1], [0, 0], [-1, -1], [0, 0], [1, -1\n ], [0, 0], [1, 1]], 
dtype=np.float32)\n', (6258, 6367), True, 'import numpy as np\n')] |
import numpy as np
import scipy as sp
import datajoint as dj
import matplotlib.pyplot as plt
from scipy import signal
from pipeline import experiment, tracking, ephys
# ======== Define some useful variables ==============
# Restriction selecting the side-view camera used throughout this module
_side_cam = {'tracking_device': 'Camera 0'}
# Per-part tracking attribute names, excluding the attributes shared with the
# parent tracking.Tracking table (i.e. only the part-specific columns)
_tracked_nose_features = [n for n in tracking.Tracking.NoseTracking.heading.names if n not in tracking.Tracking.heading.names]
_tracked_tongue_features = [n for n in tracking.Tracking.TongueTracking.heading.names if n not in tracking.Tracking.heading.names]
_tracked_jaw_features = [n for n in tracking.Tracking.JawTracking.heading.names if n not in tracking.Tracking.heading.names]
def plot_correct_proportion(session_key, window_size=None, axs=None, plot=True):
    """
    For a particular session (specified by session_key), extract all behavior trials.
    Get outcome of each trial, map to (0, 1) - 1 if 'hit'.
    Compute the moving average of these outcomes, based on the specified
    window_size (number of trials to average).
    window_size defaults to 10% of the total trial count when not specified,
    clamped to at least 1 so sessions with fewer than 10 trials no longer
    raise ZeroDivisionError.
    Return the figure handle (None when plot=False or axs is supplied) and
    the moving-average performance array.
    """
    trial_outcomes = (experiment.BehaviorTrial & session_key).fetch('outcome')
    trial_outcomes = (trial_outcomes == 'hit').astype(int)
    window_size = int(.1 * len(trial_outcomes)) if not window_size else int(window_size)
    # Clamp to >= 1: a zero-sized window would make 1/window_size divide by zero
    window_size = max(window_size, 1)
    kernel = np.full((window_size, ), 1/window_size)
    mv_outcomes = signal.convolve(trial_outcomes, kernel, mode='same')
    fig = None
    if plot:
        if not axs:
            fig, axs = plt.subplots(1, 1)
        # Raw per-trial outcomes shown as faint bars under the smoothed curve
        axs.bar(range(len(mv_outcomes)), trial_outcomes * mv_outcomes.max(), alpha=0.3)
        axs.plot(range(len(mv_outcomes)), mv_outcomes, 'k', linewidth=3)
        axs.set_xlabel('Trial')
        axs.set_ylabel('Proportion correct')
        axs.spines['right'].set_visible(False)
        axs.spines['top'].set_visible(False)
        axs.set_title('Behavior Performance')
    return fig, mv_outcomes
def plot_photostim_effect(session_key, photostim_key, axs=None, title='', plot=True):
    """
    For all trials in this "session_key", split to 4 groups:
    + control left-lick
    + control right-lick
    + photostim left-lick (specified by "photostim_key")
    + photostim right-lick (specified by "photostim_key")
    Plot correct proportion for each group
    Note: ignore "early lick" trials
    Note: a group with zero matching trials raises ZeroDivisionError
    (correct.sum()/len(correct) with an empty fetch)
    Return: fig, (cp_ctrl_left, cp_ctrl_right), (cp_stim_left, cp_stim_right)
    """
    # Control = behavior trials with no photostim; stim = trials joined with
    # the photostim table
    ctrl_trials = experiment.BehaviorTrial - experiment.PhotostimTrial & session_key
    stim_trials = experiment.BehaviorTrial * experiment.PhotostimTrial & session_key
    ctrl_left = ctrl_trials & 'trial_instruction="left"' & 'early_lick="no early"'
    ctrl_right = ctrl_trials & 'trial_instruction="right"' & 'early_lick="no early"'
    stim_left = stim_trials & 'trial_instruction="left"' & 'early_lick="no early"'
    stim_right = stim_trials & 'trial_instruction="right"' & 'early_lick="no early"'
    # Restrict by stim location (from photostim_key)
    stim_left = stim_left * experiment.PhotostimEvent & photostim_key
    stim_right = stim_right * experiment.PhotostimEvent & photostim_key
    def get_correct_proportion(trials):
        # Fraction of 'hit' outcomes among the given trials
        correct = (trials.fetch('outcome') == 'hit').astype(int)
        return correct.sum()/len(correct)
    # Extract and compute correct proportion
    cp_ctrl_left = get_correct_proportion(ctrl_left)
    cp_ctrl_right = get_correct_proportion(ctrl_right)
    cp_stim_left = get_correct_proportion(stim_left)
    cp_stim_right = get_correct_proportion(stim_right)
    fig = None
    if plot:
        if not axs:
            fig, axs = plt.subplots(1, 1)
        # One line per lick direction, from control (x=0) to photostim (x=1)
        axs.plot([0, 1], [cp_ctrl_left, cp_stim_left], 'b', label='lick left trials')
        axs.plot([0, 1], [cp_ctrl_right, cp_stim_right], 'r', label='lick right trials')
        # plot cosmetic
        ylim = (min([cp_ctrl_left, cp_stim_left, cp_ctrl_right, cp_stim_right]) - 0.1, 1)
        ylim = (0, 1) if ylim[0] < 0 else ylim
        axs.set_xlim((0, 1))
        axs.set_ylim(ylim)
        axs.set_xticks([0, 1])
        axs.set_xticklabels(['Control', 'Photostim'])
        axs.set_ylabel('Proportion correct')
        axs.legend(loc='lower left')
        axs.spines['right'].set_visible(False)
        axs.spines['top'].set_visible(False)
        axs.set_title(title)
    return fig, (cp_ctrl_left, cp_ctrl_right), (cp_stim_left, cp_stim_right)
def plot_tracking(session_key, unit_key,
                  tracking_feature='jaw_y', camera_key=_side_cam,
                  trial_offset=0, trial_limit=10, xlim=(-0.5, 1.5), axs=None):
    """
    Plot jaw movement per trial, time-locked to cue-onset, with spike times overlay
    :param session_key: session where the trials are from
    :param unit_key: unit for spike times overlay
    :param tracking_feature: which tracking feature to plot (default to `jaw_y`)
    :param camera_key: tracking from which camera to plot (default to Camera 0, i.e. the side camera)
    :param trial_offset: index of trial to plot from (if a decimal between 0 and 1, indicates the proportion of total trial to plot from)
    :param trial_limit: number of trial to plot
    :param xlim: x-axis range (seconds relative to first lick)
    :param axs: pair of matplotlib axes (left-lick, right-lick); created if None
    :return: the created figure, or None when axes were supplied
    """
    if tracking_feature not in _tracked_nose_features + _tracked_tongue_features + _tracked_jaw_features:
        print(f'Unknown tracking type: {tracking_feature}\nAvailable tracking types are: {_tracked_nose_features + _tracked_tongue_features + _tracked_jaw_features}')
        return
    # Only correct ('hit'), non-early-lick trials with both tracking and spikes
    trk = (tracking.Tracking.JawTracking * tracking.Tracking.TongueTracking * tracking.Tracking.NoseTracking
           * experiment.BehaviorTrial & camera_key & session_key & experiment.ActionEvent & ephys.Unit.TrialSpikes)
    tracking_fs = float((tracking.TrackingDevice & tracking.Tracking & camera_key & session_key).fetch1('sampling_rate'))
    l_trial_trk = trk & 'trial_instruction="left"' & 'early_lick="no early"' & 'outcome="hit"'
    r_trial_trk = trk & 'trial_instruction="right"' & 'early_lick="no early"' & 'outcome="hit"'
    def get_trial_track(trial_tracks):
        # A fractional trial_offset in (0, 1) means "skip that fraction of trials"
        if trial_offset < 1 and isinstance(trial_offset, float):
            offset = int(len(trial_tracks) * trial_offset)
        else:
            offset = trial_offset
        for tr in trial_tracks.fetch(as_dict=True, offset=offset, limit=trial_limit, order_by='trial'):
            trk_feat = tr[tracking_feature]
            # Tongue considered visible when DLC likelihood exceeds 0.9
            tongue_out_bool = tr['tongue_likelihood'] > 0.9
            sample_counts = len(trk_feat)
            tvec = np.arange(sample_counts) / tracking_fs
            # Earliest lick of the instructed direction in this trial
            first_lick_time = (experiment.ActionEvent & tr
                               & {'action_event_type': f'{tr["trial_instruction"]} lick'}).fetch(
                'action_event_time', order_by='action_event_time', limit=1)[0]
            go_time = (experiment.TrialEvent & tr & 'trial_event_type="go"').fetch1('trial_event_time')
            spike_times = (ephys.Unit.TrialSpikes & tr & unit_key).fetch1('spike_times')
            spike_times = spike_times + float(go_time) - float(first_lick_time)  # realigned to first-lick
            tvec = tvec - float(first_lick_time)
            yield trk_feat, tongue_out_bool, spike_times, tvec
    fig = None
    if axs is None:
        fig, axs = plt.subplots(1, 2, figsize=(16, 8))
    assert len(axs) == 2
    # Vertical offset between stacked trials
    h_spacing = 150
    for trial_tracks, ax, ax_name, spk_color in zip((l_trial_trk, r_trial_trk), axs,
                                                    ('left lick trials', 'right lick trials'), ('b', 'r')):
        for tr_id, (trk_feat, tongue_out_bool, spike_times, tvec) in enumerate(get_trial_track(trial_tracks)):
            ax.plot(tvec, trk_feat + tr_id * h_spacing, '.k', markersize=1)
            # Highlight samples where the tongue is visible
            ax.plot(tvec[tongue_out_bool], trk_feat[tongue_out_bool] + tr_id * h_spacing, '.', color='lime', markersize=2)
            # Spike raster drawn slightly above each trial's trace
            ax.plot(spike_times, np.full_like(spike_times, trk_feat[tongue_out_bool].mean() + h_spacing/10) + tr_id * h_spacing,
                    '|', color=spk_color, markersize=4)
        ax.set_title(ax_name)
        ax.axvline(x=0, linestyle='--', color='k')
        # cosmetic
        ax.set_xlim(xlim)
        ax.set_yticks([])
        ax.spines['left'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
    return fig
def plot_unit_jaw_phase_dist(session_key, unit_key, bin_counts=20, axs=None):
    """
    Polar histograms of the jaw-oscillation phase (5-15 Hz band) at the
    spike times of one unit, split into left-lick and right-lick trials.
    :param session_key: session restriction
    :param unit_key: unit whose spikes sample the jaw phase
    :param bin_counts: number of phase-histogram bins
    :param axs: pair of polar axes (left, right); created if None
    :return: the created figure, or None when axes were supplied
    """
    trk = (tracking.Tracking.JawTracking * tracking.Tracking.TongueTracking
           * experiment.BehaviorTrial & _side_cam & session_key & experiment.ActionEvent & ephys.Unit.TrialSpikes)
    tracking_fs = float((tracking.TrackingDevice & tracking.Tracking & _side_cam & session_key).fetch1('sampling_rate'))
    l_trial_trk = trk & 'trial_instruction="left"' & 'early_lick="no early"' & 'outcome="hit"'
    r_trial_trk = trk & 'trial_instruction="right"' & 'early_lick="no early"' & 'outcome="hit"'
    def get_trial_track(trial_tracks):
        # Yield, per trial, the jaw phase sampled at this unit's spike times
        jaws, spike_times, go_times = (ephys.Unit.TrialSpikes * trial_tracks * experiment.TrialEvent
                                       & unit_key & 'trial_event_type="go"').fetch(
            'jaw_y', 'spike_times', 'trial_event_time')
        spike_times = spike_times + go_times.astype(float)
        # Concatenate all trials, compute phase once, then split back per trial
        flattened_jaws = np.hstack(jaws)
        jsize = np.cumsum([0] + [j.size for j in jaws])
        _, phase = compute_insta_phase_amp(flattened_jaws, tracking_fs, freq_band=(5, 15))
        stacked_insta_phase = [phase[start: end] for start, end in zip(jsize[:-1], jsize[1:])]
        for spks, jphase in zip(spike_times, stacked_insta_phase):
            j_tvec = np.arange(len(jphase)) / tracking_fs
            # find the tracking timestamps corresponding to the spiketimes; and get the corresponding phase
            nearest_indices = np.searchsorted(j_tvec, spks, side="left")
            # Clamp spikes past the last tracking sample to the final index
            nearest_indices = np.where(nearest_indices == len(j_tvec), len(j_tvec) - 1, nearest_indices)
            yield jphase[nearest_indices]
    l_insta_phase = np.hstack(list(get_trial_track(l_trial_trk)))
    r_insta_phase = np.hstack(list(get_trial_track(r_trial_trk)))
    fig = None
    if axs is None:
        fig, axs = plt.subplots(1, 2, figsize=(12, 8), subplot_kw=dict(polar=True))
        fig.subplots_adjust(wspace=0.6)
    assert len(axs) == 2
    plot_polar_histogram(l_insta_phase, axs[0], bin_counts=bin_counts)
    axs[0].set_title('left lick trials', loc='left', fontweight='bold')
    plot_polar_histogram(r_insta_phase, axs[1], bin_counts=bin_counts)
    axs[1].set_title('right lick trials', loc='left', fontweight='bold')
    return fig
def plot_trial_tracking(trial_key, tracking_feature='jaw_y', camera_key=_side_cam):
    """
    Plot trial-specific tracking feature time-locked to the "go" cue.

    Shows the raw trace, its 5-15 Hz band-passed version, and the
    instantaneous amplitude and phase of the band-passed signal
    (via the Hilbert transform).

    :param trial_key: restriction identifying a single trial
    :param tracking_feature: which tracking feature to plot (default `jaw_y`)
    :param camera_key: tracking from which camera to use (default side camera)
    :return: matplotlib figure; None for an unknown feature; an error string
        when the trial has no tracking/events
    """
    if tracking_feature not in _tracked_nose_features + _tracked_tongue_features + _tracked_jaw_features:
        print(f'Unknown tracking type: {tracking_feature}\nAvailable tracking types are: {_tracked_nose_features + _tracked_tongue_features + _tracked_jaw_features}')
        return
    trk = (tracking.Tracking.JawTracking * tracking.Tracking.NoseTracking * tracking.Tracking.TongueTracking
           * experiment.BehaviorTrial & camera_key & trial_key & experiment.TrialEvent)
    if len(trk) == 0:
        return 'The selected trial has no Action Event (e.g. cue start)'
    tracking_fs = float((tracking.TrackingDevice & tracking.Tracking & trial_key).fetch1('sampling_rate'))
    trk_feat = trk.fetch1(tracking_feature)
    go_time = (experiment.TrialEvent & trk & 'trial_event_type="go"').fetch1('trial_event_time')
    # Time axis re-referenced so that t=0 is the "go" cue
    tvec = np.arange(len(trk_feat)) / tracking_fs - float(go_time)
    # Band-pass 5-15 Hz, then Hilbert transform for amplitude/phase
    b, a = signal.butter(5, (5, 15), btype='band', fs=tracking_fs)
    filt_trk_feat = signal.filtfilt(b, a, trk_feat)
    analytic_signal = signal.hilbert(filt_trk_feat)
    insta_amp = np.abs(analytic_signal)
    insta_phase = np.angle(analytic_signal)
    fig, axs = plt.subplots(2, 2, figsize=(16, 6))
    fig.subplots_adjust(hspace=0.4)
    axs[0, 0].plot(tvec, trk_feat, '.k')
    # Bug fix: the title was previously set to the data array itself
    # (set_title(trk_feat)); use the feature *name* instead.
    axs[0, 0].set_title(tracking_feature)
    axs[1, 0].plot(tvec, filt_trk_feat, '.k')
    axs[1, 0].set_title('Bandpass filtered 5-15Hz')
    axs[1, 0].set_xlabel('Time(s)')
    axs[0, 1].plot(tvec, insta_amp, '.k')
    axs[0, 1].set_title('Amplitude')
    axs[1, 1].plot(tvec, insta_phase, '.k')
    axs[1, 1].set_title('Phase')
    axs[1, 1].set_xlabel('Time(s)')
    # cosmetic
    for ax in axs.flatten():
        ax.set_xlim((-3, 3))
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
    return fig
def plot_windowed_jaw_phase_dist(session_key, xlim=(-0.12, 0.3), w_size=0.01, bin_counts=20):
    """
    Polar histograms of jaw phase (5-15 Hz band) within consecutive
    non-overlapping time windows spanning `xlim` (seconds around the go cue).
    :param session_key: session restriction
    :param xlim: time range covered by the windows
    :param w_size: window width in seconds
    :param bin_counts: number of phase-histogram bins
    :return: figure with one polar subplot per window
    NOTE(review): relies on module-level `get_trial_track` and
    `compute_insta_phase_amp` not visible in this part of the file - confirm
    they are defined elsewhere in the module.
    """
    trks = (tracking.Tracking.JawTracking * experiment.BehaviorTrial & _side_cam
            & session_key & experiment.TrialEvent)
    tracking_fs = float((tracking.TrackingDevice & tracking.Tracking & _side_cam & session_key).fetch1('sampling_rate'))
    tr_ids, jaws, trial_instructs, go_times = (trks * experiment.TrialEvent & 'trial_event_type="go"').fetch(
        'trial', 'jaw_y', 'trial_instruction', 'trial_event_time')
    # Concatenate all trials, compute phase once, then split back per trial
    flattened_jaws = np.hstack(jaws)
    jsize = np.cumsum([0] + [j.size for j in jaws])
    _, phase = compute_insta_phase_amp(flattened_jaws, tracking_fs, freq_band = (5, 15))
    stacked_insta_phase = [phase[start: end] for start, end in zip(jsize[:-1], jsize[1:])]
    # realign and segment - return trials x times
    insta_phase = np.vstack(get_trial_track(session_key, tr_ids, stacked_insta_phase,
                                              trial_instructs, go_times, tracking_fs, xlim))
    tvec = np.linspace(xlim[0], xlim[1], insta_phase.shape[1])
    windows = np.arange(xlim[0], xlim[1], w_size)
    # plot
    col_counts = 7
    row_counts = int(np.ceil(len(windows) / col_counts))
    fig, axs = plt.subplots(row_counts, col_counts,
                            figsize=(16, 2.5*row_counts),
                            subplot_kw=dict(polar=True))
    fig.subplots_adjust(wspace=0.8, hspace=0.5)
    # Hide all axes first; only those that receive a window are switched back on
    [a.axis('off') for a in axs.flatten()]
    # non-overlapping windowed histogram
    for w_start, ax in zip(windows, axs.flatten()):
        phase = insta_phase[:, np.logical_and(tvec >= w_start, tvec <= w_start + w_size)].flatten()
        plot_polar_histogram(phase, ax, bin_counts=bin_counts)
        ax.set_xlabel(f'{w_start*1000:.0f} to {(w_start + w_size)*1000:.0f}ms', fontweight='bold')
        ax.axis('on')
    return fig
def plot_jaw_phase_dist(session_key, xlim=(-0.12, 0.3), bin_counts=20):
    """Plot the distribution of jaw instantaneous phase for left-lick vs
    right-lick trials of one session, as two polar histograms.

    :param session_key: restriction identifying one session (DataJoint key)
    :param xlim: (start, end) of the analysis window, in seconds, relative to the alignment time
    :param bin_counts: number of bins per polar histogram
    :return: matplotlib figure with two polar subplots (left / right trials)
    """
    trks = (tracking.Tracking.JawTracking * experiment.BehaviorTrial & _side_cam & session_key & experiment.TrialEvent)
    tracking_fs = float((tracking.TrackingDevice & tracking.Tracking & _side_cam & session_key).fetch1('sampling_rate'))
    # restrict to non-early-lick trials, split by instructed lick direction
    l_trial_trk = trks & 'trial_instruction="left"' & 'early_lick="no early"'
    r_trial_trk = trks & 'trial_instruction="right"' & 'early_lick="no early"'
    insta_phases = []
    for trial_trks in (l_trial_trk, r_trial_trk):
        tr_ids, jaws, trial_instructs, go_times = (trial_trks * experiment.TrialEvent & 'trial_event_type="go"').fetch(
            'trial', 'jaw_y', 'trial_instruction', 'trial_event_time')
        # compute the instantaneous phase over all trials concatenated, then split back per trial
        flattened_jaws = np.hstack(jaws)
        jsize = np.cumsum([0] + [j.size for j in jaws])
        _, phase = compute_insta_phase_amp(flattened_jaws, tracking_fs, freq_band = (5, 15))
        stacked_insta_phase = [phase[start: end] for start, end in zip(jsize[:-1], jsize[1:])]
        # realign and segment - return trials x times
        insta_phases.append(np.vstack(get_trial_track(session_key, tr_ids, stacked_insta_phase,
                                                   trial_instructs, go_times, tracking_fs, xlim)))
    l_insta_phase, r_insta_phase = insta_phases
    fig, axs = plt.subplots(1, 2, figsize=(12, 8), subplot_kw=dict(polar=True))
    fig.subplots_adjust(wspace=0.6)
    plot_polar_histogram(l_insta_phase.flatten(), axs[0], bin_counts=bin_counts)
    axs[0].set_title('left lick trials', loc='left', fontweight='bold')
    plot_polar_histogram(r_insta_phase.flatten(), axs[1], bin_counts=bin_counts)
    axs[1].set_title('right lick trials', loc='left', fontweight='bold')
    return fig
def plot_polar_histogram(data, ax=None, bin_counts=30):
    """Draw a circular (polar) histogram of phase values.

    :param data: phase values in rad
    :param ax: polar axes to draw on; a new figure/axes pair is created when None
    :param bin_counts: number of histogram bins
    """
    if ax is None:
        fig, ax = plt.subplots(1, 1, subplot_kw=dict(polar=True))
    # bin the phases over their observed range
    counts, edges = np.histogram(data, bins=bin_counts)
    # each bar spans an equal slice of the full circle,
    # raised slightly off the center for readability
    ax.bar(edges[1:], counts, width=(2 * np.pi) / bin_counts, bottom=2)
    # zero angle points East; direction=1 means angles increase counter-clockwise
    ax.set_theta_zero_location("E")
    ax.set_theta_direction(1)
def get_trial_track(session_key, tr_ids, data, trial_instructs, go_times, fs, xlim):
    """
    Realign and segment each per-trial trace in "data", yielding one
    fixed-length segment per qualifying trial (so callers can stack them
    into a trials x time array).
    The alignment time is the trial's first '<instruction> lick'
    ('left lick' or 'right lick'), falling back to the go-cue time when
    the trial has no such lick. The yielded segment is the portion of the
    trace falling within 'xlim' (seconds) around the alignment time.
    """
    # common segment length - trials shorter than this are skipped
    seg_len = int(np.floor((xlim[1] - xlim[0]) * fs) - 1)
    for trial_id, trace, instruction, go_time in zip(tr_ids, data, trial_instructs, go_times):
        lick_times = (experiment.ActionEvent & session_key
                      & {'trial': trial_id} & {'action_event_type': f'{instruction} lick'}).fetch(
            'action_event_time', order_by='action_event_time', limit=1)
        # align to the first matching lick, or to the go cue when the trial has none
        if lick_times.size > 0:
            align_time = lick_times[0]
        else:
            align_time = go_time
        tvec = np.arange(len(trace)) / fs - float(align_time)
        segment = trace[np.logical_and(tvec >= xlim[0], tvec <= xlim[1])]
        if len(segment) >= seg_len:
            yield segment[:seg_len]
def compute_insta_phase_amp(data, fs, freq_band=(5, 15)):
    """
    Compute the instantaneous amplitude and phase of a signal by bandpass
    filtering followed by the Hilbert transform.

    :param data: 1-D signal, or 2-D trial x time array (processed flattened,
        results reshaped back to trial x time)
    :param fs: sampling rate (Hz)
    :param freq_band: (low, high) cut-offs, in Hz, for the bandpass filter
    :return: (insta_amp, insta_phase), each with the same shape as ``data``
    """
    # Remember the original dimensionality BEFORE flattening.
    # Bugfix: the previous version re-tested data.ndim after reshape(-1),
    # which is always 1, so 2-D inputs were returned flattened.
    is_multi_trial = data.ndim > 1
    if is_multi_trial:
        trial_count, time_count = data.shape
        # flatten so filtering/hilbert run over one long 1-D trace
        data = data.reshape(-1)
    # band pass
    b, a = signal.butter(5, freq_band, btype='band', fs=fs)
    data = signal.filtfilt(b, a, data)
    # hilbert transform -> analytic signal
    analytic_signal = signal.hilbert(data)
    insta_amp = np.abs(analytic_signal)
    insta_phase = np.angle(analytic_signal)
    if is_multi_trial:
        return insta_amp.reshape((trial_count, time_count)), insta_phase.reshape((trial_count, time_count))
    else:
        return insta_amp, insta_phase
def get_event_locked_tracking_insta_phase(trials, event, tracking_feature):
    """
    Get instantaneous phase of the jaw movement, at the time of the specified "event", for each of the specified "trials"
    :param trials: query of the SessionTrial - note: the subsequent fetch() will be order_by='trial'
    :param event: "event" can be
        + a list of time equal length to the trials - specifying the time of each trial to extract the insta-phase
        + a single string, representing the event-name to extract the time for each trial
            In such case, the "event" can be the events in
                + experiment.TrialEvent
                + experiment.ActionEvent
            In the case that multiple of such "event_name" are found in a trial, the 1st one will be selected
             (e.g. multiple "lick left", then the 1st "lick left" is selected)
    :param tracking_feature: any attribute name under the tracking.Tracking table - e.g. 'jaw_y', 'tongue_x', etc.
    :return: list of instantaneous phase, in the same order of specified "trials"
        (np.nan for trials with no tracking data or no matching event)
    """
    trials = trials.proj()
    tracking_fs = (tracking.TrackingDevice & tracking.Tracking & trials).fetch('sampling_rate')
    if len(set(tracking_fs)) > 1:
        raise Exception('Multiple tracking Fs found!')
    else:
        tracking_fs = float(tracking_fs[0])
    # ---- process the "tracking_feature" input ----
    if tracking_feature not in _tracked_nose_features + _tracked_tongue_features + _tracked_jaw_features:
        print(f'Unknown tracking type: {tracking_feature}\nAvailable tracking types are: {_tracked_nose_features + _tracked_tongue_features + _tracked_jaw_features}')
        return
    # pick the tracking part-table that holds the requested feature
    for trk_types, trk_tbl in zip((_tracked_nose_features, _tracked_tongue_features, _tracked_jaw_features),
                                  (tracking.Tracking.NoseTracking, tracking.Tracking.TongueTracking, tracking.Tracking.JawTracking)):
        if tracking_feature in trk_types:
            d_tbl = trk_tbl
    # ---- process the "event" input ----
    if isinstance(event, (list, np.ndarray)):
        # one explicit event time per trial, in seconds
        assert len(event) == len(trials)
        tr_ids, trk_data = trials.aggr(d_tbl, trk_data=tracking_feature, keep_all_rows=True).fetch(
            'trial', 'trk_data', order_by='trial')
        eve_idx = np.array(event).astype(float) * tracking_fs
    elif isinstance(event, str):
        trial_event_types = experiment.TrialEventType.fetch('trial_event_type')
        action_event_types = experiment.ActionEventType.fetch('action_event_type')
        if event in trial_event_types:
            event_tbl = experiment.TrialEvent
            eve_type_attr = 'trial_event_type'
            eve_time_attr = 'trial_event_time'
        elif event in action_event_types:
            event_tbl = experiment.ActionEvent
            eve_type_attr = 'action_event_type'
            # bugfix: ActionEvent stores its time as "action_event_time"
            # (was 'trial_event_time', which is a TrialEvent attribute)
            eve_time_attr = 'action_event_time'
        else:
            print(f'Unknown event: {event}\nAvailable events are: {list(trial_event_types) + list(action_event_types)}')
            return
        # per trial: tracking data plus the earliest matching event time (NaN when absent)
        tr_ids, trk_data, eve_times = trials.aggr(d_tbl, trk_data=tracking_feature, keep_all_rows=True).aggr(
            event_tbl & {eve_type_attr: event}, 'trk_data', event_time=f'min({eve_time_attr})', keep_all_rows=True).fetch(
            'trial', 'trk_data', 'event_time', order_by='trial')
        eve_idx = eve_times.astype(float) * tracking_fs
    else:
        print('Unknown "event" argument!')
        return
    # ---- the computation part ----
    # for trials with no jaw data (None), set to np.nan array
    no_trk_trid = [idx for idx, jaw in enumerate(trk_data) if jaw is None]
    with_trk_trid = np.array(list(set(range(len(trk_data))) ^ set(no_trk_trid))).astype(int)
    if len(with_trk_trid) == 0:
        print(f'The specified trials do not have any {tracking_feature}')
        return
    trk_data = [d for d in trk_data if d is not None]
    # compute the instantaneous phase over all traces concatenated, then split back per trial
    flattened_jaws = np.hstack(trk_data)
    jsize = np.cumsum([0] + [j.size for j in trk_data])
    _, phase = compute_insta_phase_amp(flattened_jaws, tracking_fs, freq_band=(5, 15))
    stacked_insta_phase = [phase[start: end] for start, end in zip(jsize[:-1], jsize[1:])]
    # sample the phase trace at the event index; NaN for trials lacking data or event
    trial_eve_insta_phase = [stacked_insta_phase[np.where(with_trk_trid == tr_id)[0][0]][int(e_idx)]
                             if not np.isnan(e_idx) and tr_id in with_trk_trid else np.nan
                             for tr_id, e_idx in enumerate(eve_idx)]
    return trial_eve_insta_phase
| [
"numpy.abs",
"numpy.angle",
"numpy.floor",
"numpy.isnan",
"pipeline.experiment.TrialEventType.fetch",
"numpy.histogram",
"numpy.arange",
"numpy.full",
"numpy.cumsum",
"numpy.linspace",
"pipeline.experiment.ActionEventType.fetch",
"matplotlib.pyplot.subplots",
"scipy.signal.butter",
"numpy.... | [((1387, 1427), 'numpy.full', 'np.full', (['(window_size,)', '(1 / window_size)'], {}), '((window_size,), 1 / window_size)\n', (1394, 1427), True, 'import numpy as np\n'), ((1446, 1498), 'scipy.signal.convolve', 'signal.convolve', (['trial_outcomes', 'kernel'], {'mode': '"""same"""'}), "(trial_outcomes, kernel, mode='same')\n", (1461, 1498), False, 'from scipy import signal\n'), ((11739, 11794), 'scipy.signal.butter', 'signal.butter', (['(5)', '(5, 15)'], {'btype': '"""band"""', 'fs': 'tracking_fs'}), "(5, (5, 15), btype='band', fs=tracking_fs)\n", (11752, 11794), False, 'from scipy import signal\n'), ((11815, 11846), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'trk_feat'], {}), '(b, a, trk_feat)\n', (11830, 11846), False, 'from scipy import signal\n'), ((11870, 11899), 'scipy.signal.hilbert', 'signal.hilbert', (['filt_trk_feat'], {}), '(filt_trk_feat)\n', (11884, 11899), False, 'from scipy import signal\n'), ((11916, 11939), 'numpy.abs', 'np.abs', (['analytic_signal'], {}), '(analytic_signal)\n', (11922, 11939), True, 'import numpy as np\n'), ((11958, 11983), 'numpy.angle', 'np.angle', (['analytic_signal'], {}), '(analytic_signal)\n', (11966, 11983), True, 'import numpy as np\n'), ((12000, 12035), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(16, 6)'}), '(2, 2, figsize=(16, 6))\n', (12012, 12035), True, 'import matplotlib.pyplot as plt\n'), ((13203, 13218), 'numpy.hstack', 'np.hstack', (['jaws'], {}), '(jaws)\n', (13212, 13218), True, 'import numpy as np\n'), ((13231, 13270), 'numpy.cumsum', 'np.cumsum', (['([0] + [j.size for j in jaws])'], {}), '([0] + [j.size for j in jaws])\n', (13240, 13270), True, 'import numpy as np\n'), ((13691, 13742), 'numpy.linspace', 'np.linspace', (['xlim[0]', 'xlim[1]', 'insta_phase.shape[1]'], {}), '(xlim[0], xlim[1], insta_phase.shape[1])\n', (13702, 13742), True, 'import numpy as np\n'), ((13757, 13792), 'numpy.arange', 'np.arange', (['xlim[0]', 'xlim[1]', 'w_size'], {}), 
'(xlim[0], xlim[1], w_size)\n', (13766, 13792), True, 'import numpy as np\n'), ((16682, 16717), 'numpy.histogram', 'np.histogram', (['data'], {'bins': 'bin_counts'}), '(data, bins=bin_counts)\n', (16694, 16717), True, 'import numpy as np\n'), ((18378, 18426), 'scipy.signal.butter', 'signal.butter', (['(5)', 'freq_band'], {'btype': '"""band"""', 'fs': 'fs'}), "(5, freq_band, btype='band', fs=fs)\n", (18391, 18426), False, 'from scipy import signal\n'), ((18438, 18465), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'data'], {}), '(b, a, data)\n', (18453, 18465), False, 'from scipy import signal\n'), ((18502, 18522), 'scipy.signal.hilbert', 'signal.hilbert', (['data'], {}), '(data)\n', (18516, 18522), False, 'from scipy import signal\n'), ((18539, 18562), 'numpy.abs', 'np.abs', (['analytic_signal'], {}), '(analytic_signal)\n', (18545, 18562), True, 'import numpy as np\n'), ((18581, 18606), 'numpy.angle', 'np.angle', (['analytic_signal'], {}), '(analytic_signal)\n', (18589, 18606), True, 'import numpy as np\n'), ((22721, 22740), 'numpy.hstack', 'np.hstack', (['trk_data'], {}), '(trk_data)\n', (22730, 22740), True, 'import numpy as np\n'), ((22753, 22796), 'numpy.cumsum', 'np.cumsum', (['([0] + [j.size for j in trk_data])'], {}), '([0] + [j.size for j in trk_data])\n', (22762, 22796), True, 'import numpy as np\n'), ((7275, 7310), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(16, 8)'}), '(1, 2, figsize=(16, 8))\n', (7287, 7310), True, 'import matplotlib.pyplot as plt\n'), ((9331, 9346), 'numpy.hstack', 'np.hstack', (['jaws'], {}), '(jaws)\n', (9340, 9346), True, 'import numpy as np\n'), ((9363, 9402), 'numpy.cumsum', 'np.cumsum', (['([0] + [j.size for j in jaws])'], {}), '([0] + [j.size for j in jaws])\n', (9372, 9402), True, 'import numpy as np\n'), ((15296, 15311), 'numpy.hstack', 'np.hstack', (['jaws'], {}), '(jaws)\n', (15305, 15311), True, 'import numpy as np\n'), ((15328, 15367), 'numpy.cumsum', 'np.cumsum', (['([0] + 
[j.size for j in jaws])'], {}), '([0] + [j.size for j in jaws])\n', (15337, 15367), True, 'import numpy as np\n'), ((1571, 1589), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1583, 1589), True, 'import matplotlib.pyplot as plt\n'), ((3671, 3689), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (3683, 3689), True, 'import matplotlib.pyplot as plt\n'), ((9854, 9896), 'numpy.searchsorted', 'np.searchsorted', (['j_tvec', 'spks'], {'side': '"""left"""'}), "(j_tvec, spks, side='left')\n", (9869, 9896), True, 'import numpy as np\n'), ((17370, 17404), 'numpy.floor', 'np.floor', (['((xlim[1] - xlim[0]) * fs)'], {}), '((xlim[1] - xlim[0]) * fs)\n', (17378, 17404), True, 'import numpy as np\n'), ((17917, 17959), 'numpy.logical_and', 'np.logical_and', (['(t >= xlim[0])', '(t <= xlim[1])'], {}), '(t >= xlim[0], t <= xlim[1])\n', (17931, 17959), True, 'import numpy as np\n'), ((21175, 21226), 'pipeline.experiment.TrialEventType.fetch', 'experiment.TrialEventType.fetch', (['"""trial_event_type"""'], {}), "('trial_event_type')\n", (21206, 21226), False, 'from pipeline import experiment, tracking, ephys\n'), ((21256, 21309), 'pipeline.experiment.ActionEventType.fetch', 'experiment.ActionEventType.fetch', (['"""action_event_type"""'], {}), "('action_event_type')\n", (21288, 21309), False, 'from pipeline import experiment, tracking, ephys\n'), ((6529, 6553), 'numpy.arange', 'np.arange', (['sample_counts'], {}), '(sample_counts)\n', (6538, 6553), True, 'import numpy as np\n'), ((21069, 21084), 'numpy.array', 'np.array', (['event'], {}), '(event)\n', (21077, 21084), True, 'import numpy as np\n'), ((23113, 23128), 'numpy.isnan', 'np.isnan', (['e_idx'], {}), '(e_idx)\n', (23121, 23128), True, 'import numpy as np\n'), ((14264, 14321), 'numpy.logical_and', 'np.logical_and', (['(tvec >= w_start)', '(tvec <= w_start + w_size)'], {}), '(tvec >= w_start, tvec <= w_start + w_size)\n', (14278, 14321), True, 'import numpy as 
np\n'), ((23025, 23057), 'numpy.where', 'np.where', (['(with_trk_trid == tr_id)'], {}), '(with_trk_trid == tr_id)\n', (23033, 23057), True, 'import numpy as np\n')] |
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score ,mean_squared_error
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.ensemble import AdaBoostRegressor
from xgboost import XGBRegressor
from catboost import CatBoostRegressor
from sklearn.linear_model import Lasso,Ridge,BayesianRidge,ElasticNet,LinearRegression
import pandas as pd
import numpy as np
import sys
sys.path.insert(1, 'streamlit-app')
import logging
from gpa import load_data
#import joblib
import json
import flask
from flask import Flask,request
# ---- Flask application and module-level state shared by the route handlers ----
app=Flask(__name__)
# NOTE(review): FILE_NAME is not used anywhere below (the joblib dump is commented out) - confirm before removing
FILE_NAME='D:\ANKIT\GraduateAdmissions\catboost_model.sav' #Use your own path
# admissions dataframe, loaded once at import time
df=load_data()
# populated by load_model() before serving predictions
model=None
# NOTE(review): "%(threadname)s" is likely meant to be "%(threadName)s" (logging's standard attribute) - verify
logging.basicConfig(filename="app.log",level=logging.DEBUG,format="%(asctime)s %(levelname)s %(name)s %(threadname)s : %(message)s")
#print("Data loading.........","\nEmpty model object instantiated....")
logging.info("** ---------------LOGS----------------**")
logging.info("** ---------------****----------------**")
logging.info("** Data loading.........")
logging.info("** Empty model object instantiated....")
def preprocess():
    """
    Split the admissions dataframe into train/test arrays.

    Drops the target ('Chance of Admit ') and the 'Serial No.' identifier
    from the features; returns X_train, X_test, y_train, y_test with a 20%
    test split and no shuffling (so the split is reproducible).
    """
    features = df.drop(['Chance of Admit ', 'Serial No.'], axis=1).values
    target = df['Chance of Admit '].values
    # train_test_split already returns (X_train, X_test, y_train, y_test)
    return train_test_split(features, target, test_size=0.20, shuffle=False)
def find_min_mse():
    """
    Fit a collection of regression models on the train split, print each
    model's test RMSE, then print the name(s) of the model(s) achieving the
    lowest RMSE (the objective being to minimize the MSE loss).
    """
    X_train, X_test, y_train, y_test = preprocess()
    models = [['DecisionTree :', DecisionTreeRegressor()],
              ['Linear Regression :', LinearRegression()],
              ['RandomForest :', RandomForestRegressor(n_estimators=150)],
              ['KNeighbours :', KNeighborsRegressor(n_neighbors=2)],
              ['SVM :', SVR(kernel='linear')],
              ['AdaBoostClassifier :', AdaBoostRegressor(n_estimators=100)],
              ['Xgboost: ', XGBRegressor()],
              ['CatBoost: ', CatBoostRegressor(logging_level='Silent')],
              ['Lasso: ', Lasso()],
              ['Ridge: ', Ridge()],
              ['BayesianRidge: ', BayesianRidge()],
              ['ElasticNet: ', ElasticNet()],
              ]
    print("Mean Square Error...")
    # compute each model's RMSE exactly once (previously recomputed three times per model)
    scores = []
    for name, model in models:
        model.fit(X_train, y_train)
        predictions = model.predict(X_test)
        rmse = np.sqrt(mean_squared_error(y_test, predictions))
        print(name, rmse)
        scores.append((rmse, name))
    # report every model tied for the minimum RMSE
    best = min(rmse for rmse, _ in scores)
    for rmse, name in scores:
        if rmse == best:
            print(name)
def save():
    """
    Train a CatBoost regressor on the training split and persist it
    in CatBoost's native ".cbm" format.
    """
    X_train, _, y_train, __ = preprocess()
    regressor = CatBoostRegressor(logging_level='Silent')
    regressor.fit(X_train, y_train)
    regressor.save_model("D:\ANKIT\GraduateAdmissions\data\catboost", format="cbm")
def load_model():
    """Initialize the module-level global `model` from the saved ".cbm" CatBoost model file.
    """
    global model
    model=CatBoostRegressor()
    model.load_model("D:\ANKIT\GraduateAdmissions\data\catboost")
@app.route('/')
def home():
    """Root endpoint - simple health check confirming the API is running."""
    return {"message":"Welcome to API..."}
@app.route('/predict',methods=['POST'])
def predict():
    """
    Predict admission chance from a JSON payload of shape {"Input": [[...features...]]}.

    :return: {"Chance": <float>} for the first input row; each prediction is logged.
    """
    if flask.request.method == "POST":
        data = request.json
        # cast to a builtin float so the response dict is JSON-serializable
        # (model.predict returns a numpy scalar); unused `inputs` array removed
        chance = float(model.predict(data['Input'])[0])
        app.logger.info(" **Chance :"+str(chance) )
        return {"Chance":chance}
# New users will have to run the methods below:
#   save()       --> retrain the model and persist it
#   load_model() --> or else use the pre-trained model
# Sample input for verifying correct model loading and prediction:
#print(model.predict(np.array([[315.0 , 105.0 , 2.0 , 3.0 , 3.0 , 7.5 , 1.0]]))[0])
if __name__=="__main__":
    logging.info("** Loading Catboost model and Flask starting server...")
    logging.info("** Please wait until server has fully started")
    #print("* Loading Catboost model and Flask starting server...","please wait until server has fully started")
    load_model() #<--------- doesn't work when serving with waitress
    app.run(debug=False)
| [
"gpa.load_data",
"sklearn.model_selection.train_test_split",
"xgboost.XGBRegressor",
"catboost.CatBoostRegressor",
"sklearn.tree.DecisionTreeRegressor",
"sklearn.linear_model.ElasticNet",
"sklearn.metrics.mean_squared_error",
"sklearn.linear_model.Ridge",
"sklearn.linear_model.Lasso",
"sklearn.lin... | [((565, 600), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""streamlit-app"""'], {}), "(1, 'streamlit-app')\n", (580, 600), False, 'import sys\n'), ((731, 746), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (736, 746), False, 'from flask import Flask, request\n'), ((832, 843), 'gpa.load_data', 'load_data', ([], {}), '()\n', (841, 843), False, 'from gpa import load_data\n'), ((859, 998), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""app.log"""', 'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(levelname)s %(name)s %(threadname)s : %(message)s"""'}), "(filename='app.log', level=logging.DEBUG, format=\n '%(asctime)s %(levelname)s %(name)s %(threadname)s : %(message)s')\n", (878, 998), False, 'import logging\n'), ((1066, 1122), 'logging.info', 'logging.info', (['"""** ---------------LOGS----------------**"""'], {}), "('** ---------------LOGS----------------**')\n", (1078, 1122), False, 'import logging\n'), ((1124, 1180), 'logging.info', 'logging.info', (['"""** ---------------****----------------**"""'], {}), "('** ---------------****----------------**')\n", (1136, 1180), False, 'import logging\n'), ((1182, 1222), 'logging.info', 'logging.info', (['"""** Data loading........."""'], {}), "('** Data loading.........')\n", (1194, 1222), False, 'import logging\n'), ((1224, 1278), 'logging.info', 'logging.info', (['"""** Empty model object instantiated...."""'], {}), "('** Empty model object instantiated....')\n", (1236, 1278), False, 'import logging\n'), ((1542, 1594), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'shuffle': '(False)'}), '(X, y, test_size=0.2, shuffle=False)\n', (1558, 1594), False, 'from sklearn.model_selection import train_test_split\n'), ((3214, 3255), 'catboost.CatBoostRegressor', 'CatBoostRegressor', ([], {'logging_level': '"""Silent"""'}), "(logging_level='Silent')\n", (3231, 3255), False, 'from catboost import CatBoostRegressor\n'), 
((3560, 3579), 'catboost.CatBoostRegressor', 'CatBoostRegressor', ([], {}), '()\n', (3577, 3579), False, 'from catboost import CatBoostRegressor\n'), ((4411, 4481), 'logging.info', 'logging.info', (['"""** Loading Catboost model and Flask starting server..."""'], {}), "('** Loading Catboost model and Flask starting server...')\n", (4423, 4481), False, 'import logging\n'), ((4486, 4547), 'logging.info', 'logging.info', (['"""** Please wait until server has fully started"""'], {}), "('** Please wait until server has fully started')\n", (4498, 4547), False, 'import logging\n'), ((3877, 3900), 'numpy.array', 'np.array', (["data['Input']"], {}), "(data['Input'])\n", (3885, 3900), True, 'import numpy as np\n'), ((1880, 1903), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (1901, 1903), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((1942, 1960), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1958, 1960), False, 'from sklearn.linear_model import Lasso, Ridge, BayesianRidge, ElasticNet, LinearRegression\n'), ((1993, 2032), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(150)'}), '(n_estimators=150)\n', (2014, 2032), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((2065, 2099), 'sklearn.neighbors.KNeighborsRegressor', 'KNeighborsRegressor', ([], {'n_neighbors': '(2)'}), '(n_neighbors=2)\n', (2084, 2099), False, 'from sklearn.neighbors import KNeighborsRegressor\n'), ((2126, 2146), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (2129, 2146), False, 'from sklearn.svm import SVR\n'), ((2186, 2221), 'sklearn.ensemble.AdaBoostRegressor', 'AdaBoostRegressor', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (2203, 2221), False, 'from sklearn.ensemble import AdaBoostRegressor\n'), ((2250, 2264), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {}), '()\n', (2262, 2264), False, 'from xgboost import 
XGBRegressor\n'), ((2294, 2335), 'catboost.CatBoostRegressor', 'CatBoostRegressor', ([], {'logging_level': '"""Silent"""'}), "(logging_level='Silent')\n", (2311, 2335), False, 'from catboost import CatBoostRegressor\n'), ((2362, 2369), 'sklearn.linear_model.Lasso', 'Lasso', ([], {}), '()\n', (2367, 2369), False, 'from sklearn.linear_model import Lasso, Ridge, BayesianRidge, ElasticNet, LinearRegression\n'), ((2396, 2403), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (2401, 2403), False, 'from sklearn.linear_model import Lasso, Ridge, BayesianRidge, ElasticNet, LinearRegression\n'), ((2438, 2453), 'sklearn.linear_model.BayesianRidge', 'BayesianRidge', ([], {}), '()\n', (2451, 2453), False, 'from sklearn.linear_model import Lasso, Ridge, BayesianRidge, ElasticNet, LinearRegression\n'), ((2485, 2497), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {}), '()\n', (2495, 2497), False, 'from sklearn.linear_model import Lasso, Ridge, BayesianRidge, ElasticNet, LinearRegression\n'), ((2744, 2783), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (2762, 2783), False, 'from sklearn.metrics import accuracy_score, mean_squared_error\n'), ((2816, 2855), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (2834, 2855), False, 'from sklearn.metrics import accuracy_score, mean_squared_error\n'), ((2886, 2925), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (2904, 2925), False, 'from sklearn.metrics import accuracy_score, mean_squared_error\n')] |
"""
This software is an implementation of
Deep MRI brain extraction: A 3D convolutional neural network for skull stripping
You can download the paper at http://dx.doi.org/10.1016/j.neuroimage.2016.01.024
If you use this software for your projects please cite:
Kleesiek and Urban et al, Deep MRI brain extraction: A 3D convolutional neural network for skull stripping,
NeuroImage, Volume 129, April 2016, Pages 460-469.
The MIT License (MIT)
Copyright (c) 2016 <NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
import numpy
import theano
import theano.tensor as T
import TransferFunctions as transfer
if 1:
try:
try:
from conv3d2d_cudnn import conv3d
except:
from conv3d2d import conv3d
except:
from theano.tensor.nnet.conv3d2d import conv3d
from maxPool3D import my_max_pool_3d
def max_pool_along_channel_axis(sym_input, pool_factor):
    """Channel-wise maxout for 3D conv: reduces the channel count by
    ``pool_factor`` by taking the elementwise maximum over the strided
    channel slices ``sym_input[:, :, i::pool_factor]`` for each offset i."""
    pooled = None
    for offset in xrange(pool_factor):
        fragment = sym_input[:, :, offset::pool_factor]
        pooled = fragment if pooled is None else T.maximum(pooled, fragment)
    return pooled
# Ns, Ts, C, Hs, Ws = 1, 70, 1, 70, 70 -> 70^3
# Nf, Tf, C, Hf, Wf = 32, 5 , 1, 5 , 5 -> 32 filters of shape 5^3
# signals = numpy.arange(Ns*Ts*C*Hs*Ws).reshape(Ns, Ts, C, Hs, Ws).astype('float32')
# filters = numpy.arange(Nf*Tf*C*Hf*Wf).reshape(Nf, Tf, C, Hf, Wf).astype('float32')
#
# in 3D
# input: (1, 70, 3, 70, 70)
# filters: (32, 5 , 3, 5 , 5)
# --> output: (1, 66, 32, 66, 66)
import time
def offset_map(output_stride):
    """Enumerate the integer (x, y, z) offsets of the fragments produced by
    max-fragment-pooling for the given per-axis output stride.

    :param output_stride: array-like of per-axis strides (e.g. [2, 2, 2],
        [4, 4, 4]); every entry must be a power of 2
    :return: int array of shape (number_of_fragments, 3)
    """
    output_stride = np.asarray(output_stride)
    # validate: each stride must be a power of 2 (log2 must be an integer).
    # Bugfix: was `np.float.is_integer(x)` - the `np.float` alias was removed
    # in numpy >= 1.24, and the unbound-method call was fragile anyway.
    for exponent in np.log2(output_stride):
        assert float(exponent).is_integer(), 'Stride must be power of 2; is: '+str(output_stride)
    if np.all(output_stride == 2):
        # base case: the 2x2x2 corner offsets
        return np.array([[0,0,0],[0,0,1],[0,1,0],[0,1,1],[1,0,0],[1,0,1],[1,1,0],[1,1,1]])
    else:
        # recurse on half the stride, then shift the previous offsets into each octant
        prev = offset_map(output_stride // 2)
        current = []
        for i in range(2):
            for j in range(2):
                for k in range(2):
                    for p in prev:
                        new = p.copy()
                        # NOTE(review): the fixed shift of 2 (rather than
                        # output_stride/2) matches the original code but looks
                        # suspect for strides > 4 - confirm against MFP usage
                        new[0] += i*2
                        new[1] += j*2
                        new[2] += k*2
                        current.append(new)
        return np.array(current)
class ConvPoolLayer3D(object):
"""Pool Layer of a convolutional network
you could change this easily into using different pooling in any of the directions..."""
def __init__(self, input, filter_shape, input_shape, poolsize=2, bDropoutEnabled_=False,
bUpsizingLayer=False, ActivationFunction = 'abs',
use_fragment_pooling = False, dense_output_from_fragments = False, output_stride = None,
b_use_FFT_convolution=False, input_layer=None,
W=None, b=None, b_deconvolution=False, verbose = 1):
"""
Allocate a ConvPoolLayer with shared variable internal parameters.
bUpsizingLayer = True: => bordermode = full (zero padding) thus increasing the output image size (as opposed to shrinking it in 'valid' mode)
:type input: ftensor5
:param input: symbolic image tensor, of shape input_shape
:type filter_shape: tuple or list of length 5
:param filter_shape: (number of filters, filter X, num input feature maps, filter Y,filter Z)
:type input_shape: tuple or list of length 5
:param input_shape: (batch size, X, num input feature maps, Y, Z)
:type poolsize: integer (typically 1 or 2)
:param poolsize: the downsampling (max-pooling) factor
accessible via "this"/self pointer:
input -> conv_out -> ... -> output
"""
assert len(filter_shape)==5
if b_deconvolution:
raise NotImplementedError()
assert input_shape[2] == filter_shape[2]
self.input = input
prod_pool = np.prod(poolsize)
try:
if prod_pool==poolsize:
prod_pool = poolsize**3
poolsize = (poolsize,)*3
except:
pass
poolsize = np.asanyarray(poolsize)
self.pooling_factor=poolsize
self.number_of_filters = filter_shape[0]
self.filter_shape=filter_shape
self.input_shape = input_shape
self.input_layer = input_layer
self.output_stride = output_stride
if prod_pool>1 and use_fragment_pooling:
assert prod_pool==8,"currently only 2^3 pooling"
# n inputs to each hidden unit
fan_in = 1.0*numpy.prod(filter_shape[1:])
fan_out = 1.0*(numpy.prod(filter_shape[0:2]) * numpy.prod(filter_shape[3:]))/prod_pool
# initialize weights with random weights
W_bound = numpy.sqrt(3. / (fan_in + fan_out))#W_bound = 0.035#/(np.sqrt(fan_in/1400))##was 0.02 which was fine. #numpy.sqrt(0.04 / (fan_in + fan_out)) #6.0 / numpy.prod(filter_shape[1:]) #
if verbose:
print("ConvPoolLayer3D"+("(FFT_based)" if b_use_FFT_convolution else "")+":")
print(" input (image) ="+input_shape)
print(" filter ="+filter_shape + " @ std ="+W_bound)
print(" poolsize"+poolsize)
if W==None:
self.W = theano.shared(
numpy.asarray(numpy.random.normal(0, W_bound, filter_shape), dtype=theano.config.floatX)
, borrow=True, name='W_conv')
else:
self.W = W
if ActivationFunction in ['ReLU', 'relu']:
b_values = numpy.ones((filter_shape[0],), dtype=theano.config.floatX)#/filter_shape[1]/filter_shape[3]/filter_shape[4]
elif ActivationFunction in ['sigmoid', 'sig']:
b_values = 0.5*numpy.ones((filter_shape[0],), dtype=theano.config.floatX)
else:
b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
if b==None:
self.b = theano.shared(value=b_values, borrow=True, name='b_conv')
else:
self.b = b
if b_use_FFT_convolution:
self.mode = theano.compile.get_default_mode().including('conv3d_fft', 'convgrad3d_fft', 'convtransp3d_fft')
filters_flip = self.W[:,::-1,:,::-1,::-1] # flip x, y, z
conv_res = T.nnet.conv3D(
V=input.dimshuffle(0,3,4,1,2), # (batch, row, column, time, in channel)
W=filters_flip.dimshuffle(0,3,4,1,2), # (out_channel, row, column, time, in channel)
b=self.b,
d=(1,1,1))
self.conv_out = conv_res.dimshuffle(0,3,4,1,2) # (batchsize, time, channels, height, width)
else:
self.mode = theano.compile.get_default_mode()
self.conv_out = conv3d(signals=input, filters=self.W, border_mode = 'full' if bUpsizingLayer else 'valid',
filters_shape=filter_shape, signals_shape = input_shape if input_shape[0]!=None else None
)
if np.any(poolsize>1):
#print " use_fragment_pooling =",use_fragment_pooling
if use_fragment_pooling:
assert np.all(poolsize==2), "Fragment Pooling (currently) allows only a Poolingfactor of 2! GIVEN: "+str(poolsize)
pooled_out = self.fragmentpool(self.conv_out)
else:
pooled_out = my_max_pool_3d(self.conv_out, pool_shape = (poolsize[0],poolsize[1],poolsize[2]))
else:
pooled_out = self.conv_out
if bDropoutEnabled_:
print(" dropout: on")
if b_use_FFT_convolution:
print(" !!! WARNING: b was already added, this might mess things up!\n"*2)
raise NotImplementedError("BAD: FFT & Dropout")
self.SGD_dropout_rate = theano.shared(np.asarray(np.float32(0.5), dtype=theano.config.floatX)) # lower = less dropped units
rng = T.shared_randomstreams.RandomStreams(int(time.time()))
self.dropout_gate = (np.float32(1.)/(np.float32(1.)-self.SGD_dropout_rate))* rng.binomial(pooled_out.shape,1,1.0-self.SGD_dropout_rate,dtype=theano.config.floatX)
pooled_out = pooled_out * self.dropout_gate
if b_use_FFT_convolution==0:
lin_output = pooled_out + self.b.dimshuffle('x', 'x', 0, 'x', 'x') #the value will be added EVERYWHERE!, don't choose a too big b!
else:
lin_output = pooled_out
# MFP Code #
if dense_output_from_fragments and (input_shape[0]>1 or (use_fragment_pooling and np.any(poolsize>1))):
output_shape = list( theano.function([input], lin_output.shape, mode = self.mode)(numpy.zeros((1 if input_shape[0]==None else input_shape[0],)+input_shape[1:],dtype=numpy.float32)))
if input_shape[0]==None:
output_shape[0] = input_shape[0]
output_shape=tuple(output_shape)
print(' dense_output_from_fragments:: (lin_output) reshaped into dense volume...') #class_probabilities, output too...
lin_output = self.combine_fragments_to_dense_bxcyz(lin_output, output_shape) #(batch, x, channels, y, z)
self.lin_output = lin_output
func, self.ActivationFunction, dic = transfer.parse_transfer_function(ActivationFunction)
pool_ratio = dic["cross_channel_pooling_groups"]
if pool_ratio is not None:
self.output = max_pool_along_channel_axis(lin_output, pool_ratio)
else:
self.output = func(lin_output)
output_shape = list( theano.function([input], self.output.shape, mode = self.mode)(numpy.zeros((1 if input_shape[0]==None else input_shape[0],)+input_shape[1:],dtype=numpy.float32)))
if input_shape[0]==None:
output_shape[0] = input_shape[0]
output_shape=tuple(output_shape)
if verbose:
print(" output ="+output_shape+ "Dropout",("enabled" if bDropoutEnabled_ else "disabled"))
print(" ActivationFunction ="+self.ActivationFunction)
self.output_shape = output_shape
#lin_output:
# bxcyz
#dimshuffle((2,0,1,3,4))
# cbxyz
#flatten(2).dimshuffle((1,0))
# bxyz,c
self.class_probabilities = T.nnet.softmax( lin_output.dimshuffle((2,0,1,3,4)).flatten(2).dimshuffle((1,0)) )#e.g. shape is (22**3, 5) for 5 classes ( i.e. have to set n.of filters = 5) and predicting 22 * 22 * 22 labels at once
#class_probabilities_realshape:
# (b*x*y*z,c) -> (b,x,y,z,c) -> (b,x,c,y,z) #last by: (0,1,4,2,3)
self.class_probabilities_realshape = self.class_probabilities.reshape((output_shape[0],output_shape[1],output_shape[3],output_shape[4], self.number_of_filters)).dimshuffle((0,1,4,2,3)) #lin_output.shape[:2]+lin_output.shape[3:5]+(output_shape[2],)
self.class_prediction = T.argmax(self.class_probabilities_realshape,axis=2)
# store parameters of this layer
self.params = [self.W, self.b]
return
def fragmentpool(self, conv_out):
p000 = my_max_pool_3d(conv_out[:,:-1,:,:-1,:-1], pool_shape=(2,2,2))
p001 = my_max_pool_3d(conv_out[:,:-1,:,:-1, 1:], pool_shape=(2,2,2))
p010 = my_max_pool_3d(conv_out[:,:-1,:, 1:,:-1], pool_shape=(2,2,2))
p011 = my_max_pool_3d(conv_out[:,:-1,:, 1:, 1:], pool_shape=(2,2,2))
p100 = my_max_pool_3d(conv_out[:, 1:,:,:-1,:-1], pool_shape=(2,2,2))
p101 = my_max_pool_3d(conv_out[:, 1:,:,:-1, 1:], pool_shape=(2,2,2))
p110 = my_max_pool_3d(conv_out[:, 1:,:, 1:,:-1], pool_shape=(2,2,2))
p111 = my_max_pool_3d(conv_out[:, 1:,:, 1:, 1:], pool_shape=(2,2,2))
result = T.concatenate((p000, p001, p010, p011, p100, p101, p110, p111), axis=0)
return result
def combine_fragments_to_dense_bxcyz(self, tensor, sh):
""" expected shape: (batch, x, channels, y, z)"""
ttensor = tensor # be same shape as result, no significant time cost
output_stride = self.output_stride
if isinstance(output_stride, list) or isinstance(output_stride, tuple):
example_stride = np.prod(output_stride)#**3
else:
example_stride = output_stride**3
output_stride = np.asarray((output_stride,)*3)
zero = np.array((0), dtype=theano.config.floatX)
embedding = T.alloc( zero, 1, sh[1]*output_stride[0], sh[2], sh[3]*output_stride[1], sh[4]*output_stride[2]) # first arg. is fill-value (0 in this case) and not an element of the shape
ix = offset_map(output_stride)
print(" output_stride"+output_stride)
print(" example_stride"+example_stride)
for i,(n,m,k) in enumerate(ix):
embedding = T.set_subtensor(embedding[:,n::output_stride[0],:,m::output_stride[1],k::output_stride[2]], ttensor[i::example_stride])
return embedding
def randomize_weights(self, scale_w = 3.0):
fan_in = 1.0*numpy.prod(self.filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width * filter depth" / pooling size**3
fan_out = 1.0*(numpy.prod(self.filter_shape[0:2]) * numpy.prod(self.filter_shape[3:])/np.mean(self.pooling_factor)**3)
# initialize weights with random weights
W_bound = numpy.sqrt(scale_w / (fan_in + fan_out))
self.W.set_value(numpy.asarray(numpy.random.normal(0, W_bound, self.filter_shape), dtype=theano.config.floatX))
self.b.set_value(numpy.asarray(numpy.zeros((self.filter_shape[0],), dtype=theano.config.floatX)))
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction
of this model under a given 'target distribution'.
! ONLY WORKS IF y IS A VECTOR OF INTEGERS !
Note: we use the mean instead of the sum so that
the learning rate is less dependent on the batch size
"""
return -T.mean(T.log(self.class_probabilities)[T.arange(y.shape[0]),y]) #shape of class_probabilities is e.g. (14*14,2) for 2 classes and 14**2 labels
def negative_log_likelihood_true(self, y):
"""Return the mean of the negative log-likelihood of the prediction
of this model under a given target distribution.
! y must be a float32-matrix of shape ('batchsize', num_classes) !
"""
return -T.mean(T.sum(T.log(self.class_probabilities)*y,axis=1)) #shape of class_probabilities is e.g. (14*14,2) for 2 classes and 14**2 labels
def negative_log_likelihood_ignore_zero(self, y):
"""--Return the mean of the negative log-likelihood of the prediction
of this model under a given target distribution.
-- zeros in <y> code for "not labeled", these examples will be ignored!
side effect: class 0 in the NNet is basically useless.
--Note: we use the mean instead of the sum so that
the learning rate is less dependent on the batch size
"""
return -T.mean((theano.tensor.neq(y,0))*T.log(self.class_probabilities)[T.arange(y.shape[0]),y]) #shape of class_probabilities is e.g. (14*14,2) for 2 classes and 14**2 labels
def negative_log_likelihood_modulated(self, y, modulation):
"""Return the mean of the negative log-likelihood of the prediction
of this model under a given target distribution.
<modulation> is an float32 vector, value=1 is default behaviour, 0==ignore
Note: we use the mean instead of the sum so that
the learning rate is less dependent on the batch size
"""
return -T.mean(modulation*T.log(self.class_probabilities)[T.arange(y.shape[0]),y])
def negative_log_likelihood_modulated_margin(self, y, modulation=1, margin=0.7, penalty_multiplier = 0):
print("negative_log_likelihood_modulated_margin:: Penalty down to "+100.*penalty_multiplier+"% if prediction is close to the target! Threshold is"+margin)
penalty_multiplier = np.float32(penalty_multiplier)
margin = np.float32(margin)
selected = self.class_probabilities[T.arange(y.shape[0]),y]
r = modulation*T.log(selected)
return -T.mean(r*(selected<margin) + (0 if penalty_multiplier==0 else penalty_multiplier*r*(selected>=margin)) )
def squared_distance(self, Target,b_flatten=False):
"""Target is the TARGET image (vectorized), -> shape(x) = (batchsize, imgsize**2)
output: scalar float32
"""
if b_flatten:
return T.mean( (self.output.flatten(2) - Target)**2 )
else:
return T.mean( (self.output - Target)**2 )
def squared_distance_w_margin(self, TARGET, margin=0.3):
""" output: scalar float32
"""
print("Conv3D::squared_distance_w_margin (binary predictions).")
margin = np.float32(margin)
out = self.output
NULLz = T.zeros_like(out)
sqsq_err = TARGET * T.maximum(NULLz, 1 - out - margin)**2 + (1-TARGET) * T.maximum(NULLz, out - margin)**2
return T.mean(sqsq_err)
def cross_entropy(self, Target):
"""Target is the TARGET image (vectorized), -> shape(x) = (batchsize, imgsize**2)
output: scalar float32
"""
#print np.shape( theano.function([self.input], self.colorchannel_probabilities)(np.random.random( self.input_shape ).astype(np.float32)) )
return -T.mean( T.log(self.class_probabilities)*Target + T.log(1-self.class_probabilities)*(1-Target) )# #.reshape(new_shape)[index[0]:index[2],index[1]:index[3]]
def cross_entropy_array(self, Target):
"""Target is the TARGET image (vectorized), -> shape(x) = (batchsize, imgsize**2)
the output is of length: <batchsize>, Use cross_entropy() to get a scalar output.
"""
return -T.mean( T.log(self.class_probabilities)*Target + T.log(1-self.class_probabilities)*(1-Target) ,axis=1)
def errors(self, y):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.class_prediction.ndim:
raise TypeError('y should have the same shape as self.class_prediction',
('y', y.type, 'class_prediction', self.class_prediction.type))
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.class_prediction, y))
else:
print("something went wrong")
raise NotImplementedError()
| [
"theano.tensor.maximum",
"numpy.ones",
"numpy.mean",
"numpy.random.normal",
"numpy.prod",
"theano.compile.get_default_mode",
"theano.tensor.log",
"theano.tensor.concatenate",
"theano.tensor.set_subtensor",
"theano.tensor.zeros_like",
"theano.tensor.mean",
"theano.shared",
"numpy.float.is_int... | [((2613, 2635), 'numpy.log2', 'np.log2', (['output_stride'], {}), '(output_stride)\n', (2620, 2635), True, 'import numpy as np\n'), ((2736, 2762), 'numpy.all', 'np.all', (['(output_stride == 2)'], {}), '(output_stride == 2)\n', (2742, 2762), True, 'import numpy as np\n'), ((2652, 2674), 'numpy.float.is_integer', 'np.float.is_integer', (['x'], {}), '(x)\n', (2671, 2674), True, 'import numpy as np\n'), ((2780, 2882), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0\n ], [1, 1, 1]]'], {}), '([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1],\n [1, 1, 0], [1, 1, 1]])\n', (2788, 2882), True, 'import numpy as np\n'), ((3273, 3290), 'numpy.array', 'np.array', (['current'], {}), '(current)\n', (3281, 3290), True, 'import numpy as np\n'), ((4914, 4931), 'numpy.prod', 'np.prod', (['poolsize'], {}), '(poolsize)\n', (4921, 4931), True, 'import numpy as np\n'), ((5112, 5135), 'numpy.asanyarray', 'np.asanyarray', (['poolsize'], {}), '(poolsize)\n', (5125, 5135), True, 'import numpy as np\n'), ((5748, 5784), 'numpy.sqrt', 'numpy.sqrt', (['(3.0 / (fan_in + fan_out))'], {}), '(3.0 / (fan_in + fan_out))\n', (5758, 5784), False, 'import numpy\n'), ((7950, 7970), 'numpy.any', 'np.any', (['(poolsize > 1)'], {}), '(poolsize > 1)\n', (7956, 7970), True, 'import numpy as np\n'), ((10197, 10249), 'TransferFunctions.parse_transfer_function', 'transfer.parse_transfer_function', (['ActivationFunction'], {}), '(ActivationFunction)\n', (10229, 10249), True, 'import TransferFunctions as transfer\n'), ((11828, 11880), 'theano.tensor.argmax', 'T.argmax', (['self.class_probabilities_realshape'], {'axis': '(2)'}), '(self.class_probabilities_realshape, axis=2)\n', (11836, 11880), True, 'import theano.tensor as T\n'), ((12032, 12099), 'maxPool3D.my_max_pool_3d', 'my_max_pool_3d', (['conv_out[:, :-1, :, :-1, :-1]'], {'pool_shape': '(2, 2, 2)'}), '(conv_out[:, :-1, :, :-1, :-1], pool_shape=(2, 2, 2))\n', 
(12046, 12099), False, 'from maxPool3D import my_max_pool_3d\n'), ((12107, 12173), 'maxPool3D.my_max_pool_3d', 'my_max_pool_3d', (['conv_out[:, :-1, :, :-1, 1:]'], {'pool_shape': '(2, 2, 2)'}), '(conv_out[:, :-1, :, :-1, 1:], pool_shape=(2, 2, 2))\n', (12121, 12173), False, 'from maxPool3D import my_max_pool_3d\n'), ((12182, 12248), 'maxPool3D.my_max_pool_3d', 'my_max_pool_3d', (['conv_out[:, :-1, :, 1:, :-1]'], {'pool_shape': '(2, 2, 2)'}), '(conv_out[:, :-1, :, 1:, :-1], pool_shape=(2, 2, 2))\n', (12196, 12248), False, 'from maxPool3D import my_max_pool_3d\n'), ((12257, 12322), 'maxPool3D.my_max_pool_3d', 'my_max_pool_3d', (['conv_out[:, :-1, :, 1:, 1:]'], {'pool_shape': '(2, 2, 2)'}), '(conv_out[:, :-1, :, 1:, 1:], pool_shape=(2, 2, 2))\n', (12271, 12322), False, 'from maxPool3D import my_max_pool_3d\n'), ((12332, 12398), 'maxPool3D.my_max_pool_3d', 'my_max_pool_3d', (['conv_out[:, 1:, :, :-1, :-1]'], {'pool_shape': '(2, 2, 2)'}), '(conv_out[:, 1:, :, :-1, :-1], pool_shape=(2, 2, 2))\n', (12346, 12398), False, 'from maxPool3D import my_max_pool_3d\n'), ((12407, 12472), 'maxPool3D.my_max_pool_3d', 'my_max_pool_3d', (['conv_out[:, 1:, :, :-1, 1:]'], {'pool_shape': '(2, 2, 2)'}), '(conv_out[:, 1:, :, :-1, 1:], pool_shape=(2, 2, 2))\n', (12421, 12472), False, 'from maxPool3D import my_max_pool_3d\n'), ((12482, 12547), 'maxPool3D.my_max_pool_3d', 'my_max_pool_3d', (['conv_out[:, 1:, :, 1:, :-1]'], {'pool_shape': '(2, 2, 2)'}), '(conv_out[:, 1:, :, 1:, :-1], pool_shape=(2, 2, 2))\n', (12496, 12547), False, 'from maxPool3D import my_max_pool_3d\n'), ((12557, 12621), 'maxPool3D.my_max_pool_3d', 'my_max_pool_3d', (['conv_out[:, 1:, :, 1:, 1:]'], {'pool_shape': '(2, 2, 2)'}), '(conv_out[:, 1:, :, 1:, 1:], pool_shape=(2, 2, 2))\n', (12571, 12621), False, 'from maxPool3D import my_max_pool_3d\n'), ((12634, 12705), 'theano.tensor.concatenate', 'T.concatenate', (['(p000, p001, p010, p011, p100, p101, p110, p111)'], {'axis': '(0)'}), '((p000, p001, p010, p011, p100, p101, 
p110, p111), axis=0)\n', (12647, 12705), True, 'import theano.tensor as T\n'), ((13221, 13260), 'numpy.array', 'np.array', (['(0)'], {'dtype': 'theano.config.floatX'}), '(0, dtype=theano.config.floatX)\n', (13229, 13260), True, 'import numpy as np\n'), ((13281, 13386), 'theano.tensor.alloc', 'T.alloc', (['zero', '(1)', '(sh[1] * output_stride[0])', 'sh[2]', '(sh[3] * output_stride[1])', '(sh[4] * output_stride[2])'], {}), '(zero, 1, sh[1] * output_stride[0], sh[2], sh[3] * output_stride[1],\n sh[4] * output_stride[2])\n', (13288, 13386), True, 'import theano.tensor as T\n'), ((14260, 14300), 'numpy.sqrt', 'numpy.sqrt', (['(scale_w / (fan_in + fan_out))'], {}), '(scale_w / (fan_in + fan_out))\n', (14270, 14300), False, 'import numpy\n'), ((16965, 16995), 'numpy.float32', 'np.float32', (['penalty_multiplier'], {}), '(penalty_multiplier)\n', (16975, 16995), True, 'import numpy as np\n'), ((17013, 17031), 'numpy.float32', 'np.float32', (['margin'], {}), '(margin)\n', (17023, 17031), True, 'import numpy as np\n'), ((17815, 17833), 'numpy.float32', 'np.float32', (['margin'], {}), '(margin)\n', (17825, 17833), True, 'import numpy as np\n'), ((17877, 17894), 'theano.tensor.zeros_like', 'T.zeros_like', (['out'], {}), '(out)\n', (17889, 17894), True, 'import theano.tensor as T\n'), ((18025, 18041), 'theano.tensor.mean', 'T.mean', (['sqsq_err'], {}), '(sqsq_err)\n', (18031, 18041), True, 'import theano.tensor as T\n'), ((2104, 2119), 'theano.tensor.maximum', 'T.maximum', (['s', 't'], {}), '(s, t)\n', (2113, 2119), True, 'import theano.tensor as T\n'), ((5556, 5584), 'numpy.prod', 'numpy.prod', (['filter_shape[1:]'], {}), '(filter_shape[1:])\n', (5566, 5584), False, 'import numpy\n'), ((6529, 6587), 'numpy.ones', 'numpy.ones', (['(filter_shape[0],)'], {'dtype': 'theano.config.floatX'}), '((filter_shape[0],), dtype=theano.config.floatX)\n', (6539, 6587), False, 'import numpy\n'), ((6921, 6978), 'theano.shared', 'theano.shared', ([], {'value': 'b_values', 'borrow': '(True)', 
'name': '"""b_conv"""'}), "(value=b_values, borrow=True, name='b_conv')\n", (6934, 6978), False, 'import theano\n'), ((7661, 7694), 'theano.compile.get_default_mode', 'theano.compile.get_default_mode', ([], {}), '()\n', (7692, 7694), False, 'import theano\n'), ((7723, 7911), 'conv3d2d.conv3d', 'conv3d', ([], {'signals': 'input', 'filters': 'self.W', 'border_mode': "('full' if bUpsizingLayer else 'valid')", 'filters_shape': 'filter_shape', 'signals_shape': '(input_shape if input_shape[0] != None else None)'}), "(signals=input, filters=self.W, border_mode='full' if bUpsizingLayer else\n 'valid', filters_shape=filter_shape, signals_shape=input_shape if \n input_shape[0] != None else None)\n", (7729, 7911), False, 'from conv3d2d import conv3d\n'), ((13067, 13089), 'numpy.prod', 'np.prod', (['output_stride'], {}), '(output_stride)\n', (13074, 13089), True, 'import numpy as np\n'), ((13177, 13209), 'numpy.asarray', 'np.asarray', (['((output_stride,) * 3)'], {}), '((output_stride,) * 3)\n', (13187, 13209), True, 'import numpy as np\n'), ((13651, 13779), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['embedding[:, n::output_stride[0], :, m::output_stride[1], k::output_stride[2]]', 'ttensor[i::example_stride]'], {}), '(embedding[:, n::output_stride[0], :, m::output_stride[1], k\n ::output_stride[2]], ttensor[i::example_stride])\n', (13666, 13779), True, 'import theano.tensor as T\n'), ((13867, 13900), 'numpy.prod', 'numpy.prod', (['self.filter_shape[1:]'], {}), '(self.filter_shape[1:])\n', (13877, 13900), False, 'import numpy\n'), ((17123, 17138), 'theano.tensor.log', 'T.log', (['selected'], {}), '(selected)\n', (17128, 17138), True, 'import theano.tensor as T\n'), ((17155, 17275), 'theano.tensor.mean', 'T.mean', (['(r * (selected < margin) + (0 if penalty_multiplier == 0 else \n penalty_multiplier * r * (selected >= margin)))'], {}), '(r * (selected < margin) + (0 if penalty_multiplier == 0 else \n penalty_multiplier * r * (selected >= margin)))\n', (17161, 17275), 
True, 'import theano.tensor as T\n'), ((17578, 17613), 'theano.tensor.mean', 'T.mean', (['((self.output - Target) ** 2)'], {}), '((self.output - Target) ** 2)\n', (17584, 17613), True, 'import theano.tensor as T\n'), ((6818, 6877), 'numpy.zeros', 'numpy.zeros', (['(filter_shape[0],)'], {'dtype': 'theano.config.floatX'}), '((filter_shape[0],), dtype=theano.config.floatX)\n', (6829, 6877), False, 'import numpy\n'), ((8099, 8120), 'numpy.all', 'np.all', (['(poolsize == 2)'], {}), '(poolsize == 2)\n', (8105, 8120), True, 'import numpy as np\n'), ((8318, 8403), 'maxPool3D.my_max_pool_3d', 'my_max_pool_3d', (['self.conv_out'], {'pool_shape': '(poolsize[0], poolsize[1], poolsize[2])'}), '(self.conv_out, pool_shape=(poolsize[0], poolsize[1],\n poolsize[2]))\n', (8332, 8403), False, 'from maxPool3D import my_max_pool_3d\n'), ((10516, 10575), 'theano.function', 'theano.function', (['[input]', 'self.output.shape'], {'mode': 'self.mode'}), '([input], self.output.shape, mode=self.mode)\n', (10531, 10575), False, 'import theano\n'), ((10578, 10684), 'numpy.zeros', 'numpy.zeros', (['((1 if input_shape[0] == None else input_shape[0],) + input_shape[1:])'], {'dtype': 'numpy.float32'}), '((1 if input_shape[0] == None else input_shape[0],) +\n input_shape[1:], dtype=numpy.float32)\n', (10589, 10684), False, 'import numpy\n'), ((14341, 14391), 'numpy.random.normal', 'numpy.random.normal', (['(0)', 'W_bound', 'self.filter_shape'], {}), '(0, W_bound, self.filter_shape)\n', (14360, 14391), False, 'import numpy\n'), ((14461, 14525), 'numpy.zeros', 'numpy.zeros', (['(self.filter_shape[0],)'], {'dtype': 'theano.config.floatX'}), '((self.filter_shape[0],), dtype=theano.config.floatX)\n', (14472, 14525), False, 'import numpy\n'), ((17076, 17096), 'theano.tensor.arange', 'T.arange', (['y.shape[0]'], {}), '(y.shape[0])\n', (17084, 17096), True, 'import theano.tensor as T\n'), ((19775, 19806), 'theano.tensor.neq', 'T.neq', (['self.class_prediction', 'y'], {}), '(self.class_prediction, y)\n', 
(19780, 19806), True, 'import theano.tensor as T\n'), ((5609, 5638), 'numpy.prod', 'numpy.prod', (['filter_shape[0:2]'], {}), '(filter_shape[0:2])\n', (5619, 5638), False, 'import numpy\n'), ((5641, 5669), 'numpy.prod', 'numpy.prod', (['filter_shape[3:]'], {}), '(filter_shape[3:])\n', (5651, 5669), False, 'import numpy\n'), ((6297, 6342), 'numpy.random.normal', 'numpy.random.normal', (['(0)', 'W_bound', 'filter_shape'], {}), '(0, W_bound, filter_shape)\n', (6316, 6342), False, 'import numpy\n'), ((6721, 6779), 'numpy.ones', 'numpy.ones', (['(filter_shape[0],)'], {'dtype': 'theano.config.floatX'}), '((filter_shape[0],), dtype=theano.config.floatX)\n', (6731, 6779), False, 'import numpy\n'), ((7080, 7113), 'theano.compile.get_default_mode', 'theano.compile.get_default_mode', ([], {}), '()\n', (7111, 7113), False, 'import theano\n'), ((8779, 8794), 'numpy.float32', 'np.float32', (['(0.5)'], {}), '(0.5)\n', (8789, 8794), True, 'import numpy as np\n'), ((8913, 8924), 'time.time', 'time.time', ([], {}), '()\n', (8922, 8924), False, 'import time\n'), ((8961, 8976), 'numpy.float32', 'np.float32', (['(1.0)'], {}), '(1.0)\n', (8971, 8976), True, 'import numpy as np\n'), ((9506, 9526), 'numpy.any', 'np.any', (['(poolsize > 1)'], {}), '(poolsize > 1)\n', (9512, 9526), True, 'import numpy as np\n'), ((9562, 9620), 'theano.function', 'theano.function', (['[input]', 'lin_output.shape'], {'mode': 'self.mode'}), '([input], lin_output.shape, mode=self.mode)\n', (9577, 9620), False, 'import theano\n'), ((9623, 9729), 'numpy.zeros', 'numpy.zeros', (['((1 if input_shape[0] == None else input_shape[0],) + input_shape[1:])'], {'dtype': 'numpy.float32'}), '((1 if input_shape[0] == None else input_shape[0],) +\n input_shape[1:], dtype=numpy.float32)\n', (9634, 9729), False, 'import numpy\n'), ((14089, 14123), 'numpy.prod', 'numpy.prod', (['self.filter_shape[0:2]'], {}), '(self.filter_shape[0:2])\n', (14099, 14123), False, 'import numpy\n'), ((14126, 14159), 'numpy.prod', 'numpy.prod', 
(['self.filter_shape[3:]'], {}), '(self.filter_shape[3:])\n', (14136, 14159), False, 'import numpy\n'), ((14160, 14188), 'numpy.mean', 'np.mean', (['self.pooling_factor'], {}), '(self.pooling_factor)\n', (14167, 14188), True, 'import numpy as np\n'), ((14922, 14953), 'theano.tensor.log', 'T.log', (['self.class_probabilities'], {}), '(self.class_probabilities)\n', (14927, 14953), True, 'import theano.tensor as T\n'), ((15987, 16010), 'theano.tensor.neq', 'theano.tensor.neq', (['y', '(0)'], {}), '(y, 0)\n', (16004, 16010), False, 'import theano\n'), ((17923, 17957), 'theano.tensor.maximum', 'T.maximum', (['NULLz', '(1 - out - margin)'], {}), '(NULLz, 1 - out - margin)\n', (17932, 17957), True, 'import theano.tensor as T\n'), ((17976, 18006), 'theano.tensor.maximum', 'T.maximum', (['NULLz', '(out - margin)'], {}), '(NULLz, out - margin)\n', (17985, 18006), True, 'import theano.tensor as T\n'), ((8977, 8992), 'numpy.float32', 'np.float32', (['(1.0)'], {}), '(1.0)\n', (8987, 8992), True, 'import numpy as np\n'), ((14954, 14974), 'theano.tensor.arange', 'T.arange', (['y.shape[0]'], {}), '(y.shape[0])\n', (14962, 14974), True, 'import theano.tensor as T\n'), ((15359, 15390), 'theano.tensor.log', 'T.log', (['self.class_probabilities'], {}), '(self.class_probabilities)\n', (15364, 15390), True, 'import theano.tensor as T\n'), ((16011, 16042), 'theano.tensor.log', 'T.log', (['self.class_probabilities'], {}), '(self.class_probabilities)\n', (16016, 16042), True, 'import theano.tensor as T\n'), ((16603, 16634), 'theano.tensor.log', 'T.log', (['self.class_probabilities'], {}), '(self.class_probabilities)\n', (16608, 16634), True, 'import theano.tensor as T\n'), ((18391, 18422), 'theano.tensor.log', 'T.log', (['self.class_probabilities'], {}), '(self.class_probabilities)\n', (18396, 18422), True, 'import theano.tensor as T\n'), ((18432, 18467), 'theano.tensor.log', 'T.log', (['(1 - self.class_probabilities)'], {}), '(1 - self.class_probabilities)\n', (18437, 18467), True, 
'import theano.tensor as T\n'), ((18803, 18834), 'theano.tensor.log', 'T.log', (['self.class_probabilities'], {}), '(self.class_probabilities)\n', (18808, 18834), True, 'import theano.tensor as T\n'), ((18844, 18879), 'theano.tensor.log', 'T.log', (['(1 - self.class_probabilities)'], {}), '(1 - self.class_probabilities)\n', (18849, 18879), True, 'import theano.tensor as T\n'), ((16043, 16063), 'theano.tensor.arange', 'T.arange', (['y.shape[0]'], {}), '(y.shape[0])\n', (16051, 16063), True, 'import theano.tensor as T\n'), ((16635, 16655), 'theano.tensor.arange', 'T.arange', (['y.shape[0]'], {}), '(y.shape[0])\n', (16643, 16655), True, 'import theano.tensor as T\n')] |
import numpy as np
from numpy_fracdiff import fracdiff
from statsmodels.tsa.stattools import adfuller
import scipy.optimize
def loss_fn(d: float, x: np.ndarray, n_trunc: int) -> float:
    """Loss for choosing a fractional differencing order ``d``.

    Returns the squared distance between the ADF test statistic of the
    fractionally differenced series and its 1% critical value, plus the
    squared p-value; minimizing this drives the differenced series toward
    stationarity at the 1% level.
    """
    # compute fractal diff for given order and truncation
    z = fracdiff(x, order=d, truncation=n_trunc)
    # Augmented Dickey-Fuller test on the part unaffected by truncation
    stat, pval, _, _, crit, _ = adfuller(
        z[n_trunc:], regression='c', autolag='BIC')
    # zero exactly when stat equals the 1% critical value and pval is zero
    return (stat - crit['1%'])**2 + pval**2
def fracadf1(X: np.array, n_trunc: int = 100,
             lb: float = 0.01, ub: float = 1.5,
             xtol: float = 1e-4, n_maxiter: int = 200) -> float:
    """Estimate the fractional differencing order with a constant
    truncation order.

    For a 1-D input a single order is returned; for a 2-D input one order
    per column.  The order is searched in [lb, ub] by bounded scalar
    minimization of ``loss_fn``.
    """
    # never truncate away more than all but 30 observations
    effective_trunc = min(X.shape[0] - 30, n_trunc)

    def _fit_order(series):
        # bounded 1-D minimization of the ADF-based loss
        return scipy.optimize.fminbound(
            loss_fn, lb, ub, args=(series, effective_trunc),
            xtol=xtol, maxfun=n_maxiter)

    if X.ndim == 1:
        return _fit_order(X)
    # many time series, one estimate per column
    return np.array([_fit_order(X[:, j]) for j in range(X.shape[1])])
| [
"numpy.empty",
"statsmodels.tsa.stattools.adfuller",
"numpy_fracdiff.fracdiff"
] | [((251, 291), 'numpy_fracdiff.fracdiff', 'fracdiff', (['x'], {'order': 'd', 'truncation': 'n_trunc'}), '(x, order=d, truncation=n_trunc)\n', (259, 291), False, 'from numpy_fracdiff import fracdiff\n'), ((347, 399), 'statsmodels.tsa.stattools.adfuller', 'adfuller', (['z[n_trunc:]'], {'regression': '"""c"""', 'autolag': '"""BIC"""'}), "(z[n_trunc:], regression='c', autolag='BIC')\n", (355, 399), False, 'from statsmodels.tsa.stattools import adfuller\n'), ((1092, 1115), 'numpy.empty', 'np.empty', (['(n_features,)'], {}), '((n_features,))\n', (1100, 1115), True, 'import numpy as np\n')] |
import random
import numpy as np
from lib.tools.grid import Grid
from lib.objects.tracktile import TrackTile
# this part could obviously be streamlined for smaller code footprint
# not sure how to go about it without sacrificing code readability
# since this is called once every game, this wouldn't be the main bottleneck
# for the performance
def create_track(width, height):
    """Build a closed loop of linked TrackTiles covering a width x height grid."""
    # numpy arrays index as (row, col) == (y, x), hence the swapped arguments
    maze = create_track_arr(height, width)
    ordered_grids = arr_to_grids(maze)
    tiles = grids_to_tiles(ordered_grids)
    for tile in tiles:
        tile.set_track_properties()
    return tiles
# creates two half sized mazes and glues them together
def create_track_arr(height, width):
    """Create a closed-loop track array by gluing two half-height mazes."""
    half_height = height // 2

    def _valid_half():
        # retry until the generated maze passes the connectivity check
        while True:
            candidate = create_maze(half_height, width)
            if check_maze(candidate):
                return candidate

    top = _valid_half()
    bottom = _valid_half()
    # open matching passages on both sides so the halves connect
    for half in (top, bottom):
        half[0, 1] = 1
        half[0, width - 2] = 1
    # mirror the first half vertically and stack the two into one loop
    return np.vstack((top[::-1, :], bottom))
def arr_to_grids(arr):
    """Walk the track loop encoded in ``arr`` (1 = track cell) and return
    its cells as an ordered list of Grid coordinates.

    The most upper-left track cell is taken as a temporary start; the loop
    is then followed cell by cell until a neighbor of the start is reached
    again.
    """
    result = []
    # find the most upper left coordinate with a track cell (value 1)
    # and set it as a temporary starting point
    for i, row in enumerate(arr):
        # BUG FIX: the original iterated enumerate(arr) here, walking row
        # indices instead of this row's columns — wrong (or IndexError)
        # whenever the array is not square.
        for j, value in enumerate(row):
            if value == 1:
                result.append(Grid(j, i))
                break
        if result:
            break
    start = result[0]
    while True:
        current = result[-1]
        # get four adjacent grids to check
        adjacents = current.adjacents()
        # if any adjacent grid equals the starting grid and we have moved
        # away already, the loop is complete
        if any(adjacent == start for adjacent in adjacents) \
                and len(result) > 2:
            break
        for adjacent in adjacents:
            if arr[adjacent.y][adjacent.x] == 1 and adjacent not in result:
                result.append(adjacent)
                break
    return result
def grids_to_tiles(grids):
    """Wrap each Grid in a TrackTile and link the tiles into a doubly
    linked ring (each tile knows its ``prev`` and ``next``)."""
    tiles = [TrackTile(grid) for grid in grids]
    # link consecutive tiles in both directions
    for earlier, later in zip(tiles, tiles[1:]):
        earlier.next = later
        later.prev = earlier
    # connect the end points to close the loop
    tiles[-1].next = tiles[0]
    tiles[0].prev = tiles[-1]
    return tiles
# modified version of depth first search algorithm
# snice walls also take up space on the grid,
# this is not quite the same as the original
# and there are some problems :V
# too lazy to come up with a proper one
def create_maze(height, width):
    """Carve a maze on a (height, width) grid with a randomized
    depth-first search.

    Cell values: 0 = untouched wall, 1 = carved path, -1 = backtracked
    dead end.  The search runs from (1, 1) toward (1, width - 2).
    """
    arr = np.zeros((height, width), dtype='int')
    start = (1, 1)
    end = (1, width - 2)
    stack = [start]
    current = start
    arr[current] = 1
    while True:
        candidates = possible_neighbors(arr, current)
        if end in candidates:
            # goal reachable: carve it and stop
            current = end
            stack.append(current)
            arr[current] = 1
            break
        if candidates:
            # viable step forward: pick one at random
            current = random.choice(candidates)
            stack.append(current)
            arr[current] = 1
        else:
            # dead end: mark the cell and backtrack
            arr[current] = -1
            stack.pop()
            if not stack:
                break
            current = stack[-1]
    return arr
def check_maze(arr):
    """Accept a maze only if its start survived and its bottom row is reached.

    Due to special cases the start and end points occasionally fail to
    connect; since that is rare, a failed maze is simply regenerated.  For
    a track we additionally require a path cell in the second-to-last row.
    """
    start_alive = arr[1, 1] != -1
    bottom_reached = bool((arr[-2, :] == 1).any())
    return start_alive and bottom_reached
def is_edge(arr, idx):
    """Return True if ``idx`` (row, col) lies on the outer border of ``arr``.

    Idiom fix: the original wrapped the condition in
    ``if ...: return True else: return False`` — return the boolean directly.
    """
    rows, cols = arr.shape
    return idx[0] in (0, rows - 1) or idx[1] in (0, cols - 1)
def is_visited(arr, idx):
    """True if the cell at ``idx`` was already carved as path (value 1)."""
    return 1 == arr[idx]
def is_backtracked(arr, idx):
    """True if the cell at ``idx`` was marked as a dead end (value -1)."""
    return -1 == arr[idx]
def no_space(arr, idx):
    """True if more than one 4-neighbor of ``idx`` is already path (1) or a
    marked dead end (-1), i.e. carving here would touch the track twice."""
    y, x = idx
    four_neighbors = ((y - 1, x), (y + 1, x), (y, x - 1), (y, x + 1))
    occupied = sum(1 for cell in four_neighbors if arr[cell] in (1, -1))
    return occupied > 1
def possible_neighbors(arr, idx):
    """Return the 4-neighbors of ``idx`` that the maze carver may step into:
    not on the border, not already carved, not a dead end, and not adjacent
    to more than one occupied cell."""
    candidates = [
        (idx[0] - 1, idx[1]),
        (idx[0] + 1, idx[1]),
        (idx[0], idx[1] - 1),
        (idx[0], idx[1] + 1),
    ]
    # `or` short-circuits, so no_space never indexes out of bounds: any
    # border candidate is rejected by is_edge first
    return [cell for cell in candidates
            if not (is_edge(arr, cell)
                    or is_visited(arr, cell)
                    or is_backtracked(arr, cell)
                    or no_space(arr, cell))]
| [
"numpy.zeros",
"random.choice",
"lib.tools.grid.Grid",
"lib.objects.tracktile.TrackTile",
"numpy.vstack"
] | [((1125, 1140), 'numpy.vstack', 'np.vstack', (['arrs'], {}), '(arrs)\n', (1134, 1140), True, 'import numpy as np\n'), ((2876, 2914), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': '"""int"""'}), "((height, width), dtype='int')\n", (2884, 2914), True, 'import numpy as np\n'), ((2279, 2294), 'lib.objects.tracktile.TrackTile', 'TrackTile', (['grid'], {}), '(grid)\n', (2288, 2294), False, 'from lib.objects.tracktile import TrackTile\n'), ((3399, 3423), 'random.choice', 'random.choice', (['neighbors'], {}), '(neighbors)\n', (3412, 3423), False, 'import random\n'), ((2467, 2482), 'lib.objects.tracktile.TrackTile', 'TrackTile', (['grid'], {}), '(grid)\n', (2476, 2482), False, 'from lib.objects.tracktile import TrackTile\n'), ((1444, 1454), 'lib.tools.grid.Grid', 'Grid', (['j', 'i'], {}), '(j, i)\n', (1448, 1454), False, 'from lib.tools.grid import Grid\n')] |
"""Problem solutions for approximation lecture."""
from itertools import product
import numpy as np
import pandas as pd
from approximation_algorithms import get_interpolator
from approximation_auxiliary import compute_interpolation_error_df
from approximation_auxiliary import get_uniform_nodes
from approximation_problems import problem_kinked
from approximation_problems import problem_reciprocal_exponential
from approximation_problems import problem_runge
def test_exercise_1():
"""Run test exercise 1."""
index = product([10, 20, 30, 40, 50], np.linspace(-1, 1, 1000))
index = pd.MultiIndex.from_tuples(index, names=("Degree", "Point"))
df = pd.DataFrame(columns=["Value", "Approximation"], index=index)
df["Value"] = problem_runge(df.index.get_level_values("Point"))
for degree in [10, 20, 30, 40, 50]:
xnodes = get_uniform_nodes(degree, -1, 1)
poly = np.polynomial.Polynomial.fit(xnodes, problem_runge(xnodes), degree)
xvalues = df.index.get_level_values("Point").unique()
yvalues = poly(xvalues)
df.loc[(degree, slice(None)), "Approximation"] = yvalues
df["Error"] = df["Value"] - df["Approximation"]
df.groupby("Degree").apply(compute_interpolation_error_df).plot()
def test_exercise_2():
"""Run test exercise 2."""
index = product(
["runge", "reciprocal_exponential", "kinked"],
["linear", "cubic", "chebychev"],
[10, 20, 30],
get_uniform_nodes(1000, -1, 1),
)
index = pd.MultiIndex.from_tuples(index, names=("Function", "Method", "Degree", "Point"))
df = pd.DataFrame(columns=["Value", "Approximation"], index=index)
test_functions = {}
test_functions["runge"] = problem_runge
test_functions["reciprocal_exponential"] = problem_reciprocal_exponential
test_functions["kinked"] = problem_kinked
points = df.index.get_level_values("Point").unique()
for function in df.index.get_level_values("Function").unique():
test_function = test_functions[function]
for method in df.index.get_level_values("Method").unique():
for degree in df.index.get_level_values("Degree").unique():
interp = get_interpolator(method, degree, test_function)
index = (function, method, degree, slice(None))
df.loc[index, "Approximation"] = interp(points)
df.loc[index, "Value"] = test_function(points)
df["Error"] = df["Value"] - df["Approximation"]
df.groupby(["Function", "Method", "Degree"]).apply(compute_interpolation_error_df)
| [
"pandas.DataFrame",
"pandas.MultiIndex.from_tuples",
"approximation_auxiliary.get_uniform_nodes",
"numpy.linspace",
"approximation_algorithms.get_interpolator",
"approximation_problems.problem_runge"
] | [((598, 657), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['index'], {'names': "('Degree', 'Point')"}), "(index, names=('Degree', 'Point'))\n", (623, 657), True, 'import pandas as pd\n'), ((667, 728), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Value', 'Approximation']", 'index': 'index'}), "(columns=['Value', 'Approximation'], index=index)\n", (679, 728), True, 'import pandas as pd\n'), ((1517, 1602), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['index'], {'names': "('Function', 'Method', 'Degree', 'Point')"}), "(index, names=('Function', 'Method', 'Degree',\n 'Point'))\n", (1542, 1602), True, 'import pandas as pd\n'), ((1608, 1669), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Value', 'Approximation']", 'index': 'index'}), "(columns=['Value', 'Approximation'], index=index)\n", (1620, 1669), True, 'import pandas as pd\n'), ((559, 583), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(1000)'], {}), '(-1, 1, 1000)\n', (570, 583), True, 'import numpy as np\n'), ((857, 889), 'approximation_auxiliary.get_uniform_nodes', 'get_uniform_nodes', (['degree', '(-1)', '(1)'], {}), '(degree, -1, 1)\n', (874, 889), False, 'from approximation_auxiliary import get_uniform_nodes\n'), ((1466, 1496), 'approximation_auxiliary.get_uniform_nodes', 'get_uniform_nodes', (['(1000)', '(-1)', '(1)'], {}), '(1000, -1, 1)\n', (1483, 1496), False, 'from approximation_auxiliary import get_uniform_nodes\n'), ((942, 963), 'approximation_problems.problem_runge', 'problem_runge', (['xnodes'], {}), '(xnodes)\n', (955, 963), False, 'from approximation_problems import problem_runge\n'), ((2204, 2251), 'approximation_algorithms.get_interpolator', 'get_interpolator', (['method', 'degree', 'test_function'], {}), '(method, degree, test_function)\n', (2220, 2251), False, 'from approximation_algorithms import get_interpolator\n')] |
# -*- coding: utf-8 -*-
# ---------------------------- IMPORTS ---------------------------- #
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
# multiprocessing
from builtins import zip
from builtins import object
import itertools as it
from multiprocessing.pool import ThreadPool as Pool
# three-party
import cv2
import numpy as np
# custom
from .config import MANAGER, FLAG_DEBUG
#from cache import memoize
from .arrayops import SimKeyPoint, normsigmoid
from .root import NO_CPUs
# ---------------------------- GLOBALS ---------------------------- #
if FLAG_DEBUG:
print("configured to use {} CPUs".format(NO_CPUs))
# DO NOT USE _pool when module is imported and this runs within it. It
# creates a deadlock"
_pool = Pool(processes=NO_CPUs)
feature_name = 'sift-flann'
# ----------------------------SPECIALIZED FUNCTIONS---------------------------- #
class Feature(object):
"""
Class to manage detection and computation of features
:param pool: multiprocessing pool (dummy, it uses multithreading)
:param useASIFT: if True adds Affine perspectives to the detector.
:param debug: if True prints to the stdout debug messages.
"""
def __init__(self, pool=_pool, useASIFT=True, debug=True):
self.pool = pool
self.detector = None
self.matcher = None
self.useASIFT = useASIFT
self.debug = debug
def detectAndCompute(self, img, mask=None):
"""
detect keypoints and descriptors
:param img: image to find keypoints and its descriptors
:param mask: mask to detect keypoints (it uses default, mask[:] = 255)
:return: keypoints,descriptors
"""
# bulding parameters of tilt and rotation variations
if self.useASIFT:
params = [(1.0, 0.0)] # first tilt and rotation
# phi rotations for t tilts of the image
for t in 2**(0.5 * np.arange(1, 6)):
for phi in np.arange(0, 180, 72.0 / t):
params.append((t, phi))
def helper(param):
t, phi = param # tilt, phi (rotation)
# computing the affine transform
# get tilted image, mask and transformation
timg, tmask, Ai = affine_skew(t, phi, img, mask)
# Find keypoints and descriptors with the detector
keypoints, descrs = self.detector.detectAndCompute(
timg, tmask) # use detector
for kp in keypoints:
x, y = kp.pt # get actual keypoints
# transform keypoints to original img
kp.pt = tuple(np.dot(Ai, (x, y, 1)))
if descrs is None:
descrs = [] # faster than: descrs or []
return keypoints, descrs
if self.pool is None:
try:
ires = it.imap(helper, params) # process asynchronously
except AttributeError: # compatibility with python 3
ires = map(helper, params)
else:
# process asynchronously in pool
ires = self.pool.imap(helper, params)
keypoints, descrs = [], []
for i, (k, d) in enumerate(ires):
keypoints.extend(k)
descrs.extend(d)
if self.debug:
print('affine sampling: %d / %d\r' %
(i + 1, len(params)), end=' ')
else:
keypoints, descrs = self.detector.detectAndCompute(
img, mask) # use detector
# convert to dictionaries
keypoints = [vars(SimKeyPoint(obj)) for obj in keypoints]
# return keyPoint2tuple(keypoints), np.array(descrs)
return keypoints, np.array(descrs)
def config(self, name, separator="-"):
"""
This function takes parameters from a command to initialize a detector and matcher.
:param name: "[a-]<sift|surf|orb>[-flann]" (str) Ex: "a-sift-flann"
:param separator: separator character
:return: detector, matcher
"""
# Here is agood explanation for all the decisions in the features and matchers
# http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html
FLANN_INDEX_KDTREE = 1 # bug: flann enums are missing
FLANN_INDEX_LSH = 6
chunks = name.split(separator)
index = 0
if chunks[index].lower() == "a":
self.useASIFT = True
index += 1
if chunks[index].lower() == 'sift':
try: # opencv 2
detector = cv2.SIFT() # Scale-invariant feature transform
except AttributeError: # opencv 3
detector = cv2.xfeatures2d.SIFT_create()
norm = cv2.NORM_L2 # distance measurement to be used
elif chunks[index].lower() == 'surf':
try: # opencv 2
# Hessian Threshold to 800, 500 #
# http://stackoverflow.com/a/18891668/5288758
detector = cv2.SURF()
except AttributeError: # opencv 3
detector = cv2.xfeatures2d.SURF_create()
# http://docs.opencv.org/2.4/modules/nonfree/doc/feature_detection.html
norm = cv2.NORM_L2 # distance measurement to be used
elif chunks[index].lower() == 'orb':
try: # opencv 2
detector = cv2.ORB() # around 400, binary string based descriptors
except AttributeError: # opencv 3
detector = cv2.ORB_create()
norm = cv2.NORM_HAMMING # Hamming distance
elif chunks[index].lower() == 'brisk':
try: # opencv 2
detector = cv2.BRISK()
except AttributeError: # opencv 3
detector = cv2.BRISK_create()
norm = cv2.NORM_HAMMING # Hamming distance
else:
raise Exception(
"name '{}' with detector '{}' not valid".format(name, chunks[index]))
index += 1
if len(chunks) - 1 >= index and chunks[index].lower() == 'flann':
# FLANN based Matcher, Fast Approximate Nearest Neighbor Search
# Library
if norm == cv2.NORM_L2: # for SIFT ans SURF
flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
else: # for ORB
flann_params = dict(algorithm=FLANN_INDEX_LSH,
table_number=6, # 12
key_size=12, # 20
multi_probe_level=1) # 2
# bug : need to pass empty dict (#1329)
matcher = cv2.FlannBasedMatcher(flann_params, {})
else: # brute force matcher
# difference in norm http://stackoverflow.com/a/32849908/5288758
matcher = cv2.BFMatcher(norm)
self.detector, self.matcher = detector, matcher
return detector, matcher
def init_feature(name, separator="-", features={}): # dictionary caches the features
"""
This function takes parameters from a command to initialize a detector and matcher.
:param name: "<sift|surf|orb>[-flann]" (str) Ex: "sift-flann"
:param separator: separator character
:param features: it is a dictionary containing the mapping from name to the
initialized detector, matcher pair. If None it is created.
This feature is to reduce time by reusing created features.
:return: detector, matcher
"""
FLANN_INDEX_KDTREE = 1 # bug: flann enums are missing
FLANN_INDEX_LSH = 6
if features is None:
features = {} # reset features
if name not in features: # if called with a different name
chunks = name.split(separator)
index = 0
if chunks[index].lower() == 'sift':
try: # opencv 2
detector = cv2.SIFT() # Scale-invariant feature transform
except AttributeError: # opencv 3
detector = cv2.xfeatures2d.SIFT_create()
norm = cv2.NORM_L2 # distance measurement to be used
elif chunks[index].lower() == 'surf':
try: # opencv 2
# Hessian Threshold to 800, 500 #
# http://stackoverflow.com/a/18891668/5288758
detector = cv2.SURF()
except AttributeError: # opencv 3
detector = cv2.xfeatures2d.SURF_create()
# http://docs.opencv.org/2.4/modules/nonfree/doc/feature_detection.html
norm = cv2.NORM_L2 # distance measurement to be used
elif chunks[index].lower() == 'orb':
try: # opencv 2
detector = cv2.ORB() # around 400, binary string based descriptors
except AttributeError: # opencv 3
detector = cv2.ORB_create()
norm = cv2.NORM_HAMMING # Hamming distance
elif chunks[index].lower() == 'brisk':
try: # opencv 2
detector = cv2.BRISK()
except AttributeError: # opencv 3
detector = cv2.BRISK_create()
norm = cv2.NORM_HAMMING # Hamming distance
else:
raise Exception(
"name '{}' with detector '{}' not valid".format(name, chunks[index]))
index += 1
if len(chunks) - 1 >= index and chunks[index].lower() == 'flann':
# FLANN based Matcher, Fast Approximate Nearest Neighbor Search
# Library
if norm == cv2.NORM_L2: # for SIFT ans SURF
flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
else: # for ORB
flann_params = dict(algorithm=FLANN_INDEX_LSH,
table_number=6, # 12
key_size=12, # 20
multi_probe_level=1) # 2
# bug : need to pass empty dict (#1329)
matcher = cv2.FlannBasedMatcher(flann_params, {})
else: # brute force matcher
# difference in norm http://stackoverflow.com/a/32849908/5288758
matcher = cv2.BFMatcher(norm)
features[name] = detector, matcher # cache detector and matcher
return features[name] # get buffered: detector, matcher
def affine_skew(tilt, phi, img, mask=None):
"""
Increase robustness to descriptors by calculating other invariant perspectives to image.
:param tilt: tilting of image
:param phi: rotation of image (in degrees)
:param img: image to find Affine transforms
:param mask: mask to detect keypoints (it uses default, mask[:] = 255)
:return: skew_img, skew_mask, Ai (invert Affine Transform)
Ai - is an affine transform matrix from skew_img to img
"""
h, w = img.shape[:2] # get 2D shape
if mask is None:
mask = np.zeros((h, w), np.uint8)
mask[:] = 255
A = np.float32([[1, 0, 0], [0, 1, 0]]) # init Transformation matrix
if phi != 0.0: # simulate rotation
phi = np.deg2rad(phi) # convert degrees to radian
s, c = np.sin(phi), np.cos(phi) # get sine, cosine components
# build partial Transformation matrix
A = np.float32([[c, -s], [s, c]])
corners = [[0, 0], [w, 0], [w, h], [0, h]] # use corners
tcorners = np.int32(np.dot(corners, A.T)) # transform corners
x, y, w, h = cv2.boundingRect(
tcorners.reshape(1, -1, 2)) # get translations
A = np.hstack([A, [[-x], [-y]]]) # finish Transformation matrix build
img = cv2.warpAffine(
img, A, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
if tilt != 1.0:
s = 0.8 * np.sqrt(tilt * tilt - 1) # get sigma
# blur image with gaussian blur
img = cv2.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
img = cv2.resize(img, (0, 0), fx=1.0 / tilt, fy=1.0,
interpolation=cv2.INTER_NEAREST) # resize
A[0] /= tilt
if phi != 0.0 or tilt != 1.0:
h, w = img.shape[:2] # get new 2D shape
# also get mask transformation
mask = cv2.warpAffine(mask, A, (w, h), flags=cv2.INTER_NEAREST)
Ai = cv2.invertAffineTransform(A)
return img, mask, Ai
#@memoize(MANAGER["TEMPPATH"],ignore=["pool"])
def ASIFT(feature_name, img, mask=None, pool=_pool):
"""
asift(feature_name, img, mask=None, pool=None) -> keypoints, descrs
Apply a set of affine transformations to the image, detect keypoints and
reproject them into initial image coordinates.
See http://www.ipol.im/pub/algo/my_affine_sift/ for the details.
ThreadPool object may be passed to speedup the computation.
:param feature_name: feature name to create detector.
:param img: image to find keypoints and its descriptors
:param mask: mask to detect keypoints (it uses default, mask[:] = 255)
:param pool: multiprocessing pool (dummy, it uses multithreading)
:return: keypoints,descriptors
"""
# bulding parameters of tilt and rotation variations
# it must get detector object of cv2 here to prevent conflict with
# memoizers
detector = init_feature(feature_name)[0]
params = [(1.0, 0.0)] # first tilt and rotation
# phi rotations for t tilts of the image
for t in 2**(0.5 * np.arange(1, 6)):
for phi in np.arange(0, 180, 72.0 / t):
params.append((t, phi))
def helper(param):
t, phi = param # tilt, phi (rotation)
# computing the affine transform
# get tilted image, mask and transformation
timg, tmask, Ai = affine_skew(t, phi, img, mask)
# Find keypoints and descriptors with the detector
keypoints, descrs = detector.detectAndCompute(
timg, tmask) # use detector
for kp in keypoints:
x, y = kp.pt # get actual keypoints
# transform keypoints to original img
kp.pt = tuple(np.dot(Ai, (x, y, 1)))
if descrs is None:
descrs = [] # faster than: descrs or []
return keypoints, descrs
if pool is None:
ires = it.imap(helper, params) # process asynchronously
else:
ires = pool.imap(helper, params) # process asynchronously in pool
keypoints, descrs = [], []
for i, (k, d) in enumerate(ires):
keypoints.extend(k)
descrs.extend(d)
if FLAG_DEBUG:
print('affine sampling: %d / %d\r' % (i + 1, len(params)), end=' ')
# convert to dictionaries
keypoints = [vars(SimKeyPoint(obj)) for obj in keypoints]
# return keyPoint2tuple(keypoints), np.array(descrs)
return keypoints, np.array(descrs)
def ASIFT_iter(imgs, feature_name=feature_name):
"""
Affine-SIFT for N images.
:param imgs: images to apply asift
:param feature_name: eg. SIFT SURF ORB
:return: [(kp1,desc1),...,(kpN,descN)]
"""
# print 'imgf - %d features, imgb - %d features' % (len(kp1), len(kp2))
for img in imgs:
yield ASIFT(feature_name, img, pool=_pool)
def ASIFT_multiple(imgs, feature_name=feature_name):
"""
Affine-SIFT for N images.
:param imgs: images to apply asift
:param feature_name: eg. SIFT SURF ORB
:return: [(kp1,desc1),...,(kpN,descN)]
"""
# print 'imgf - %d features, imgb - %d features' % (len(kp1), len(kp2))
return [ASIFT(feature_name, img, pool=_pool) for img in imgs]
def filter_matches(kp1, kp2, matches, ratio=0.75):
"""
This function applies a ratio test.
:param kp1: raw keypoints 1
:param kp2: raw keypoints 2
:param matches: raw matches
:param ratio: filtering ratio of distance
:return: filtered keypoint 1, filtered keypoint 2, keypoint pairs
"""
mkp1, mkp2 = [], [] # initialize matched keypoints
for m in matches:
if len(m) == 2 and m[0].distance < m[1].distance * ratio: # by Hamming distance
m = m[0]
# keypoint with Index of the descriptor in query descriptors
mkp1.append(kp1[m.queryIdx])
# keypoint with Index of the descriptor in train descriptors
mkp2.append(kp2[m.trainIdx])
p1 = np.float32([kp["pt"] for kp in mkp1])
p2 = np.float32([kp["pt"] for kp in mkp2])
return p1, p2, list(zip(mkp1, mkp2)) # p1, p2, kp_pairs
#@memoize(MANAGER["TEMPPATH"])
def MATCH(feature_name, kp1, desc1, kp2, desc2):
"""
Use matcher and asift output to obtain Transformation matrix (TM).
:param feature_name: feature name to create detector. It is the same used in the detector
which is used in init_feature function but the detector itself is ignored.
e.g. if 'detector' uses BFMatcher, if 'detector-flann' uses FlannBasedMatcher.
:param kp1: keypoints of source image
:param desc1: descriptors of kp1
:param kp2: keypoints of destine image
:param desc2: descriptors of kp2
:return: TM
"""
# http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.html
# it must get matcher object of cv2 here to prevent conflict with memoizers
matcher = init_feature(feature_name)[1]
# BFMatcher.knnMatch() returns k best matches where k is specified by the
# user
raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2) # 2
# If k=2, it will draw two match-lines for each keypoint.
# So we have to pass a status if we want to selectively draw it.
p1, p2, kp_pairs = filter_matches(
kp1, kp2, raw_matches) # ratio test of 0.75
if len(p1) >= 4:
# status specifies the inlier and outlier points
H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
if FLAG_DEBUG:
print('%d / %d inliers/matched' % (np.sum(status), len(status)))
# do not draw outliers (there will be a lot of them)
# kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag] #
# uncomment to give only good kp_pairs
else:
H, status = None, None
if FLAG_DEBUG:
print('%d matches found, not enough for homography estimation' % len(p1))
return H, status, kp_pairs
def MATCH_multiple(pairlist, feature_name=feature_name):
"""
:param pairlist: list of keypoint and descriptors pair e.g. [(kp1,desc1),...,(kpN,descN)]
:param feature_name: feature name to create detector
:return: [(H1, mask1, kp_pairs1),....(HN, maskN, kp_pairsN)]
"""
kp1, desc1 = pairlist[0]
return [MATCH(feature_name, kp1, desc1, kpN, descN) for kpN, descN in pairlist[1:]]
def inlineRatio(inlines, lines, thresh=30):
"""
Probability that a match was correct.
:param inlines: number of matched lines
:param lines: number lines
:param thresh: threshold for lines (i.e. very low probability <= thresh < good probability)
:return:
"""
return (inlines / lines) * normsigmoid(lines, 30, thresh) # less than 30 are below 0.5
| [
"cv2.GaussianBlur",
"numpy.sum",
"cv2.warpAffine",
"cv2.xfeatures2d.SURF_create",
"numpy.sin",
"numpy.arange",
"itertools.imap",
"cv2.invertAffineTransform",
"cv2.BFMatcher",
"cv2.BRISK",
"cv2.resize",
"multiprocessing.pool.ThreadPool",
"cv2.FlannBasedMatcher",
"numpy.hstack",
"cv2.ORB_c... | [((796, 819), 'multiprocessing.pool.ThreadPool', 'Pool', ([], {'processes': 'NO_CPUs'}), '(processes=NO_CPUs)\n', (800, 819), True, 'from multiprocessing.pool import ThreadPool as Pool\n'), ((11012, 11046), 'numpy.float32', 'np.float32', (['[[1, 0, 0], [0, 1, 0]]'], {}), '([[1, 0, 0], [0, 1, 0]])\n', (11022, 11046), True, 'import numpy as np\n'), ((12301, 12329), 'cv2.invertAffineTransform', 'cv2.invertAffineTransform', (['A'], {}), '(A)\n', (12326, 12329), False, 'import cv2\n'), ((16258, 16295), 'numpy.float32', 'np.float32', (["[kp['pt'] for kp in mkp1]"], {}), "([kp['pt'] for kp in mkp1])\n", (16268, 16295), True, 'import numpy as np\n'), ((16305, 16342), 'numpy.float32', 'np.float32', (["[kp['pt'] for kp in mkp2]"], {}), "([kp['pt'] for kp in mkp2])\n", (16315, 16342), True, 'import numpy as np\n'), ((10955, 10981), 'numpy.zeros', 'np.zeros', (['(h, w)', 'np.uint8'], {}), '((h, w), np.uint8)\n', (10963, 10981), True, 'import numpy as np\n'), ((11131, 11146), 'numpy.deg2rad', 'np.deg2rad', (['phi'], {}), '(phi)\n', (11141, 11146), True, 'import numpy as np\n'), ((11305, 11334), 'numpy.float32', 'np.float32', (['[[c, -s], [s, c]]'], {}), '([[c, -s], [s, c]])\n', (11315, 11334), True, 'import numpy as np\n'), ((11583, 11611), 'numpy.hstack', 'np.hstack', (['[A, [[-x], [-y]]]'], {}), '([A, [[-x], [-y]]])\n', (11592, 11611), True, 'import numpy as np\n'), ((11664, 11756), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'A', '(w, h)'], {'flags': 'cv2.INTER_LINEAR', 'borderMode': 'cv2.BORDER_REPLICATE'}), '(img, A, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.\n BORDER_REPLICATE)\n', (11678, 11756), False, 'import cv2\n'), ((11895, 11947), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(0, 0)'], {'sigmaX': 's', 'sigmaY': '(0.01)'}), '(img, (0, 0), sigmaX=s, sigmaY=0.01)\n', (11911, 11947), False, 'import cv2\n'), ((11962, 12041), 'cv2.resize', 'cv2.resize', (['img', '(0, 0)'], {'fx': '(1.0 / tilt)', 'fy': '(1.0)', 'interpolation': 
'cv2.INTER_NEAREST'}), '(img, (0, 0), fx=1.0 / tilt, fy=1.0, interpolation=cv2.INTER_NEAREST)\n', (11972, 12041), False, 'import cv2\n'), ((12235, 12291), 'cv2.warpAffine', 'cv2.warpAffine', (['mask', 'A', '(w, h)'], {'flags': 'cv2.INTER_NEAREST'}), '(mask, A, (w, h), flags=cv2.INTER_NEAREST)\n', (12249, 12291), False, 'import cv2\n'), ((13455, 13482), 'numpy.arange', 'np.arange', (['(0)', '(180)', '(72.0 / t)'], {}), '(0, 180, 72.0 / t)\n', (13464, 13482), True, 'import numpy as np\n'), ((14222, 14245), 'itertools.imap', 'it.imap', (['helper', 'params'], {}), '(helper, params)\n', (14229, 14245), True, 'import itertools as it\n'), ((14754, 14770), 'numpy.array', 'np.array', (['descrs'], {}), '(descrs)\n', (14762, 14770), True, 'import numpy as np\n'), ((17749, 17792), 'cv2.findHomography', 'cv2.findHomography', (['p1', 'p2', 'cv2.RANSAC', '(5.0)'], {}), '(p1, p2, cv2.RANSAC, 5.0)\n', (17767, 17792), False, 'import cv2\n'), ((3858, 3874), 'numpy.array', 'np.array', (['descrs'], {}), '(descrs)\n', (3866, 3874), True, 'import numpy as np\n'), ((6788, 6827), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', (['flann_params', '{}'], {}), '(flann_params, {})\n', (6809, 6827), False, 'import cv2\n'), ((6964, 6983), 'cv2.BFMatcher', 'cv2.BFMatcher', (['norm'], {}), '(norm)\n', (6977, 6983), False, 'import cv2\n'), ((10062, 10101), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', (['flann_params', '{}'], {}), '(flann_params, {})\n', (10083, 10101), False, 'import cv2\n'), ((10238, 10257), 'cv2.BFMatcher', 'cv2.BFMatcher', (['norm'], {}), '(norm)\n', (10251, 10257), False, 'import cv2\n'), ((11191, 11202), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (11197, 11202), True, 'import numpy as np\n'), ((11204, 11215), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (11210, 11215), True, 'import numpy as np\n'), ((11429, 11449), 'numpy.dot', 'np.dot', (['corners', 'A.T'], {}), '(corners, A.T)\n', (11435, 11449), True, 'import numpy as np\n'), ((11803, 11827), 'numpy.sqrt', 
'np.sqrt', (['(tilt * tilt - 1)'], {}), '(tilt * tilt - 1)\n', (11810, 11827), True, 'import numpy as np\n'), ((13418, 13433), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (13427, 13433), True, 'import numpy as np\n'), ((16367, 16382), 'builtins.zip', 'zip', (['mkp1', 'mkp2'], {}), '(mkp1, mkp2)\n', (16370, 16382), False, 'from builtins import zip\n'), ((2015, 2042), 'numpy.arange', 'np.arange', (['(0)', '(180)', '(72.0 / t)'], {}), '(0, 180, 72.0 / t)\n', (2024, 2042), True, 'import numpy as np\n'), ((4725, 4735), 'cv2.SIFT', 'cv2.SIFT', ([], {}), '()\n', (4733, 4735), False, 'import cv2\n'), ((7999, 8009), 'cv2.SIFT', 'cv2.SIFT', ([], {}), '()\n', (8007, 8009), False, 'import cv2\n'), ((14050, 14071), 'numpy.dot', 'np.dot', (['Ai', '(x, y, 1)'], {}), '(Ai, (x, y, 1))\n', (14056, 14071), True, 'import numpy as np\n'), ((1970, 1985), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (1979, 1985), True, 'import numpy as np\n'), ((2962, 2985), 'itertools.imap', 'it.imap', (['helper', 'params'], {}), '(helper, params)\n', (2969, 2985), True, 'import itertools as it\n'), ((4847, 4876), 'cv2.xfeatures2d.SIFT_create', 'cv2.xfeatures2d.SIFT_create', ([], {}), '()\n', (4874, 4876), False, 'import cv2\n'), ((5157, 5167), 'cv2.SURF', 'cv2.SURF', ([], {}), '()\n', (5165, 5167), False, 'import cv2\n'), ((8121, 8150), 'cv2.xfeatures2d.SIFT_create', 'cv2.xfeatures2d.SIFT_create', ([], {}), '()\n', (8148, 8150), False, 'import cv2\n'), ((8431, 8441), 'cv2.SURF', 'cv2.SURF', ([], {}), '()\n', (8439, 8441), False, 'import cv2\n'), ((2719, 2740), 'numpy.dot', 'np.dot', (['Ai', '(x, y, 1)'], {}), '(Ai, (x, y, 1))\n', (2725, 2740), True, 'import numpy as np\n'), ((5242, 5271), 'cv2.xfeatures2d.SURF_create', 'cv2.xfeatures2d.SURF_create', ([], {}), '()\n', (5269, 5271), False, 'import cv2\n'), ((5523, 5532), 'cv2.ORB', 'cv2.ORB', ([], {}), '()\n', (5530, 5532), False, 'import cv2\n'), ((8516, 8545), 'cv2.xfeatures2d.SURF_create', 
'cv2.xfeatures2d.SURF_create', ([], {}), '()\n', (8543, 8545), False, 'import cv2\n'), ((8797, 8806), 'cv2.ORB', 'cv2.ORB', ([], {}), '()\n', (8804, 8806), False, 'import cv2\n'), ((17864, 17878), 'numpy.sum', 'np.sum', (['status'], {}), '(status)\n', (17870, 17878), True, 'import numpy as np\n'), ((5654, 5670), 'cv2.ORB_create', 'cv2.ORB_create', ([], {}), '()\n', (5668, 5670), False, 'import cv2\n'), ((5830, 5841), 'cv2.BRISK', 'cv2.BRISK', ([], {}), '()\n', (5839, 5841), False, 'import cv2\n'), ((8928, 8944), 'cv2.ORB_create', 'cv2.ORB_create', ([], {}), '()\n', (8942, 8944), False, 'import cv2\n'), ((9104, 9115), 'cv2.BRISK', 'cv2.BRISK', ([], {}), '()\n', (9113, 9115), False, 'import cv2\n'), ((5916, 5934), 'cv2.BRISK_create', 'cv2.BRISK_create', ([], {}), '()\n', (5932, 5934), False, 'import cv2\n'), ((9190, 9208), 'cv2.BRISK_create', 'cv2.BRISK_create', ([], {}), '()\n', (9206, 9208), False, 'import cv2\n')] |
"""Synthetic observation data testing.
"""
import copy
import unittest
import numpy
import tigernet
from .network_objects import network_lattice_1x1_geomelem
from .network_objects import network_empirical_simplified
import platform
# Detect the host OS once at import time. Windows floating-point results
# differ slightly from POSIX platforms, so comparisons there are given a
# much looser decimal precision. (Previously the platform string was bound
# to a module-level name ``os``, shadowing the stdlib module name, and
# matched with a brittle ``[:7]`` slice.)
WINDOWS = platform.platform().lower().startswith("windows")
# NOTE(review): ``DECIMAL`` looks intended for ``places=DECIMAL`` in the
# ``assertAlmostEqual`` calls below -- confirm it is actually threaded through.
DECIMAL = -1 if WINDOWS else 1
####################################################################################
################################## SYNTH-SYNTH #####################################
####################################################################################
class TestSyntheticObservationsSegmentRandomLattice1x1(unittest.TestCase):
    """Snap 5 synthetic observations to the *segments* of a 1x1 lattice."""

    def setUp(self):
        """Build the lattice network and associate synthetic points with it."""
        network = copy.deepcopy(network_lattice_1x1_geomelem)
        # generate synthetic observations
        obs = tigernet.generate_obs(5, network.s_data)
        obs["obs_id"] = ["a", "b", "c", "d", "e"]
        # associate observations with the network (default snaps to segments)
        args = network, obs.copy()
        kwargs = {"df_name": "obs1", "df_key": "obs_id"}
        self.net_obs = tigernet.Observations(*args, **kwargs)

    def assertSequenceAlmostEqual(self, observed, known):
        """Element-wise ``assertAlmostEqual`` over two float sequences.

        ``assertAlmostEqual`` called directly on two lists/tuples degrades
        to an exact ``==`` comparison and raises ``TypeError`` (rather than
        a test failure) whenever the values differ, so compare the entries
        one at a time instead.
        """
        self.assertEqual(len(observed), len(known))
        for observed_val, known_val in zip(observed, known):
            self.assertAlmostEqual(observed_val, known_val)

    def test_obs2coords(self):
        """Observation ids map to the expected (x, y) coordinates."""
        known_obs2coords = {
            (0, "a"): (4.939321535345923, 6.436704297351775),
            (1, "b"): (5.4248703846447945, 4.903948646972072),
            (2, "c"): (3.8128931940501425, 5.813047017599905),
            (3, "d"): (3.9382849013642325, 8.025957007038718),
            (4, "e"): (8.672964844509263, 3.4509736694319995),
        }
        observed_obs2coords = self.net_obs.obs2coords
        for k, v in known_obs2coords.items():
            # coordinates are (x, y) tuples -- compare component-wise
            self.assertSequenceAlmostEqual(observed_obs2coords[k], v)

    def test_obs2segm(self):
        """Each observation snaps to the expected segment id."""
        known_obs2segm = {"a": 1, "b": 3, "c": 1, "d": 1, "e": 3}
        observed_obs2segm = self.net_obs.obs2segm
        self.assertEqual(observed_obs2segm, known_obs2segm)

    def test_snapped_points_df_dist_a(self):
        """Distances along the segment to endpoint ``a`` are as expected."""
        known_dist_a = [
            1.9367042973517747,
            0.9248703846447945,
            1.3130470175999047,
            3.5259570070387185,
            4.172964844509263,
        ]
        observed_dist_a = list(self.net_obs.snapped_points["dist_a"])
        self.assertSequenceAlmostEqual(observed_dist_a, known_dist_a)

        known_dist_a_mean = 2.3747087102288913
        observed_dist_a_mean = self.net_obs.snapped_points["dist_a"].mean()
        self.assertAlmostEqual(observed_dist_a_mean, known_dist_a_mean)

    def test_snapped_points_df_dist_b(self):
        """Distances along the segment to endpoint ``b`` are as expected."""
        known_dist_b = [
            2.563295702648225,
            3.5751296153552055,
            3.1869529824000953,
            0.9740429929612815,
            0.32703515549073714,
        ]
        observed_dist_b = list(self.net_obs.snapped_points["dist_b"])
        self.assertSequenceAlmostEqual(observed_dist_b, known_dist_b)

        known_dist_b_mean = 2.1252912897711087
        observed_dist_b_mean = self.net_obs.snapped_points["dist_b"].mean()
        self.assertAlmostEqual(observed_dist_b_mean, known_dist_b_mean)

    def test_snapped_points_df_node_a(self):
        """Segment endpoint ``a`` node ids are as expected (exact ints)."""
        known_node_a = [1, 1, 1, 1, 1]
        observed_node_a = list(self.net_obs.snapped_points["node_a"])
        self.assertEqual(observed_node_a, known_node_a)

    def test_snapped_points_df_node_b(self):
        """Segment endpoint ``b`` node ids are as expected (exact ints)."""
        known_node_b = [2, 4, 2, 2, 4]
        observed_node_b = list(self.net_obs.snapped_points["node_b"])
        self.assertEqual(observed_node_b, known_node_b)

    def test_snapped_points_df_dist2line(self):
        """Perpendicular snap distances to the segments are as expected."""
        known_dist2line = [
            0.4393215353459228,
            0.4039486469720721,
            0.6871068059498575,
            0.5617150986357675,
            1.0490263305680005,
        ]
        observed_dist2line = list(self.net_obs.snapped_points["dist2line"])
        self.assertSequenceAlmostEqual(observed_dist2line, known_dist2line)

        known_dist2line_mean = 0.6282236834943241
        observed_dist2line_mean = self.net_obs.snapped_points["dist2line"].mean()
        self.assertAlmostEqual(observed_dist2line_mean, known_dist2line_mean)
class TestSyntheticObservationsNodeRandomLattice1x1(unittest.TestCase):
    """Snap 5 synthetic observations to the *nodes* of a 1x1 lattice."""

    def setUp(self):
        """Build the lattice network and snap synthetic points to nodes."""
        network = copy.deepcopy(network_lattice_1x1_geomelem)
        # generate synthetic observations
        obs = tigernet.generate_obs(5, network.s_data)
        obs["obs_id"] = ["a", "b", "c", "d", "e"]
        # associate observations with the network, snapping to nodes
        args = network, obs.copy()
        kwargs = {"df_name": "obs1", "df_key": "obs_id", "snap_to": "nodes"}
        self.net_obs = tigernet.Observations(*args, **kwargs)

    def assertSequenceAlmostEqual(self, observed, known):
        """Element-wise ``assertAlmostEqual`` over two float sequences.

        ``assertAlmostEqual`` called directly on two lists/tuples degrades
        to an exact ``==`` comparison and raises ``TypeError`` (rather than
        a test failure) whenever the values differ, so compare the entries
        one at a time instead.
        """
        self.assertEqual(len(observed), len(known))
        for observed_val, known_val in zip(observed, known):
            self.assertAlmostEqual(observed_val, known_val)

    def test_obs2coords(self):
        """Observation ids map to the expected (x, y) coordinates."""
        known_obs2coords = {
            (0, "a"): (4.939321535345923, 6.436704297351775),
            (1, "b"): (5.4248703846447945, 4.903948646972072),
            (2, "c"): (3.8128931940501425, 5.813047017599905),
            (3, "d"): (3.9382849013642325, 8.025957007038718),
            (4, "e"): (8.672964844509263, 3.4509736694319995),
        }
        observed_obs2coords = self.net_obs.obs2coords
        for k, v in known_obs2coords.items():
            # coordinates are (x, y) tuples -- compare component-wise
            self.assertSequenceAlmostEqual(observed_obs2coords[k], v)

    def test_obs2node(self):
        """Each observation snaps to the expected node id."""
        known_obs2node = {"a": 1, "b": 1, "c": 1, "d": 2, "e": 4}
        observed_obs2node = self.net_obs.obs2node
        self.assertEqual(observed_obs2node, known_obs2node)

    def test_snapped_points_df_dist2node(self):
        """Distances from each observation to its snapped node match."""
        known_dist2node = [
            1.9859070841304562,
            1.0092372059053203,
            1.4819609418640627,
            1.1244036660258458,
            1.098821293546778,
        ]
        observed_dist2node = list(self.net_obs.snapped_points["dist2node"])
        self.assertSequenceAlmostEqual(observed_dist2node, known_dist2node)

        known_dist2node_mean = 1.3400660382944927
        observed_dist2node_mean = self.net_obs.snapped_points["dist2node"].mean()
        self.assertAlmostEqual(observed_dist2node_mean, known_dist2node_mean)
####################################################################################
####################### SYNTH-SYNTH RESTRICTED #####################################
####################################################################################
class TestSyntheticObservationsSegmentRandomLattice1x1Restricted(unittest.TestCase):
    """Synthetic observations snapped to lattice *segments* after removing
    restricted MTFCC road classes (limited-access and ramp types)."""

    def setUp(self):
        network = copy.deepcopy(network_lattice_1x1_geomelem)
        # flag two segments as limited-access so the restriction filter removes them
        network.s_data.loc[1, "MTFCC"] = "S1100"
        network.s_data.loc[3, "MTFCC"] = "S1100"
        # generate synthetic observations
        obs = tigernet.generate_obs(5, network.s_data)
        obs["obs_id"] = ["a", "b", "c", "d", "e"]
        # associate observations with the network
        args = network, obs.copy()
        kwargs = {"df_name": "obs1", "df_key": "obs_id", "restrict_col": "MTFCC"}
        kwargs.update({"remove_restricted": ["S1100", "S1630", "S1640"]})
        self.net_obs = tigernet.Observations(*args, **kwargs)

    def test_obs2coords(self):
        # expected (index, id) -> coordinate mapping of the 5 synthetic points
        known_obs2coords = {
            (0, "a"): (4.939321535345923, 6.436704297351775),
            (1, "b"): (5.4248703846447945, 4.903948646972072),
            (2, "c"): (3.8128931940501425, 5.813047017599905),
            (3, "d"): (3.9382849013642325, 8.025957007038718),
            (4, "e"): (8.672964844509263, 3.4509736694319995),
        }
        observed_obs2coords = self.net_obs.obs2coords
        for k, v in known_obs2coords.items():
            self.assertAlmostEqual(observed_obs2coords[k], v)

    def test_obs2segm(self):
        # expected observation-id -> nearest (non-restricted) segment mapping
        known_obs2segm = {"a": 0, "b": 0, "c": 2, "d": 2, "e": 0}
        observed_obs2segm = self.net_obs.obs2segm
        self.assertEqual(observed_obs2segm, known_obs2segm)

    def test_snapped_points_df_dist_a(self):
        # expected distance from snapped point to segment endpoint ``a``
        known_dist_a = [
            4.5,
            4.5,
            3.812893194050143,
            3.9382849013642325,
            3.4509736694319995,
        ]
        observed_dist_a = list(self.net_obs.snapped_points["dist_a"])
        self.assertAlmostEqual(observed_dist_a, known_dist_a)
        known_dist_a_mean = 4.040430352969275
        observed_dist_a_mean = self.net_obs.snapped_points["dist_a"].mean()
        self.assertAlmostEqual(observed_dist_a_mean, known_dist_a_mean)

    def test_snapped_points_df_dist_b(self):
        # expected distance from snapped point to segment endpoint ``b``
        known_dist_b = [
            0.0,
            0.0,
            0.6871068059498571,
            0.5617150986357675,
            1.0490263305680005,
        ]
        observed_dist_b = list(self.net_obs.snapped_points["dist_b"])
        self.assertAlmostEqual(observed_dist_b, known_dist_b)
        known_dist_b_mean = 0.459569647030725
        observed_dist_b_mean = self.net_obs.snapped_points["dist_b"].mean()
        self.assertAlmostEqual(observed_dist_b_mean, known_dist_b_mean)

    def test_snapped_points_df_node_a(self):
        known_node_a = [0, 0, 3, 3, 0]
        observed_node_a = list(self.net_obs.snapped_points["node_a"])
        self.assertEqual(observed_node_a, known_node_a)

    def test_snapped_points_df_node_b(self):
        known_node_b = [1, 1, 1, 1, 1]
        observed_node_b = list(self.net_obs.snapped_points["node_b"])
        self.assertEqual(observed_node_b, known_node_b)

    def test_snapped_points_df_dist2line(self):
        # expected perpendicular snapping distances, one per observation
        known_dist2line = [
            1.9859070841304562,
            1.0092372059053203,
            1.3130470175999047,
            3.525957007038718,
            4.172964844509263,
        ]
        observed_dist2line = list(self.net_obs.snapped_points["dist2line"])
        self.assertAlmostEqual(observed_dist2line, known_dist2line)
        known_dist2line_mean = 2.4014226318367324
        # fixed local-name typo: was ``observed_dist2ine_mean``
        observed_dist2line_mean = self.net_obs.snapped_points["dist2line"].mean()
        self.assertAlmostEqual(observed_dist2line_mean, known_dist2line_mean)
class TestSyntheticObservationsNodeRandomLattice1x1Restricted(unittest.TestCase):
    """Synthetic observations snapped to lattice *nodes* after removing
    restricted MTFCC road classes."""

    def setUp(self):
        network = copy.deepcopy(network_lattice_1x1_geomelem)
        # flag two segments as limited-access so the restriction filter removes them
        network.s_data.loc[1, "MTFCC"] = "S1100"
        network.s_data.loc[3, "MTFCC"] = "S1100"
        # generate synthetic observations
        obs = tigernet.generate_obs(5, network.s_data)
        obs["obs_id"] = ["a", "b", "c", "d", "e"]
        # associate observations with the network
        args = network, obs.copy()
        kwargs = {"df_name": "obs1", "df_key": "obs_id", "snap_to": "nodes"}
        kwargs.update({"restrict_col": "MTFCC"})
        kwargs.update({"remove_restricted": ["S1100", "S1630", "S1640"]})
        self.net_obs = tigernet.Observations(*args, **kwargs)

    def test_obs2coords(self):
        # expected (index, id) -> coordinate mapping of the 5 synthetic points
        known_obs2coords = {
            (0, "a"): (4.939321535345923, 6.436704297351775),
            (1, "b"): (5.4248703846447945, 4.903948646972072),
            (2, "c"): (3.8128931940501425, 5.813047017599905),
            (3, "d"): (3.9382849013642325, 8.025957007038718),
            (4, "e"): (8.672964844509263, 3.4509736694319995),
        }
        observed_obs2coords = self.net_obs.obs2coords
        for k, v in known_obs2coords.items():
            self.assertAlmostEqual(observed_obs2coords[k], v)

    def test_obs2node(self):
        # with segments 1 and 3 removed every point now snaps to node 1
        known_obs2node = {"a": 1, "b": 1, "c": 1, "d": 1, "e": 1}
        observed_obs2node = self.net_obs.obs2node
        self.assertEqual(observed_obs2node, known_obs2node)

    def test_snapped_points_df_dist2node(self):
        # expected snapping distances, one per observation
        known_dist2node = [
            1.9859070841304562,
            1.0092372059053203,
            1.4819609418640627,
            3.5704196766655913,
            4.302800464317999,
        ]
        observed_dist2node = list(self.net_obs.snapped_points["dist2node"])
        self.assertAlmostEqual(observed_dist2node, known_dist2node)
        known_dist2node_mean = 2.470065074576686
        observed_dist2node_mean = self.net_obs.snapped_points["dist2node"].mean()
        self.assertAlmostEqual(observed_dist2node_mean, known_dist2node_mean)
####################################################################################
################################## SYNTH-EMPIR #####################################
####################################################################################
class TestSyntheticObservationsSegmentRandomEmpirical(unittest.TestCase):
    """Synthetic observations snapped to *segments* of the simplified
    empirical network; only the last five of 500 records are pinned."""

    def setUp(self):
        network = copy.deepcopy(network_empirical_simplified)
        # generate synthetic observations
        obs = tigernet.generate_obs(500, network.s_data)
        obs["obs_id"] = obs.index
        # associate observations with the network
        args = network, obs.copy()
        kwargs = {"df_name": "obs1", "df_key": "obs_id"}
        self.net_obs = tigernet.Observations(*args, **kwargs)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_obs2coords(self):
        # expected (index, id) -> coordinate mapping for the last 5 points
        known_obs2coords = [
            ((495, 495), (621033.3213594754, 164941.80269090834)),
            ((496, 496), (621819.5720103906, 165514.3885859197)),
            ((497, 497), (623654.2570885622, 164241.2803142736)),
            ((498, 498), (622851.6060250874, 166857.07354681785)),
            ((499, 499), (621816.24144166, 166044.17761455863)),
        ]
        observed_obs2coords = self.net_obs.obs2coords
        for k, v in known_obs2coords:
            obs = numpy.array(observed_obs2coords[k])
            numpy.testing.assert_array_almost_equal(obs, numpy.array(v))

    def test_obs2segm(self):
        known_obs2segm = [(495, 150), (496, 230), (497, 84), (498, 91), (499, 105)]
        observed_obs2segm = list(self.net_obs.obs2segm.items())[-5:]
        self.assertEqual(observed_obs2segm, known_obs2segm)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_dist_a(self):
        # expected distance to segment endpoint ``a`` for the last 5 points
        known_dist_a = numpy.array(
            [
                210.40526565933823,
                118.30357725098324,
                34.12778222322711,
                120.39577375386378,
                0.0,
            ]
        )
        observed_dist_a = list(self.net_obs.snapped_points["dist_a"])[-5:]
        numpy.testing.assert_array_almost_equal(
            numpy.array(observed_dist_a), known_dist_a
        )
        known_dist_a_mean = 163.49368966710074
        observed_dist_a_mean = self.net_obs.snapped_points["dist_a"].mean()
        self.assertAlmostEqual(observed_dist_a_mean, known_dist_a_mean)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_dist_b(self):
        # expected distance to segment endpoint ``b`` for the last 5 points
        known_dist_b = numpy.array(
            [
                342.6965551431302,
                0.0,
                86.50490751040633,
                58.25005873237134,
                152.0185068774602,
            ]
        )
        observed_dist_b = list(self.net_obs.snapped_points["dist_b"])[-5:]
        numpy.testing.assert_array_almost_equal(
            numpy.array(observed_dist_b), known_dist_b
        )
        known_dist_b_mean = 159.75442932794624
        observed_dist_b_mean = self.net_obs.snapped_points["dist_b"].mean()
        self.assertAlmostEqual(observed_dist_b_mean, known_dist_b_mean)

    def test_snapped_points_df_node_a(self):
        known_node_a = [186, 86, 122, 132, 151]
        observed_node_a = list(self.net_obs.snapped_points["node_a"])[-5:]
        self.assertEqual(observed_node_a, known_node_a)

    def test_snapped_points_df_node_b(self):
        known_node_b = [193, 245, 48, 133, 22]
        observed_node_b = list(self.net_obs.snapped_points["node_b"])[-5:]
        self.assertEqual(observed_node_b, known_node_b)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_dist2line(self):
        # expected perpendicular snapping distances for the last 5 points
        known_dist2line = numpy.array(
            [
                147.05576410321171,
                298.0459114928476,
                2.914177304108527,
                160.72592517096817,
                300.2025615374258,
            ]
        )
        observed_dist2line = list(self.net_obs.snapped_points["dist2line"])[-5:]
        numpy.testing.assert_array_almost_equal(
            numpy.array(observed_dist2line), known_dist2line
        )
        known_dist2line_mean = 70.14736252699115
        # fixed local-name typo: was ``observed_dist2ine_mean``
        observed_dist2line_mean = self.net_obs.snapped_points["dist2line"].mean()
        self.assertAlmostEqual(observed_dist2line_mean, known_dist2line_mean)
class TestSyntheticObservationsNodeRandomEmpirical(unittest.TestCase):
    """Synthetic observations snapped to *nodes* of the simplified
    empirical network; only the last five of 500 records are pinned."""

    def setUp(self):
        network = copy.deepcopy(network_empirical_simplified)
        # generate synthetic observations
        obs = tigernet.generate_obs(500, network.s_data)
        obs["obs_id"] = obs.index
        # associate observations with the network
        args = network, obs.copy()
        kwargs = {"df_name": "obs1", "df_key": "obs_id", "snap_to": "nodes"}
        self.net_obs = tigernet.Observations(*args, **kwargs)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_obs2coords(self):
        # expected (index, id) -> coordinate mapping for the last 5 points
        known_obs2coords = [
            ((495, 495), (621033.3213594754, 164941.80269090834)),
            ((496, 496), (621819.5720103906, 165514.3885859197)),
            ((497, 497), (623654.2570885622, 164241.2803142736)),
            ((498, 498), (622851.6060250874, 166857.07354681785)),
            ((499, 499), (621816.24144166, 166044.17761455863)),
        ]
        observed_obs2coords = self.net_obs.obs2coords
        for k, v in known_obs2coords:
            numpy.testing.assert_array_almost_equal(
                numpy.array(observed_obs2coords[k]), numpy.array(v)
            )

    def test_obs2node(self):
        known_obs2node = [(495, 192), (496, 245), (497, 122), (498, 133), (499, 151)]
        observed_obs2node = self.net_obs.obs2node
        for k, v in known_obs2node:
            self.assertAlmostEqual(observed_obs2node[k], v)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_dist2node(self):
        # expected snapping distances for the last 5 points
        known_dist2node = numpy.array(
            [
                233.41263770566138,
                298.0459114928476,
                34.25197729818704,
                170.95581991959833,
                300.2025615374258,
            ]
        )
        observed_dist2node = list(self.net_obs.snapped_points["dist2node"])[-5:]
        # removed redundant nested numpy.array(numpy.array(...)) call
        numpy.testing.assert_array_almost_equal(
            numpy.array(observed_dist2node), known_dist2node
        )
        known_dist2node_mean = 117.00153682103445
        observed_dist2node_mean = self.net_obs.snapped_points["dist2node"].mean()
        self.assertAlmostEqual(observed_dist2node_mean, known_dist2node_mean)
####################################################################################
######################## SYNTH-EMPIR RESTRICTED ####################################
####################################################################################
class TestSyntheticObservationsSegmentRandomEmpiricalRestricted(unittest.TestCase):
    """Synthetic observations snapped to empirical-network *segments* after
    removing restricted MTFCC road classes."""

    def setUp(self):
        network = copy.deepcopy(network_empirical_simplified)
        # generate synthetic observations
        obs = tigernet.generate_obs(500, network.s_data)
        obs["obs_id"] = obs.index
        # associate observations with the network, excluding restricted roads
        args = network, obs.copy()
        kwargs = {"df_name": "obs1", "df_key": "obs_id"}
        kwargs.update({"restrict_col": "MTFCC"})
        kwargs.update({"remove_restricted": ["S1100", "S1630", "S1640"]})
        self.net_obs = tigernet.Observations(*args, **kwargs)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_obs2coords(self):
        # expected (index, id) -> coordinate mapping for the last 5 points
        known_obs2coords = [
            ((495, 495), (621033.3213594754, 164941.80269090834)),
            ((496, 496), (621819.5720103906, 165514.3885859197)),
            ((497, 497), (623654.2570885622, 164241.2803142736)),
            ((498, 498), (622851.6060250874, 166857.07354681785)),
            ((499, 499), (621816.24144166, 166044.17761455863)),
        ]
        observed_obs2coords = self.net_obs.obs2coords
        for k, v in known_obs2coords:
            obs = numpy.array(observed_obs2coords[k])
            numpy.testing.assert_array_almost_equal(obs, numpy.array(v))

    def test_obs2segm(self):
        known_obs2segm = [(495, 150), (496, 230), (497, 84), (498, 91), (499, 105)]
        observed_obs2segm = list(self.net_obs.obs2segm.items())[-5:]
        self.assertEqual(observed_obs2segm, known_obs2segm)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_dist_a(self):
        # expected distance to segment endpoint ``a`` for the last 5 points
        known_dist_a = numpy.array(
            [
                210.40526565933823,
                118.30357725098324,
                34.12778222322711,
                120.39577375386378,
                0.0,
            ]
        )
        observed_dist_a = list(self.net_obs.snapped_points["dist_a"])[-5:]
        numpy.testing.assert_array_almost_equal(
            numpy.array(observed_dist_a), known_dist_a
        )
        known_dist_a_mean = 147.23394614647037
        observed_dist_a_mean = self.net_obs.snapped_points["dist_a"].mean()
        self.assertAlmostEqual(observed_dist_a_mean, known_dist_a_mean)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_dist_b(self):
        # expected distance to segment endpoint ``b`` for the last 5 points
        known_dist_b = numpy.array(
            [
                342.6965551431302,
                0.0,
                86.50490751040633,
                58.25005873237134,
                152.0185068774602,
            ]
        )
        observed_dist_b = list(self.net_obs.snapped_points["dist_b"])[-5:]
        numpy.testing.assert_array_almost_equal(
            numpy.array(observed_dist_b), known_dist_b
        )
        known_dist_b_mean = 148.17608136919543
        observed_dist_b_mean = self.net_obs.snapped_points["dist_b"].mean()
        self.assertAlmostEqual(observed_dist_b_mean, known_dist_b_mean)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_node_a(self):
        known_node_a = [186, 86, 122, 132, 151]
        observed_node_a = list(self.net_obs.snapped_points["node_a"])[-5:]
        self.assertEqual(observed_node_a, known_node_a)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_node_b(self):
        known_node_b = [193, 245, 48, 133, 22]
        observed_node_b = list(self.net_obs.snapped_points["node_b"])[-5:]
        self.assertEqual(observed_node_b, known_node_b)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_dist2line(self):
        # expected perpendicular snapping distances for the last 5 points
        known_dist2line = numpy.array(
            [
                147.05576410321171,
                298.0459114928476,
                2.914177304108527,
                160.72592517096817,
                300.2025615374258,
            ]
        )
        observed_dist2line = list(self.net_obs.snapped_points["dist2line"])[-5:]
        numpy.testing.assert_array_almost_equal(
            numpy.array(observed_dist2line), known_dist2line
        )
        known_dist2line_mean = 72.28800510090015
        # fixed local-name typo: was ``observed_dist2ine_mean``
        observed_dist2line_mean = self.net_obs.snapped_points["dist2line"].mean()
        self.assertAlmostEqual(observed_dist2line_mean, known_dist2line_mean)
class TestSyntheticObservationsNodeRandomEmpiricalRestricted(unittest.TestCase):
    """Synthetic observations snapped to empirical-network *nodes* after
    removing restricted MTFCC road classes."""

    def setUp(self):
        network = copy.deepcopy(network_empirical_simplified)
        # generate synthetic observations
        obs = tigernet.generate_obs(500, network.s_data)
        obs["obs_id"] = obs.index
        # associate observations with the network, excluding restricted roads
        args = network, obs.copy()
        kwargs = {"df_name": "obs1", "df_key": "obs_id", "snap_to": "nodes"}
        kwargs.update({"restrict_col": "MTFCC"})
        kwargs.update({"remove_restricted": ["S1100", "S1630", "S1640"]})
        self.net_obs = tigernet.Observations(*args, **kwargs)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_obs2coords(self):
        # expected (index, id) -> coordinate mapping for the last 5 points
        known_obs2coords = [
            ((495, 495), (621033.3213594754, 164941.80269090834)),
            ((496, 496), (621819.5720103906, 165514.3885859197)),
            ((497, 497), (623654.2570885622, 164241.2803142736)),
            ((498, 498), (622851.6060250874, 166857.07354681785)),
            ((499, 499), (621816.24144166, 166044.17761455863)),
        ]
        observed_obs2coords = self.net_obs.obs2coords
        for k, v in known_obs2coords:
            numpy.testing.assert_array_almost_equal(
                numpy.array(observed_obs2coords[k]), numpy.array(v)
            )

    def test_obs2node(self):
        known_obs2node = [(495, 192), (496, 245), (497, 122), (498, 133), (499, 151)]
        observed_obs2node = self.net_obs.obs2node
        for k, v in known_obs2node:
            self.assertAlmostEqual(observed_obs2node[k], v)

    @unittest.skipIf(WINDOWS, "Skipping Windows due to precision issues.")
    def test_snapped_points_df_dist2node(self):
        # expected snapping distances for the last 5 points
        known_dist2node = numpy.array(
            [
                233.41263770566138,
                298.0459114928476,
                34.25197729818704,
                170.95581991959833,
                300.2025615374258,
            ]
        )
        observed_dist2node = list(self.net_obs.snapped_points["dist2node"])[-5:]
        # removed redundant nested numpy.array(numpy.array(...)) call
        numpy.testing.assert_array_almost_equal(
            numpy.array(observed_dist2node), known_dist2node
        )
        known_dist2node_mean = 117.96666272251426
        observed_dist2node_mean = self.net_obs.snapped_points["dist2node"].mean()
        self.assertAlmostEqual(observed_dist2node_mean, known_dist2node_mean)
if __name__ == "__main__":
    # run the full synthetic-observation test suite when executed directly
    unittest.main()
| [
"unittest.main",
"unittest.skipIf",
"copy.deepcopy",
"tigernet.Observations",
"platform.platform",
"numpy.array",
"tigernet.generate_obs"
] | [((12671, 12740), 'unittest.skipIf', 'unittest.skipIf', (['WINDOWS', '"""Skipping Windows due to precision issues."""'], {}), "(WINDOWS, 'Skipping Windows due to precision issues.')\n", (12686, 12740), False, 'import unittest\n'), ((13610, 13679), 'unittest.skipIf', 'unittest.skipIf', (['WINDOWS', '"""Skipping Windows due to precision issues."""'], {}), "(WINDOWS, 'Skipping Windows due to precision issues.')\n", (13625, 13679), False, 'import unittest\n'), ((14354, 14423), 'unittest.skipIf', 'unittest.skipIf', (['WINDOWS', '"""Skipping Windows due to precision issues."""'], {}), "(WINDOWS, 'Skipping Windows due to precision issues.')\n", (14369, 14423), False, 'import unittest\n'), ((15544, 15613), 'unittest.skipIf', 'unittest.skipIf', (['WINDOWS', '"""Skipping Windows due to precision issues."""'], {}), "(WINDOWS, 'Skipping Windows due to precision issues.')\n", (15559, 15613), False, 'import unittest\n'), ((16846, 16915), 'unittest.skipIf', 'unittest.skipIf', (['WINDOWS', '"""Skipping Windows due to precision issues."""'], {}), "(WINDOWS, 'Skipping Windows due to precision issues.')\n", (16861, 16915), False, 'import unittest\n'), ((17812, 17881), 'unittest.skipIf', 'unittest.skipIf', (['WINDOWS', '"""Skipping Windows due to precision issues."""'], {}), "(WINDOWS, 'Skipping Windows due to precision issues.')\n", (17827, 17881), False, 'import unittest\n'), ((19503, 19572), 'unittest.skipIf', 'unittest.skipIf', (['WINDOWS', '"""Skipping Windows due to precision issues."""'], {}), "(WINDOWS, 'Skipping Windows due to precision issues.')\n", (19518, 19572), False, 'import unittest\n'), ((20442, 20511), 'unittest.skipIf', 'unittest.skipIf', (['WINDOWS', '"""Skipping Windows due to precision issues."""'], {}), "(WINDOWS, 'Skipping Windows due to precision issues.')\n", (20457, 20511), False, 'import unittest\n'), ((21186, 21255), 'unittest.skipIf', 'unittest.skipIf', (['WINDOWS', '"""Skipping Windows due to precision issues."""'], {}), "(WINDOWS, 'Skipping Windows 
due to precision issues.')\n", (21201, 21255), False, 'import unittest\n'), ((21927, 21996), 'unittest.skipIf', 'unittest.skipIf', (['WINDOWS', '"""Skipping Windows due to precision issues."""'], {}), "(WINDOWS, 'Skipping Windows due to precision issues.')\n", (21942, 21996), False, 'import unittest\n'), ((22227, 22296), 'unittest.skipIf', 'unittest.skipIf', (['WINDOWS', '"""Skipping Windows due to precision issues."""'], {}), "(WINDOWS, 'Skipping Windows due to precision issues.')\n", (22242, 22296), False, 'import unittest\n'), ((22526, 22595), 'unittest.skipIf', 'unittest.skipIf', (['WINDOWS', '"""Skipping Windows due to precision issues."""'], {}), "(WINDOWS, 'Skipping Windows due to precision issues.')\n", (22541, 22595), False, 'import unittest\n'), ((23961, 24030), 'unittest.skipIf', 'unittest.skipIf', (['WINDOWS', '"""Skipping Windows due to precision issues."""'], {}), "(WINDOWS, 'Skipping Windows due to precision issues.')\n", (23976, 24030), False, 'import unittest\n'), ((24927, 24996), 'unittest.skipIf', 'unittest.skipIf', (['WINDOWS', '"""Skipping Windows due to precision issues."""'], {}), "(WINDOWS, 'Skipping Windows due to precision issues.')\n", (24942, 24996), False, 'import unittest\n'), ((25757, 25772), 'unittest.main', 'unittest.main', ([], {}), '()\n', (25770, 25772), False, 'import unittest\n'), ((776, 819), 'copy.deepcopy', 'copy.deepcopy', (['network_lattice_1x1_geomelem'], {}), '(network_lattice_1x1_geomelem)\n', (789, 819), False, 'import copy\n'), ((877, 917), 'tigernet.generate_obs', 'tigernet.generate_obs', (['(5)', 'network.s_data'], {}), '(5, network.s_data)\n', (898, 917), False, 'import tigernet\n'), ((1134, 1172), 'tigernet.Observations', 'tigernet.Observations', (['*args'], {}), '(*args, **kwargs)\n', (1155, 1172), False, 'import tigernet\n'), ((4200, 4243), 'copy.deepcopy', 'copy.deepcopy', (['network_lattice_1x1_geomelem'], {}), '(network_lattice_1x1_geomelem)\n', (4213, 4243), False, 'import copy\n'), ((4301, 4341), 
'tigernet.generate_obs', 'tigernet.generate_obs', (['(5)', 'network.s_data'], {}), '(5, network.s_data)\n', (4322, 4341), False, 'import tigernet\n'), ((4578, 4616), 'tigernet.Observations', 'tigernet.Observations', (['*args'], {}), '(*args, **kwargs)\n', (4599, 4616), False, 'import tigernet\n'), ((6354, 6397), 'copy.deepcopy', 'copy.deepcopy', (['network_lattice_1x1_geomelem'], {}), '(network_lattice_1x1_geomelem)\n', (6367, 6397), False, 'import copy\n'), ((6553, 6593), 'tigernet.generate_obs', 'tigernet.generate_obs', (['(5)', 'network.s_data'], {}), '(5, network.s_data)\n', (6574, 6593), False, 'import tigernet\n'), ((6909, 6947), 'tigernet.Observations', 'tigernet.Observations', (['*args'], {}), '(*args, **kwargs)\n', (6930, 6947), False, 'import tigernet\n'), ((9919, 9962), 'copy.deepcopy', 'copy.deepcopy', (['network_lattice_1x1_geomelem'], {}), '(network_lattice_1x1_geomelem)\n', (9932, 9962), False, 'import copy\n'), ((10118, 10158), 'tigernet.generate_obs', 'tigernet.generate_obs', (['(5)', 'network.s_data'], {}), '(5, network.s_data)\n', (10139, 10158), False, 'import tigernet\n'), ((10518, 10556), 'tigernet.Observations', 'tigernet.Observations', (['*args'], {}), '(*args, **kwargs)\n', (10539, 10556), False, 'import tigernet\n'), ((12282, 12325), 'copy.deepcopy', 'copy.deepcopy', (['network_empirical_simplified'], {}), '(network_empirical_simplified)\n', (12295, 12325), False, 'import copy\n'), ((12383, 12425), 'tigernet.generate_obs', 'tigernet.generate_obs', (['(500)', 'network.s_data'], {}), '(500, network.s_data)\n', (12404, 12425), False, 'import tigernet\n'), ((12626, 12664), 'tigernet.Observations', 'tigernet.Observations', (['*args'], {}), '(*args, **kwargs)\n', (12647, 12664), False, 'import tigernet\n'), ((13748, 13850), 'numpy.array', 'numpy.array', (['[210.40526565933823, 118.30357725098324, 34.12778222322711, \n 120.39577375386378, 0.0]'], {}), '([210.40526565933823, 118.30357725098324, 34.12778222322711, \n 120.39577375386378, 0.0])\n', 
(13759, 13850), False, 'import numpy\n'), ((14492, 14591), 'numpy.array', 'numpy.array', (['[342.6965551431302, 0.0, 86.50490751040633, 58.25005873237134, \n 152.0185068774602]'], {}), '([342.6965551431302, 0.0, 86.50490751040633, 58.25005873237134, \n 152.0185068774602])\n', (14503, 14591), False, 'import numpy\n'), ((15688, 15803), 'numpy.array', 'numpy.array', (['[147.05576410321171, 298.0459114928476, 2.914177304108527, \n 160.72592517096817, 300.2025615374258]'], {}), '([147.05576410321171, 298.0459114928476, 2.914177304108527, \n 160.72592517096817, 300.2025615374258])\n', (15699, 15803), False, 'import numpy\n'), ((16437, 16480), 'copy.deepcopy', 'copy.deepcopy', (['network_empirical_simplified'], {}), '(network_empirical_simplified)\n', (16450, 16480), False, 'import copy\n'), ((16538, 16580), 'tigernet.generate_obs', 'tigernet.generate_obs', (['(500)', 'network.s_data'], {}), '(500, network.s_data)\n', (16559, 16580), False, 'import tigernet\n'), ((16801, 16839), 'tigernet.Observations', 'tigernet.Observations', (['*args'], {}), '(*args, **kwargs)\n', (16822, 16839), False, 'import tigernet\n'), ((17956, 18071), 'numpy.array', 'numpy.array', (['[233.41263770566138, 298.0459114928476, 34.25197729818704, \n 170.95581991959833, 300.2025615374258]'], {}), '([233.41263770566138, 298.0459114928476, 34.25197729818704, \n 170.95581991959833, 300.2025615374258])\n', (17967, 18071), False, 'import numpy\n'), ((18991, 19034), 'copy.deepcopy', 'copy.deepcopy', (['network_empirical_simplified'], {}), '(network_empirical_simplified)\n', (19004, 19034), False, 'import copy\n'), ((19092, 19134), 'tigernet.generate_obs', 'tigernet.generate_obs', (['(500)', 'network.s_data'], {}), '(500, network.s_data)\n', (19113, 19134), False, 'import tigernet\n'), ((19458, 19496), 'tigernet.Observations', 'tigernet.Observations', (['*args'], {}), '(*args, **kwargs)\n', (19479, 19496), False, 'import tigernet\n'), ((20580, 20682), 'numpy.array', 'numpy.array', (['[210.40526565933823, 
118.30357725098324, 34.12778222322711, \n 120.39577375386378, 0.0]'], {}), '([210.40526565933823, 118.30357725098324, 34.12778222322711, \n 120.39577375386378, 0.0])\n', (20591, 20682), False, 'import numpy\n'), ((21324, 21423), 'numpy.array', 'numpy.array', (['[342.6965551431302, 0.0, 86.50490751040633, 58.25005873237134, \n 152.0185068774602]'], {}), '([342.6965551431302, 0.0, 86.50490751040633, 58.25005873237134, \n 152.0185068774602])\n', (21335, 21423), False, 'import numpy\n'), ((22670, 22785), 'numpy.array', 'numpy.array', (['[147.05576410321171, 298.0459114928476, 2.914177304108527, \n 160.72592517096817, 300.2025615374258]'], {}), '([147.05576410321171, 298.0459114928476, 2.914177304108527, \n 160.72592517096817, 300.2025615374258])\n', (22681, 22785), False, 'import numpy\n'), ((23429, 23472), 'copy.deepcopy', 'copy.deepcopy', (['network_empirical_simplified'], {}), '(network_empirical_simplified)\n', (23442, 23472), False, 'import copy\n'), ((23530, 23572), 'tigernet.generate_obs', 'tigernet.generate_obs', (['(500)', 'network.s_data'], {}), '(500, network.s_data)\n', (23551, 23572), False, 'import tigernet\n'), ((23916, 23954), 'tigernet.Observations', 'tigernet.Observations', (['*args'], {}), '(*args, **kwargs)\n', (23937, 23954), False, 'import tigernet\n'), ((25071, 25186), 'numpy.array', 'numpy.array', (['[233.41263770566138, 298.0459114928476, 34.25197729818704, \n 170.95581991959833, 300.2025615374258]'], {}), '([233.41263770566138, 298.0459114928476, 34.25197729818704, \n 170.95581991959833, 300.2025615374258])\n', (25082, 25186), False, 'import numpy\n'), ((241, 260), 'platform.platform', 'platform.platform', ([], {}), '()\n', (258, 260), False, 'import platform\n'), ((13252, 13287), 'numpy.array', 'numpy.array', (['observed_obs2coords[k]'], {}), '(observed_obs2coords[k])\n', (13263, 13287), False, 'import numpy\n'), ((14099, 14127), 'numpy.array', 'numpy.array', (['observed_dist_a'], {}), '(observed_dist_a)\n', (14110, 14127), False, 'import 
numpy\n'), ((14840, 14868), 'numpy.array', 'numpy.array', (['observed_dist_b'], {}), '(observed_dist_b)\n', (14851, 14868), False, 'import numpy\n'), ((16058, 16089), 'numpy.array', 'numpy.array', (['observed_dist2line'], {}), '(observed_dist2line)\n', (16069, 16089), False, 'import numpy\n'), ((20084, 20119), 'numpy.array', 'numpy.array', (['observed_obs2coords[k]'], {}), '(observed_obs2coords[k])\n', (20095, 20119), False, 'import numpy\n'), ((20931, 20959), 'numpy.array', 'numpy.array', (['observed_dist_a'], {}), '(observed_dist_a)\n', (20942, 20959), False, 'import numpy\n'), ((21672, 21700), 'numpy.array', 'numpy.array', (['observed_dist_b'], {}), '(observed_dist_b)\n', (21683, 21700), False, 'import numpy\n'), ((23040, 23071), 'numpy.array', 'numpy.array', (['observed_dist2line'], {}), '(observed_dist2line)\n', (23051, 23071), False, 'import numpy\n'), ((13345, 13359), 'numpy.array', 'numpy.array', (['v'], {}), '(v)\n', (13356, 13359), False, 'import numpy\n'), ((17478, 17513), 'numpy.array', 'numpy.array', (['observed_obs2coords[k]'], {}), '(observed_obs2coords[k])\n', (17489, 17513), False, 'import numpy\n'), ((17515, 17529), 'numpy.array', 'numpy.array', (['v'], {}), '(v)\n', (17526, 17529), False, 'import numpy\n'), ((18338, 18369), 'numpy.array', 'numpy.array', (['observed_dist2node'], {}), '(observed_dist2node)\n', (18349, 18369), False, 'import numpy\n'), ((20177, 20191), 'numpy.array', 'numpy.array', (['v'], {}), '(v)\n', (20188, 20191), False, 'import numpy\n'), ((24593, 24628), 'numpy.array', 'numpy.array', (['observed_obs2coords[k]'], {}), '(observed_obs2coords[k])\n', (24604, 24628), False, 'import numpy\n'), ((24630, 24644), 'numpy.array', 'numpy.array', (['v'], {}), '(v)\n', (24641, 24644), False, 'import numpy\n'), ((25453, 25484), 'numpy.array', 'numpy.array', (['observed_dist2node'], {}), '(observed_dist2node)\n', (25464, 25484), False, 'import numpy\n')] |
## @ingroup Methods-Aerodynamics-Common-Fidelity_Zero-Lift
# compute_HFW_inflow_velocities.py
#
# Created: Sep 2021, <NAME>
# Modified:
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from SUAVE.Core import Data
from SUAVE.Methods.Aerodynamics.Common.Fidelity_Zero.Lift.generate_propeller_wake_distribution import generate_propeller_wake_distribution
from SUAVE.Methods.Aerodynamics.Common.Fidelity_Zero.Lift.compute_wake_induced_velocity import compute_wake_induced_velocity
# package imports
import numpy as np
from scipy.interpolate import interp1d
def compute_HFW_inflow_velocities( prop ):
    """
    Compute rotor-disc inflow velocities induced by a high-fidelity
    helical fixed wake (HFW).

    Assumptions:
       None

    Source:
       N/A

    Inputs:
       prop - rotor instance

    Outputs:
       Va  - axial velocity array of shape (ctrl_pts, Nr, Na)        [m/s]
       Vt  - tangential velocity array of shape (ctrl_pts, Nr, Na)   [m/s]
    """
    VD                       = Data()
    omega                    = prop.inputs.omega
    time                     = prop.wake_settings.wake_development_time
    init_timestep_offset     = prop.wake_settings.init_timestep_offset
    number_of_wake_timesteps = prop.wake_settings.number_of_wake_timesteps

    # use results from prior bemt iteration
    prop_outputs  = prop.outputs
    cpts          = len(prop_outputs.velocity)
    Na            = prop.number_azimuthal_stations
    Nr            = len(prop.chord_distribution)
    r             = prop.radius_distribution

    # minimal conditions/props containers expected by the wake generator
    conditions = Data()
    conditions.noise = Data()
    conditions.noise.sources = Data()
    conditions.noise.sources.propellers = Data()
    conditions.noise.sources.propellers.propeller = prop_outputs

    props = Data()
    props.propeller = prop
    identical = False

    # compute radial blade section locations based on initial timestep offset
    dt   = time/number_of_wake_timesteps
    t0   = dt*init_timestep_offset

    # set shape of velocitie arrays
    Va = np.zeros((cpts,Nr,Na))
    Vt = np.zeros((cpts,Nr,Na))

    for i in range(Na):
        # increment blade angle to new azimuthal position
        blade_angle = (omega[0]*t0 + i*(2*np.pi/(Na))) * prop.rotation  # Positive rotation, positive blade angle

        # update wake geometry
        init_timestep_offset = blade_angle/(omega * dt)

        # generate wake distribution using initial circulation from BEMT
        WD, _, _, _, _  = generate_propeller_wake_distribution(props,identical,cpts,VD,
                                                             init_timestep_offset, time,
                                                             number_of_wake_timesteps,conditions )

        # ----------------------------------------------------------------
        #  Compute the wake-induced velocities at propeller blade
        # ----------------------------------------------------------------
        # set the evaluation points in the vortex distribution: (ncpts, nblades, Nr, Ntsteps)
        r = prop.radius_distribution
        Yb   = prop.Wake_VD.Yblades_cp[0,0,:,0]
        Zb   = prop.Wake_VD.Zblades_cp[0,0,:,0]
        Xb   = prop.Wake_VD.Xblades_cp[0,0,:,0]

        # control points at radial midpoints between blade stations
        VD.YC = (Yb[1:] + Yb[:-1])/2
        VD.ZC = (Zb[1:] + Zb[:-1])/2
        VD.XC = (Xb[1:] + Xb[:-1])/2

        VD.n_cp = np.size(VD.YC)

        # Compute induced velocities at blade from the helical fixed wake
        VD.Wake_collapsed = WD

        V_ind   = compute_wake_induced_velocity(WD, VD, cpts)
        u       = V_ind[0,:,0]   # velocity in vehicle x-frame
        v       = V_ind[0,:,1]   # velocity in vehicle y-frame
        w       = V_ind[0,:,2]   # velocity in vehicle z-frame

        # interpolate to get values at rotor radial stations
        r_midpts = (r[1:] + r[:-1])/2
        u_r      = interp1d(r_midpts, u, fill_value="extrapolate")
        v_r      = interp1d(r_midpts, v, fill_value="extrapolate")
        w_r      = interp1d(r_midpts, w, fill_value="extrapolate")

        up = u_r(r)
        vp = v_r(r)
        wp = w_r(r)

        # Update velocities at the disc
        # NOTE(review): sign convention appears to be positive-inflow axial and
        # blade-frame tangential projection — confirm against rotor frame docs
        Va[:,:,i]  = -up
        Vt[:,:,i]  = (-vp*np.cos(blade_angle) + wp*np.sin(blade_angle))

    prop.vortex_distribution = VD
return Va, Vt | [
"numpy.size",
"SUAVE.Core.Data",
"numpy.zeros",
"numpy.sin",
"numpy.cos",
"scipy.interpolate.interp1d",
"SUAVE.Methods.Aerodynamics.Common.Fidelity_Zero.Lift.compute_wake_induced_velocity.compute_wake_induced_velocity",
"SUAVE.Methods.Aerodynamics.Common.Fidelity_Zero.Lift.generate_propeller_wake_dist... | [((1028, 1034), 'SUAVE.Core.Data', 'Data', ([], {}), '()\n', (1032, 1034), False, 'from SUAVE.Core import Data\n'), ((1590, 1596), 'SUAVE.Core.Data', 'Data', ([], {}), '()\n', (1594, 1596), False, 'from SUAVE.Core import Data\n'), ((1620, 1626), 'SUAVE.Core.Data', 'Data', ([], {}), '()\n', (1624, 1626), False, 'from SUAVE.Core import Data\n'), ((1658, 1664), 'SUAVE.Core.Data', 'Data', ([], {}), '()\n', (1662, 1664), False, 'from SUAVE.Core import Data\n'), ((1707, 1713), 'SUAVE.Core.Data', 'Data', ([], {}), '()\n', (1711, 1713), False, 'from SUAVE.Core import Data\n'), ((1790, 1796), 'SUAVE.Core.Data', 'Data', ([], {}), '()\n', (1794, 1796), False, 'from SUAVE.Core import Data\n'), ((2045, 2069), 'numpy.zeros', 'np.zeros', (['(cpts, Nr, Na)'], {}), '((cpts, Nr, Na))\n', (2053, 2069), True, 'import numpy as np\n'), ((2077, 2101), 'numpy.zeros', 'np.zeros', (['(cpts, Nr, Na)'], {}), '((cpts, Nr, Na))\n', (2085, 2101), True, 'import numpy as np\n'), ((2486, 2620), 'SUAVE.Methods.Aerodynamics.Common.Fidelity_Zero.Lift.generate_propeller_wake_distribution.generate_propeller_wake_distribution', 'generate_propeller_wake_distribution', (['props', 'identical', 'cpts', 'VD', 'init_timestep_offset', 'time', 'number_of_wake_timesteps', 'conditions'], {}), '(props, identical, cpts, VD,\n init_timestep_offset, time, number_of_wake_timesteps, conditions)\n', (2522, 2620), False, 'from SUAVE.Methods.Aerodynamics.Common.Fidelity_Zero.Lift.generate_propeller_wake_distribution import generate_propeller_wake_distribution\n'), ((3394, 3408), 'numpy.size', 'np.size', (['VD.YC'], {}), '(VD.YC)\n', (3401, 3408), True, 'import numpy as np\n'), ((3534, 3577), 'SUAVE.Methods.Aerodynamics.Common.Fidelity_Zero.Lift.compute_wake_induced_velocity.compute_wake_induced_velocity', 'compute_wake_induced_velocity', (['WD', 'VD', 'cpts'], {}), '(WD, VD, cpts)\n', (3563, 3577), False, 'from 
SUAVE.Methods.Aerodynamics.Common.Fidelity_Zero.Lift.compute_wake_induced_velocity import compute_wake_induced_velocity\n'), ((3889, 3936), 'scipy.interpolate.interp1d', 'interp1d', (['r_midpts', 'u'], {'fill_value': '"""extrapolate"""'}), "(r_midpts, u, fill_value='extrapolate')\n", (3897, 3936), False, 'from scipy.interpolate import interp1d\n'), ((3951, 3998), 'scipy.interpolate.interp1d', 'interp1d', (['r_midpts', 'v'], {'fill_value': '"""extrapolate"""'}), "(r_midpts, v, fill_value='extrapolate')\n", (3959, 3998), False, 'from scipy.interpolate import interp1d\n'), ((4013, 4060), 'scipy.interpolate.interp1d', 'interp1d', (['r_midpts', 'w'], {'fill_value': '"""extrapolate"""'}), "(r_midpts, w, fill_value='extrapolate')\n", (4021, 4060), False, 'from scipy.interpolate import interp1d\n'), ((4229, 4248), 'numpy.cos', 'np.cos', (['blade_angle'], {}), '(blade_angle)\n', (4235, 4248), True, 'import numpy as np\n'), ((4254, 4273), 'numpy.sin', 'np.sin', (['blade_angle'], {}), '(blade_angle)\n', (4260, 4273), True, 'import numpy as np\n')] |
import sys, h5py, subprocess, os, numpy
def write_text_2d(fname, head, dm):
    '''Dump the significant entries of a 2d array to a text file.

    Writes *head* first, then one "i j real imag" row for every element
    of *dm* whose magnitude exceeds 1e-12.
    '''
    fmt = '{:3d}{:3d}{:24.16f}{:24.16f}\n'
    with open(fname, 'w') as fout:
        fout.write(head)
        for row, dm_row in enumerate(dm):
            for col, elem in enumerate(dm_row):
                # Skip numerically negligible entries.
                if abs(elem) <= 1.e-12:
                    continue
                fout.write(fmt.format(row, col, elem.real, elem.imag))
def write_text_4d(fname, head, dm):
    '''Dump the significant entries of a 4d array to a text file.

    Writes *head* first, then one "i j k l real imag" row for every
    element of *dm* whose magnitude exceeds 1e-12.
    '''
    fmt = '{:3d}{:3d}{:3d}{:3d}{:24.16f}{:24.16f}\n'
    with open(fname, 'w') as fout:
        fout.write(head)
        for i, a1 in enumerate(dm):
            for j, a2 in enumerate(a1):
                for k, a3 in enumerate(a2):
                    for l, val in enumerate(a3):
                        # Skip numerically negligible entries.
                        if abs(val) <= 1.e-12:
                            continue
                        fout.write(fmt.format(i, j, k, l,
                                              val.real, val.imag))
def h5gen_text_embed_hamil(imp):
    '''Generate text files of the embedding Hamiltonian.

    Reads D, LAMBDA, H1E and V2E from ``EMBED_HAMIL_{imp}.h5`` and dumps
    each to a plain-text file for the external DMRG code. ``v2e_{imp}.dat``
    is only written if it does not already exist.
    '''
    with h5py.File('EMBED_HAMIL_{}.h5'.format(imp), 'r') as fh:
        daalpha = fh['/D'][...].T
        lambdac = fh['/LAMBDA'][...].T
        h1e = fh['/H1E'][...].T
        v2e = fh['/V2E'][...].T
    # h1e file
    h1e_head = ('# list of h1e_{alpha, beta} of significance \n'
                '# alpha beta h1e.real h1e.imag \n')
    write_text_2d('h1e_{}.dat'.format(imp), h1e_head, h1e)
    # d file
    d_head = ('# list of d_{a, alpha} with significance \n'
              '# a alpha d.real d.imag \n')
    write_text_2d('d_aalpha_{}.dat'.format(imp), d_head, daalpha)
    # lambdac file
    lam_head = ('# list of lambdac_{a, b} of significance \n'
                '# a b lambdac.real lambdac.imag \n')
    write_text_2d('lambdac_{}.dat'.format(imp), lam_head, lambdac)
    # v2e file (convention documented in the header written to the file)
    v2e_head = ('# Note H_loc = \sum_{a,b} {h1e_a,b c_a^\dagger c_b} \n'
                '# + 1/2 \sum_{a, b, c, d} v2e_{a,b,c,d} \n'
                '# * c_a^\dagger c_c^\dagger c_d c_b \n'
                '# list of v2e_{alpha, beta, gamma, delta} of significance \n'
                '# alpha beta gamma delta v2e.real'
                ' v2e.imag\n')
    if not os.path.isfile('v2e_{}.dat'.format(imp)):
        write_text_4d('v2e_{}.dat'.format(imp), v2e_head, v2e)
def gen_file_lat(imp, flat, reorder=None):
    '''Generate lat file.

    Invokes ``syten-mps-embedding-lanata`` on the text dumps produced by
    :func:`h5gen_text_embed_hamil`, writing the lattice to *flat*.
    An optional site-reordering file may be passed via *reorder*.
    '''
    cmd = [
        '/usr/bin/time', '-v', '-a', '-o', 'timing_{}'.format(imp),
        'syten-mps-embedding-lanata',
        '--h1e', './h1e_{}.dat'.format(imp),
        '--d', './d_aalpha_{}.dat'.format(imp),
        '--lambdac', './lambdac_{}.dat'.format(imp),
        '--v2e', './v2e_{}.dat'.format(imp),
        '-o', flat, '-q',
    ]
    if reorder is not None:
        cmd += ['-r', reorder]
    print(' '.join(cmd))
    subprocess.call(cmd)
def gen_file_rnd_state(imp, norbs, lat):
    '''Generate random state for initialization.

    Calls ``syten-random`` to create ``rnd_{imp}.state`` with *norbs*
    sites on lattice *lat*.
    '''
    cmd = [
        '/usr/bin/time', '-v', '-a', '-o', 'timing_{}'.format(imp),
        'syten-random', '-s', str(norbs), '-l', lat,
        '-o', 'rnd_{}.state'.format(imp),
    ]
    print(' '.join(cmd))
    subprocess.call(cmd)
def run_syten_dmrg(imp, lat, inp_state, out_f, s_config, out_state=None,
                   threads_tensor=1):
    '''Run dmrg calculation.

    Runs ``syten-dmrg`` on lattice *lat* starting from *inp_state* with
    sweep configuration *s_config*. Optionally writes the final state to
    *out_state* and enables tensor-level threading.
    '''
    # Optional flags are collected first so the fixed part stays readable.
    extra = []
    if out_state is not None:
        extra += ['-f', out_state]
    if threads_tensor > 1:
        extra += ['--threads-tensor', str(threads_tensor)]
    cmd = ['/usr/bin/time', '-v', '-a', '-o', 'timing_{}'.format(imp),
           'syten-dmrg', '-l', lat, '-i', inp_state, '-o', out_f,
           '-s', s_config, '-q'] + extra
    print(' '.join(cmd))
    subprocess.call(cmd)
def run_syten_expectation(imp, lat, state, f_expval):
    '''Calculate expectation values.

    Evaluates the operator templates in *f_expval* on *state* using
    ``syten-expectation`` and returns the captured process output.
    '''
    args = ['syten-expectation', '-c', '-a', state,
            '-l', lat, '--template-file', f_expval, '-q']
    output = subprocess.check_output(args)
    return output
def run_syten_mutual_information(imp, state, f_reorder):
    '''Compute the mutual information of *state* and write a reordering file.

    Calls ``syten-mutualInformation``; the suggested site ordering is
    written to *f_reorder*.
    '''
    cmd = [
        '/usr/bin/time', '-v', '-a', '-o', 'timing_{}'.format(imp),
        'syten-mutualInformation', '-o', state, '-f', f_reorder,
    ]
    print(' '.join(cmd))
    subprocess.call(cmd)
def driver_dmrg(s_config= \
        '(t 1e-8 d 1e-8 m 50 expandBlocksize 10 x 5 save false)' + \
        '(m 100) '*8 + '(m 100 d 1e-8 save true)'):
    '''Driver for the impurity dmrg calculations.

    Workflow: read the embedding Hamiltonian from ``EMBED_HAMIL_{imp}.h5``,
    dump it to text, run an initial DMRG pass to obtain a mutual-information
    site reordering (if none exists yet), run the production DMRG on the
    reordered lattice, measure the one-body density matrix, and store the
    result in ``EMBED_HAMIL_RES_{imp}.h5``.

    Parameters
    ----------
    s_config : str, optional
        syten sweep configuration string; overridden by
        ``s_config_{imp}.cfg`` if that file exists.
    '''
    # Impurity index from the command line ('-i <imp>'); defaults to 1.
    if '-i' in sys.argv:
        imp = sys.argv[sys.argv.index('-i')+1]
    else:
        imp = 1
    # A per-impurity config file on disk overrides the default sweep config.
    if os.path.isfile('s_config_{}.cfg'.format(imp)):
        s_config = ''
        for line in open('s_config_{}.cfg'.format(imp), 'r').readlines():
            s_config += line.replace('\n', '')
    with h5py.File('EMBED_HAMIL_{}.h5'.format(imp), 'r') as f:
        na2 = f['/na2'][0]
        lambdac = f['/LAMBDA'][...].T
    # dump to text file due to dmrg code
    h5gen_text_embed_hamil(imp)
    f_reorder = 'reordering_{}'.format(imp)
    if not os.path.isfile(f_reorder):
        # generate lat file
        flat = 'without-reordering_{}.lat'.format(imp)
        gen_file_lat(imp, flat)
        # generate random state for initialization.
        gen_file_rnd_state(imp, na2, flat)
        # initial stage 1/2 sweeping
        flat = 'without-reordering_{}.lat:H1e Hd Hf Hv2e + + +'.format(imp)
        inp_state = 'rnd_{}.state'.format(imp)
        out_f = 'without-reordering-dmrg_{}'.format(imp)
        s_config0 = '(t 1e-8 d 1e-6 m 50 x 5) (m 100)'
        run_syten_dmrg(imp, flat, inp_state, out_f, s_config0)
        # reorder
        state = out_f + '_2_5.state'
        run_syten_mutual_information(imp, state, f_reorder)
    else:
        # Restart from a previous ground state if one is available.
        if os.path.isfile('with-reordering-gs_{}.state'.format(imp)):
            inp_state = 'with-reordering-gs_{}.state'.format(imp)
        else:
            inp_state = 'rnd_{}.state'.format(imp)
    # generate lat file with reordering
    flat = 'with-reordering_{}.lat'.format(imp)
    gen_file_lat(imp, flat, reorder=f_reorder)
    # dmrg after reordering
    flat = 'with-reordering_{}.lat:H1e Hd Hf Hv2e + + +'.format(imp)
    out_f = 'with-reordering-dmrg_{}'.format(imp)
    out_state = 'with-reordering-gs_{}.state'.format(imp)
    run_syten_dmrg(imp, flat, inp_state, out_f, s_config,
            out_state=out_state)
    # get expectation value: one template line per upper-triangle pair,
    # plus the Hamiltonian itself on the last line.
    f_temp = 'exp_val_{}.template'.format(imp)
    na4 = na2*2
    if not os.path.isfile(f_temp):
        with open(f_temp, 'w') as f:
            for i in range(na4):
                for j in range(i, na4):
                    f.write('{{ CH_{} C_{} * }}\n'.format(i, j))
            f.write('{ H1e Hd Hf Hv2e + + + }')
    flat = 'with-reordering_{}.lat'.format(imp)
    res = run_syten_expectation(imp, flat, out_state, f_temp)
    # BUGFIX: check_output returns bytes on Python 3 -- decode before
    # splitting, and use integer division for the slice bound.
    res = res.decode().split('\n')[:na4*(na4+1)//2 + 1]
    # BUGFIX: numpy.complex was removed in NumPy 1.24; the builtin complex
    # is the documented replacement.
    dm = numpy.zeros([na4, na4], dtype=complex)
    ij = 0
    for i in range(na4):
        for j in range(i, na4):
            # BUGFIX: map() is lazy on Python 3 -- materialize before
            # subscripting.
            res_ = [float(x) for x in res[ij].split()]
            ij += 1
            dm[i, j] = res_[0] + res_[1]*1.j
            if i != j:
                # Density matrix is Hermitian; fill the lower triangle.
                dm[j, i] = numpy.conj(dm[i, j])
    res_ = [float(x) for x in res[-1].split()]
    assert numpy.abs(res_[1]) < 1.e-6, ' syten: non-real impurity etot!'
    # convention: f_b f_a^\dagger = \delta_{a,b} - f_a^\dagger f_b
    etot = res_[0] - numpy.trace(lambdac)
    with h5py.File('EMBED_HAMIL_RES_{}.h5'.format(imp), 'w') as f:
        f['/emol'] = [etot.real]
        f['/DM'] = dm.T
if __name__ == '__main__':
    # Script entry point: run the full impurity DMRG driver.
    driver_dmrg()
| [
"numpy.conj",
"numpy.trace",
"numpy.abs",
"subprocess.check_output",
"numpy.zeros",
"os.path.isfile",
"subprocess.call",
"sys.argv.index"
] | [((2911, 2935), 'subprocess.call', 'subprocess.call', (['command'], {}), '(command)\n', (2926, 2935), False, 'import sys, h5py, subprocess, os, numpy\n'), ((3247, 3271), 'subprocess.call', 'subprocess.call', (['command'], {}), '(command)\n', (3262, 3271), False, 'import sys, h5py, subprocess, os, numpy\n'), ((3785, 3809), 'subprocess.call', 'subprocess.call', (['command'], {}), '(command)\n', (3800, 3809), False, 'import sys, h5py, subprocess, os, numpy\n'), ((4037, 4069), 'subprocess.check_output', 'subprocess.check_output', (['command'], {}), '(command)\n', (4060, 4069), False, 'import sys, h5py, subprocess, os, numpy\n'), ((4306, 4330), 'subprocess.call', 'subprocess.call', (['command'], {}), '(command)\n', (4321, 4330), False, 'import sys, h5py, subprocess, os, numpy\n'), ((6942, 6986), 'numpy.zeros', 'numpy.zeros', (['[na4, na4]'], {'dtype': 'numpy.complex'}), '([na4, na4], dtype=numpy.complex)\n', (6953, 6986), False, 'import sys, h5py, subprocess, os, numpy\n'), ((5095, 5120), 'os.path.isfile', 'os.path.isfile', (['f_reorder'], {}), '(f_reorder)\n', (5109, 5120), False, 'import sys, h5py, subprocess, os, numpy\n'), ((6532, 6554), 'os.path.isfile', 'os.path.isfile', (['f_temp'], {}), '(f_temp)\n', (6546, 6554), False, 'import sys, h5py, subprocess, os, numpy\n'), ((7288, 7306), 'numpy.abs', 'numpy.abs', (['res_[1]'], {}), '(res_[1])\n', (7297, 7306), False, 'import sys, h5py, subprocess, os, numpy\n'), ((7439, 7459), 'numpy.trace', 'numpy.trace', (['lambdac'], {}), '(lambdac)\n', (7450, 7459), False, 'import sys, h5py, subprocess, os, numpy\n'), ((4588, 4608), 'sys.argv.index', 'sys.argv.index', (['"""-i"""'], {}), "('-i')\n", (4602, 4608), False, 'import sys, h5py, subprocess, os, numpy\n'), ((7217, 7237), 'numpy.conj', 'numpy.conj', (['dm[i, j]'], {}), '(dm[i, j])\n', (7227, 7237), False, 'import sys, h5py, subprocess, os, numpy\n')] |
import numpy as np
# Set up an NCSS query from thredds using siphon
# NOTE(review): this is a script fragment -- `ncss`, `datetime`, `timedelta`,
# `xr`, `NetCDF4DataStore`, `plt`, `ccrs`, `cfeature` and `data_projection`
# must be defined/imported by the surrounding script or notebook.
query = ncss.query()
query.accept('netcdf4')
# Request temperature and geopotential height on isobaric surfaces.
query.variables('Temperature_isobaric', 'Geopotential_height_isobaric')
# presumably the 50000 Pa (= 500 hPa) level -- verify the dataset's units.
query.vertical_level(50000)
now = datetime.utcnow()
# One-day time window starting now.
query.time_range(now, now + timedelta(days=1))
# Bounding box in degrees longitude/latitude.
query.lonlat_box(west=-110, east=-45, north=50, south=10)
# Download data using NCSS
data = ncss.get_data(query)
ds = xr.open_dataset(NetCDF4DataStore(data))
temp_var = ds.metpy.parse_cf('Temperature_isobaric')
height_var = ds.metpy.parse_cf('Geopotential_height_isobaric')
longitude = temp_var.metpy.x
latitude = temp_var.metpy.y
# Plot only the first time step of the returned forecast.
time_index = 0
# Plot using CartoPy and Matplotlib
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
# Contour levels every 80 units between 5000 and 6000 -- presumably
# geopotential height in meters; TODO confirm against dataset metadata.
contours = np.arange(5000, 6000, 80)
# Temperature as a filled mesh underneath black height contours.
ax.pcolormesh(longitude, latitude, temp_var[time_index].squeeze(),
              transform=data_projection, zorder=0)
ax.contour(longitude, latitude, height_var[time_index].squeeze(), contours, colors='k',
           transform=data_projection, linewidths=2, zorder=1)
ax.set_title(temp_var.metpy.time[time_index].values)
# add some common geographic features
ax.add_feature(cfeature.COASTLINE)
ax.add_feature(cfeature.STATES, edgecolor='black')
ax.add_feature(cfeature.BORDERS)
# add some lat/lon gridlines
ax.gridlines()
"numpy.arange"
] | [((782, 807), 'numpy.arange', 'np.arange', (['(5000)', '(6000)', '(80)'], {}), '(5000, 6000, 80)\n', (791, 807), True, 'import numpy as np\n')] |
"""Landlab component that simulates overland flow.
This component simulates overland flow using the 2-D numerical model of
shallow-water flow over topography using the de Almeida et al., 2012
algorithm for storage-cell inundation modeling.
.. codeauthor:: <NAME>
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.components.overland_flow import OverlandFlow
Create a grid on which to calculate overland flow.
>>> grid = RasterModelGrid((4, 5))
The grid will need some data to provide the overland flow component. To
check the names of the fields that provide input to the overland flow
component use the *input_var_names* class property.
>>> OverlandFlow.input_var_names
('water__depth', 'topographic__elevation')
Create fields of data for each of these input variables.
>>> grid.at_node['topographic__elevation'] = np.array([
... 0., 0., 0., 0., 0.,
... 1., 1., 1., 1., 1.,
... 2., 2., 2., 2., 2.,
... 3., 3., 3., 3., 3.])
>>> grid.at_node['water__depth'] = np.array([
... 0. , 0. , 0. , 0. , 0. ,
... 0. , 0. , 0. , 0. , 0. ,
... 0. , 0. , 0. , 0. , 0. ,
... 0.1, 0.1, 0.1, 0.1, 0.1])
Instantiate the `OverlandFlow` component to work on this grid, and run it.
>>> of = OverlandFlow(grid, steep_slopes=True)
>>> of.overland_flow()
After calculating the overland flow, new fields have been added to the
grid. Use the *output_var_names* property to see the names of the fields that
have been changed.
>>> of.output_var_names
('water__depth', 'water__discharge', 'water_surface__gradient')
The `water__depth` field is defined at nodes.
>>> of.var_loc('water__depth')
'node'
>>> grid.at_node['water__depth'] # doctest: +NORMALIZE_WHITESPACE
array([ 1.00000000e-05, 1.00000000e-05, 1.00000000e-05,
1.00000000e-05, 1.00000000e-05, 1.00000000e-05,
1.00000000e-05, 1.00000000e-05, 1.00000000e-05,
1.00000000e-05, 1.00000000e-05, 2.00100000e-02,
2.00100000e-02, 2.00100000e-02, 1.00000000e-05,
1.00010000e-01, 1.00010000e-01, 1.00010000e-01,
1.00010000e-01, 1.00010000e-01])
The `water__discharge` field is defined at links. Because our initial
topography was a dipping plane, there is no water discharge in the horizontal
direction, only toward the bottom of the grid.
>>> of.var_loc('water__discharge')
'link'
>>> q = grid.at_link['water__discharge'] # doctest: +NORMALIZE_WHITESPACE
>>> np.all(q[grid.horizontal_links] == 0.)
True
>>> np.all(q[grid.vertical_links] <= 0.)
True
The *water_surface__gradient* is also defined at links.
>>> of.var_loc('water_surface__gradient')
'link'
>>> grid.at_link['water_surface__gradient'] # doctest: +NORMALIZE_WHITESPACE
array([ 0. , 0. , 0. , 0. ,
0. , 1. , 1. , 1. , 0. ,
0. , 0. , 0. , 0. ,
0. , 1. , 1. , 1. , 0. ,
0. , 0. , 0. , 0. ,
0. , 1.1, 1.1, 1.1, 0. ,
0. , 0. , 0. , 0. ])
"""
from landlab import Component, FieldError
import numpy as np
from landlab.grid.structured_quad import links
from landlab.utils.decorators import use_file_name_or_kwds
_SEVEN_OVER_THREE = 7.0 / 3.0
class OverlandFlow(Component):
    """Simulate overland flow using de Almeida approximations.

    Landlab component that simulates overland flow using the de Almeida
    et al., 2012 approximations of the 1D shallow water equations to be used
    for 2D flood inundation modeling.

    This component calculates discharge, depth and shear stress after some
    precipitation event across any raster grid. Default input file is named
    "overland_flow_input.txt' and is contained in the
    landlab.components.overland_flow folder.

    Parameters
    ----------
    grid : RasterModelGrid
        A landlab grid.
    h_init : float, optional
        Thicknes of initial thin layer of water to prevent divide by zero
        errors (m).
    alpha : float, optional
        Time step coeffcient, described in Bates et al., 2010 and
        de Almeida et al., 2012.
    mannings_n : float, optional
        Manning's roughness coefficient.
    g : float, optional
        Acceleration due to gravity (m/s^2).
    theta : float, optional
        Weighting factor from de Almeida et al., 2012.
    rainfall_intensity : float, optional
        Rainfall intensity.
    use_fixed_links : bool, optional
        If True, discharge on fixed links is mirrored from their nearest
        active-link neighbors each step.
    steep_slopes : bool, optional
        If True, limit discharge on steep slopes for numerical stability
        (Froude and Courant conditions).
    """

    _name = 'OverlandFlow'

    _input_var_names = (
        'water__depth',
        'topographic__elevation',
    )

    _output_var_names = (
        'water__depth',
        'water__discharge',
        'water_surface__gradient',
    )

    _var_units = {
        'water__depth': 'm',
        'water__discharge': 'm3/s',
        'topographic__elevation': 'm',
        'water_surface__gradient': '-',
    }

    # BUGFIX: key was misspelled 'topographic__elevtation', so lookups with
    # the advertised input name 'topographic__elevation' would KeyError.
    _var_mapping = {
        'water__depth': 'node',
        'topographic__elevation': 'node',
        'water__discharge': 'link',
        'water_surface__gradient': 'link',
    }

    # BUGFIX: same misspelled key fixed here.
    _var_doc = {
        'water__depth': 'The depth of water at each node.',
        'topographic__elevation': 'The land surface elevation.',
        'water__discharge': 'The discharge of water on active links.',
        'water_surface__gradient': 'Downstream gradient of the water surface.',
    }

    @use_file_name_or_kwds
    def __init__(self, grid, use_fixed_links=False, h_init=0.00001, alpha=0.7,
                 mannings_n=0.03, g=9.81, theta=0.8, rainfall_intensity=0.0,
                 steep_slopes=False, **kwds):
        """Create a overland flow component.

        Parameters
        ----------
        grid : RasterModelGrid
            A landlab grid.
        h_init : float, optional
            Thicknes of initial thin layer of water to prevent divide by zero
            errors (m).
        alpha : float, optional
            Time step coeffcient, described in Bates et al., 2010 and
            de Almeida et al., 2012.
        mannings_n : float, optional
            Manning's roughness coefficient.
        g : float, optional
            Acceleration due to gravity (m/s^2).
        theta : float, optional
            Weighting factor from de Almeida et al., 2012.
        rainfall_intensity : float, optional
            Rainfall intensity.
        use_fixed_links : bool, optional
            Mirror discharge on fixed links from active neighbors.
        steep_slopes : bool, optional
            Apply stability limiting on steep slopes.
        """
        super(OverlandFlow, self).__init__(grid, **kwds)

        # First we copy our grid
        self._grid = grid

        self.h_init = h_init
        self.alpha = alpha
        self.mannings_n = mannings_n
        self.g = g
        self.theta = theta
        self.rainfall_intensity = rainfall_intensity
        self.steep_slopes = steep_slopes

        # Now setting up fields at the links...
        # For water discharge
        try:
            self.q = grid.add_zeros('water__discharge', at='link',
                                    units=self._var_units['water__discharge'])
        except FieldError:
            # Field was already set; still, fill it with zeros
            self.q = grid.at_link['water__discharge']
            self.q.fill(0.)

        # For water depths calculated at links
        try:
            self.h_links = grid.add_zeros('water__depth', at='link',
                                          units=self._var_units[
                                              'water__depth'])
        except FieldError:
            self.h_links = grid.at_link['water__depth']
            self.h_links.fill(0.)
        self.h_links += self.h_init

        try:
            self.h = grid.add_zeros('water__depth', at='node',
                                    units=self._var_units['water__depth'])
        except FieldError:
            # Field was already set
            self.h = grid.at_node['water__depth']
        self.h += self.h_init

        # For water surface slopes at links
        try:
            self.slope = grid.add_zeros('water_surface__gradient', at='link')
        except FieldError:
            self.slope = grid.at_link['water_surface__gradient']
            self.slope.fill(0.)

        # Start time of simulation is at 1.0 s
        self.elapsed_time = 1.0

        self.dt = None
        self.dhdt = grid.zeros()

        # When we instantiate the class we recognize that neighbors have not
        # been found. After the user either calls self.set_up_neighbor_array
        # or self.overland_flow this will be set to True. This is done so
        # that every iteration of self.overland_flow does NOT need to
        # reinitalize the neighbors and saves computation time.
        self.neighbor_flag = False

        # When looking for neighbors, we automatically ignore inactive links
        # by default. However, what about when we want to look at fixed links
        # too? By default, we ignore these, but if they are important to your
        # model and will be updated in your driver loop, they can be used by
        # setting the flag in the initialization of the class to 'True'
        self.use_fixed_links = use_fixed_links

        # Assiging a class variable to the elevation field.
        self.z = self._grid.at_node['topographic__elevation']

    def calc_time_step(self):
        """Calculate time step.

        Adaptive time stepper from Bates et al., 2010 and de Almeida
        et al., 2012. Stores the result in ``self.dt`` and returns it.
        """
        self.dt = (self.alpha * self._grid.dx / np.sqrt(self.g * np.amax(
            self._grid.at_node['water__depth'])))

        return self.dt

    def set_up_neighbor_arrays(self):
        """Create and initialize link neighbor arrays.

        Set up arrays of neighboring horizontal and vertical links that are
        needed for the de Almeida solution.
        """
        # First we identify all active links
        self.active_ids = links.active_link_ids(self.grid.shape,
                                                self.grid.status_at_node)

        # And then find all horizontal link IDs (Active and Inactive)
        self.horizontal_ids = links.horizontal_link_ids(self.grid.shape)

        # And make the array 1-D
        self.horizontal_ids = self.horizontal_ids.flatten()

        # Find all horizontal active link ids
        self.horizontal_active_link_ids = links.horizontal_active_link_ids(
            self.grid.shape, self.active_ids)

        # Now we repeat this process for the vertical links.
        # First find the vertical link ids and reshape it into a 1-D array
        self.vertical_ids = links.vertical_link_ids(self.grid.shape).flatten()

        # Find the *active* verical link ids
        self.vertical_active_link_ids = links.vertical_active_link_ids(
            self.grid.shape, self.active_ids)

        if self.use_fixed_links is True:
            fixed_link_ids = links.fixed_link_ids(
                self.grid.shape, self.grid.status_at_node)
            fixed_horizontal_links = links.horizontal_fixed_link_ids(
                self.grid.shape, fixed_link_ids)
            fixed_vertical_links = links.vertical_fixed_link_ids(
                self.grid.shape, fixed_link_ids)
            # Fixed links are folded into the active-link arrays so they are
            # treated like active links in the neighbor lookups below.
            self.horizontal_active_link_ids = np.maximum(
                self.horizontal_active_link_ids, fixed_horizontal_links)
            self.vertical_active_link_ids = np.maximum(
                self.vertical_active_link_ids, fixed_vertical_links)
            self.active_neighbors = find_active_neighbors_for_fixed_links(
                self.grid)

        # Using the active vertical link ids we can find the north
        # and south vertical neighbors
        self.north_neighbors = links.vertical_north_link_neighbor(
            self.grid.shape, self.vertical_active_link_ids)
        self.south_neighbors = links.vertical_south_link_neighbor(
            self.grid.shape, self.vertical_active_link_ids)

        # Using the horizontal active link ids, we can find the west and
        # east neighbors
        self.west_neighbors = links.horizontal_west_link_neighbor(
            self.grid.shape, self.horizontal_active_link_ids)
        self.east_neighbors = links.horizontal_east_link_neighbor(
            self.grid.shape, self.horizontal_active_link_ids)

        # Set up arrays for discharge in the horizontal & vertical directions.
        self.q_horizontal = np.zeros(links.number_of_horizontal_links(
            self.grid.shape))
        self.q_vertical = np.zeros(links.number_of_vertical_links(
            self.grid.shape))

        # Once the neighbor arrays are set up, we change the flag to True!
        self.neighbor_flag = True

    def overland_flow(self, dt=None):
        """Generate overland flow across a grid.

        For one time step, this generates 'overland flow' across a given grid
        by calculating discharge at each node.

        Using the depth slope product, shear stress is calculated at every
        node.

        Outputs water depth, discharge and shear stress values through time at
        every point in the input grid.

        Parameters
        ----------
        dt : float, optional
            Time step (s). If not given, a stable time step is computed
            with :meth:`calc_time_step`.
        """
        if dt is None:
            self.calc_time_step()
        else:
            # BUGFIX: a caller-supplied dt was previously ignored (self.dt
            # was never assigned from it), so the first call with an
            # explicit dt used a stale or None time step.
            self.dt = dt

        # First, we check and see if the neighbor arrays have been initialized
        if self.neighbor_flag is False:
            self.set_up_neighbor_arrays()

        # In case another component has added data to the fields, we just
        # reset our water depths, topographic elevations and water discharge
        # variables to the fields.
        self.h = self.grid['node']['water__depth']
        self.z = self.grid['node']['topographic__elevation']
        self.q = self.grid['link']['water__discharge']
        self.h_links = self.grid['link']['water__depth']

        # Here we identify the core nodes and active link ids for later use.
        self.core_nodes = self.grid.core_nodes
        self.active_links = self.grid.active_links

        # Per Bates et al., 2010, this solution needs to find the difference
        # between the highest water surface in the two cells and the
        # highest bed elevation
        zmax = self._grid.map_max_of_link_nodes_to_link(self.z)
        w = self.h + self.z
        wmax = self._grid.map_max_of_link_nodes_to_link(w)
        hflow = wmax[self._grid.active_links] - zmax[self._grid.active_links]

        # Insert this water depth into an array of water depths at the links.
        self.h_links[self.active_links] = hflow

        # Now we calculate the slope of the water surface elevation at
        # active links
        self.water_surface_gradient = (
            self.grid.calc_grad_at_link(w)[self.grid.active_links])

        # And insert these values into an array of all links
        self.slope[self.active_links] = self.water_surface_gradient

        # If the user chooses to set boundary links to the neighbor value, we
        # set the discharge array to have the boundary links set to their
        # neighbor value
        if self.use_fixed_links is True:
            self.q[self.grid.fixed_links] = self.q[self.active_neighbors]

        # Now we can calculate discharge. To handle links with neighbors that
        # do not exist, we will do a fancy indexing trick. Non-existent links
        # or inactive links have an index of '-1', which in Python, looks to
        # the end of a list or array. To accommodate these '-1' indices, we
        # will simply insert an value of 0.0 discharge (in units of L^2/T)
        # to the end of the discharge array.
        self.q = np.append(self.q, [0])

        horiz = self.horizontal_ids
        vert = self.vertical_ids

        # Now we calculate discharge in the horizontal direction
        self.q[horiz] = ((
            self.theta * self.q[horiz] + (1 - self.theta) /
            2 * (self.q[self.west_neighbors] + self.q[self.east_neighbors]) -
            self.g * self.h_links[self.horizontal_ids] * self.dt *
            self.slope[self.horizontal_ids]) / (
                1 + self.g * self.dt * self.mannings_n ** 2. *
                abs(self.q[horiz]) / self.h_links[self.horizontal_ids] **
                _SEVEN_OVER_THREE))

        # ... and in the vertical direction
        self.q[vert] = ((
            self.theta * self.q[vert] + (1 - self.theta) /
            2 * (self.q[self.north_neighbors] + self.q[self.south_neighbors]) -
            self.g * self.h_links[self.vertical_ids] * self.dt *
            self.slope[self.vertical_ids]) / (
                1 + self.g * self.dt * self.mannings_n ** 2. *
                abs(self.q[vert]) / self.h_links[self.vertical_ids] **
                _SEVEN_OVER_THREE))

        # Now to return the array to its original length (length of number of
        # all links), we delete the extra 0.0 value from the end of the array.
        self.q = np.delete(self.q, len(self.q) - 1)

        # Updating the discharge array to have the boundary links set to
        # their neighbor
        if self.use_fixed_links is True:
            self.q[self.grid.fixed_links] = self.q[self.active_neighbors]

        if self.steep_slopes is True:
            # To prevent water from draining too fast for our time steps...
            # Our Froude number.
            Fr = 0.8
            # Our two limiting factors, the froude number and courant number.
            # Looking a calculated q to be compared to our Fr number.
            calculated_q = (self.q / self.h_links) / np.sqrt(self.g *
                                                             self.h_links)

            # Looking at our calculated q and comparing it to our Courant no.,
            q_courant = self.q * self.dt / self.grid.dx

            # Water depth split equally between four links..
            water_div_4 = self.h_links / 4.

            # IDs where water discharge is positive...
            (positive_q, ) = np.where(self.q > 0)

            # ... and negative.
            (negative_q, ) = np.where(self.q < 0)

            # Where does our calculated q exceed the Froude number? If q does
            # exceed the Froude number, we are getting supercritical flow and
            # discharge needs to be reduced to maintain stability.
            (Froude_logical, ) = np.where((calculated_q) > Fr)
            (Froude_abs_logical, ) = np.where(abs(calculated_q) > Fr)

            # Where does our calculated q exceed the Courant number and water
            # depth divided amongst 4 links? If the calculated q exceeds the
            # Courant number and is greater than the water depth divided by 4
            # links, we reduce discharge to maintain stability.
            (water_logical, ) = np.where(q_courant > water_div_4)
            (water_abs_logical, ) = np.where(abs(q_courant) > water_div_4)

            # Where are these conditions met? For positive and negative q,
            # there are specific rules to reduce q. This step finds where the
            # discharge values are positive or negative and where discharge
            # exceeds the Froude or Courant number.
            self.if_statement_1 = np.intersect1d(positive_q, Froude_logical)
            self.if_statement_2 = np.intersect1d(negative_q,
                                                 Froude_abs_logical)
            self.if_statement_3 = np.intersect1d(positive_q, water_logical)
            self.if_statement_4 = np.intersect1d(negative_q, water_abs_logical)

            # Rules 1 and 2 reduce discharge by the Froude number.
            self.q[self.if_statement_1] = (
                self.h_links[self.if_statement_1] *
                (np.sqrt(self.g * self.h_links[self.if_statement_1]) * Fr))

            self.q[self.if_statement_2] = (
                0. - (self.h_links[self.if_statement_2] *
                      np.sqrt(self.g * self.h_links[self.if_statement_2]) *
                      Fr))

            # Rules 3 and 4 reduce discharge by the Courant number.
            self.q[self.if_statement_3] = (((
                self.h_links[self.if_statement_3] * self.grid.dx) / 5.) /
                self.dt)

            self.q[self.if_statement_4] = (
                0. - (self.h_links[self.if_statement_4] * self.grid.dx / 5.) /
                self.dt)

        # Once stability has been restored, we calculate the change in water
        # depths on all core nodes by finding the difference between inputs
        # (rainfall) and the inputs/outputs (flux divergence of discharge)
        self.dhdt = (self.rainfall_intensity - self.grid.calc_flux_div_at_node(
            self.q))

        # Updating our water depths...
        self.h[self.core_nodes] = (self.h[self.core_nodes] +
                                   self.dhdt[self.core_nodes] * self.dt)

        # To prevent divide by zero errors, a minimum threshold water depth
        # must be maintained. To reduce mass imbalances, this is set to
        # find locations where water depth is smaller than h_init (default is
        # is 0.001) and the new value is self.h_init * 10^-3. This was set as
        # it showed the smallest amount of mass creation in the grid during
        # testing.
        if self.steep_slopes is True:
            self.h[np.where(self.h < self.h_init)] = self.h_init * 10.0 ** -3

        # And reset our field values with the newest water depth and discharge.
        self.grid.at_node['water__depth'] = self.h
        self.grid.at_link['water__discharge'] = self.q
def find_active_neighbors_for_fixed_links(grid):
    """Find active link neighbors for every fixed link.

    Specialized link ID function used to ID the active links that neighbor
    fixed links in the vertical and horizontal directions.

    If the user wants to assign fixed gradients or values to the fixed
    links dynamically, this function identifies the nearest active_link
    neighbor.

    Each fixed link can either have 0 or 1 active neighbor. This function
    finds if and where that active neighbor is and stores those IDs in
    an array.

    Parameters
    ----------
    grid : RasterModelGrid
        A landlab grid.

    Returns
    -------
    ndarray of int, shape `(*, )`
        Flat array of links.

    Examples
    --------
    >>> from landlab.grid.structured_quad.links import neighbors_at_link
    >>> from landlab import RasterModelGrid
    >>> from landlab.components.overland_flow.generate_overland_flow_deAlmeida import find_active_neighbors_for_fixed_links
    >>> from landlab import RasterModelGrid, FIXED_GRADIENT_BOUNDARY

    >>> grid = RasterModelGrid((4, 5))
    >>> grid.status_at_node[:5] = FIXED_GRADIENT_BOUNDARY
    >>> grid.status_at_node[::5] = FIXED_GRADIENT_BOUNDARY
    >>> grid.status_at_node # doctest: +NORMALIZE_WHITESPACE
    array([2, 2, 2, 2, 2,
           2, 0, 0, 0, 1,
           2, 0, 0, 0, 1,
           2, 1, 1, 1, 1], dtype=int8)

    >>> grid.fixed_links
    array([ 5,  6,  7,  9, 18])
    >>> grid.active_links
    array([10, 11, 12, 14, 15, 16, 19, 20, 21, 23, 24, 25])

    >>> find_active_neighbors_for_fixed_links(grid)
    array([14, 15, 16, 10, 19])

    >>> rmg = RasterModelGrid((4, 7))

    >>> rmg.at_node['topographic__elevation'] = rmg.zeros(at='node')
    >>> rmg.at_link['topographic__slope'] = rmg.zeros(at='link')
    >>> rmg.set_fixed_link_boundaries_at_grid_edges(True, True, True, True)
    >>> find_active_neighbors_for_fixed_links(rmg)
    array([20, 21, 22, 23, 24, 14, 17, 27, 30, 20, 21, 22, 23, 24])
    """
    # All link neighbors of every fixed link, flattened into one array.
    candidate_ids = links.neighbors_at_link(grid.shape, grid.fixed_links).flat
    # Keep only those neighbors that are themselves active links.
    is_active = np.in1d(candidate_ids, grid.active_links)
    return candidate_ids[is_active]
| [
"landlab.grid.structured_quad.links.vertical_active_link_ids",
"landlab.grid.structured_quad.links.fixed_link_ids",
"numpy.maximum",
"landlab.grid.structured_quad.links.horizontal_west_link_neighbor",
"landlab.grid.structured_quad.links.horizontal_active_link_ids",
"landlab.grid.structured_quad.links.vert... | [((9613, 9677), 'landlab.grid.structured_quad.links.active_link_ids', 'links.active_link_ids', (['self.grid.shape', 'self.grid.status_at_node'], {}), '(self.grid.shape, self.grid.status_at_node)\n', (9634, 9677), False, 'from landlab.grid.structured_quad import links\n'), ((9827, 9869), 'landlab.grid.structured_quad.links.horizontal_link_ids', 'links.horizontal_link_ids', (['self.grid.shape'], {}), '(self.grid.shape)\n', (9852, 9869), False, 'from landlab.grid.structured_quad import links\n'), ((10053, 10119), 'landlab.grid.structured_quad.links.horizontal_active_link_ids', 'links.horizontal_active_link_ids', (['self.grid.shape', 'self.active_ids'], {}), '(self.grid.shape, self.active_ids)\n', (10085, 10119), False, 'from landlab.grid.structured_quad import links\n'), ((10435, 10499), 'landlab.grid.structured_quad.links.vertical_active_link_ids', 'links.vertical_active_link_ids', (['self.grid.shape', 'self.active_ids'], {}), '(self.grid.shape, self.active_ids)\n', (10465, 10499), False, 'from landlab.grid.structured_quad import links\n'), ((11395, 11482), 'landlab.grid.structured_quad.links.vertical_north_link_neighbor', 'links.vertical_north_link_neighbor', (['self.grid.shape', 'self.vertical_active_link_ids'], {}), '(self.grid.shape, self.\n vertical_active_link_ids)\n', (11429, 11482), False, 'from landlab.grid.structured_quad import links\n'), ((11522, 11609), 'landlab.grid.structured_quad.links.vertical_south_link_neighbor', 'links.vertical_south_link_neighbor', (['self.grid.shape', 'self.vertical_active_link_ids'], {}), '(self.grid.shape, self.\n vertical_active_link_ids)\n', (11556, 11609), False, 'from landlab.grid.structured_quad import links\n'), ((11747, 11837), 'landlab.grid.structured_quad.links.horizontal_west_link_neighbor', 'links.horizontal_west_link_neighbor', (['self.grid.shape', 'self.horizontal_active_link_ids'], {}), '(self.grid.shape, self.\n horizontal_active_link_ids)\n', (11782, 11837), False, 
'from landlab.grid.structured_quad import links\n'), ((11876, 11966), 'landlab.grid.structured_quad.links.horizontal_east_link_neighbor', 'links.horizontal_east_link_neighbor', (['self.grid.shape', 'self.horizontal_active_link_ids'], {}), '(self.grid.shape, self.\n horizontal_active_link_ids)\n', (11911, 11966), False, 'from landlab.grid.structured_quad import links\n'), ((15212, 15234), 'numpy.append', 'np.append', (['self.q', '[0]'], {}), '(self.q, [0])\n', (15221, 15234), True, 'import numpy as np\n'), ((23372, 23425), 'landlab.grid.structured_quad.links.neighbors_at_link', 'links.neighbors_at_link', (['grid.shape', 'grid.fixed_links'], {}), '(grid.shape, grid.fixed_links)\n', (23395, 23425), False, 'from landlab.grid.structured_quad import links\n'), ((23452, 23489), 'numpy.in1d', 'np.in1d', (['neighbors', 'grid.active_links'], {}), '(neighbors, grid.active_links)\n', (23459, 23489), True, 'import numpy as np\n'), ((10584, 10647), 'landlab.grid.structured_quad.links.fixed_link_ids', 'links.fixed_link_ids', (['self.grid.shape', 'self.grid.status_at_node'], {}), '(self.grid.shape, self.grid.status_at_node)\n', (10604, 10647), False, 'from landlab.grid.structured_quad import links\n'), ((10702, 10766), 'landlab.grid.structured_quad.links.horizontal_fixed_link_ids', 'links.horizontal_fixed_link_ids', (['self.grid.shape', 'fixed_link_ids'], {}), '(self.grid.shape, fixed_link_ids)\n', (10733, 10766), False, 'from landlab.grid.structured_quad import links\n'), ((10819, 10881), 'landlab.grid.structured_quad.links.vertical_fixed_link_ids', 'links.vertical_fixed_link_ids', (['self.grid.shape', 'fixed_link_ids'], {}), '(self.grid.shape, fixed_link_ids)\n', (10848, 10881), False, 'from landlab.grid.structured_quad import links\n'), ((10945, 11012), 'numpy.maximum', 'np.maximum', (['self.horizontal_active_link_ids', 'fixed_horizontal_links'], {}), '(self.horizontal_active_link_ids, fixed_horizontal_links)\n', (10955, 11012), True, 'import numpy as np\n'), ((11074, 11137), 
'numpy.maximum', 'np.maximum', (['self.vertical_active_link_ids', 'fixed_vertical_links'], {}), '(self.vertical_active_link_ids, fixed_vertical_links)\n', (11084, 11137), True, 'import numpy as np\n'), ((12092, 12141), 'landlab.grid.structured_quad.links.number_of_horizontal_links', 'links.number_of_horizontal_links', (['self.grid.shape'], {}), '(self.grid.shape)\n', (12124, 12141), False, 'from landlab.grid.structured_quad import links\n'), ((12191, 12238), 'landlab.grid.structured_quad.links.number_of_vertical_links', 'links.number_of_vertical_links', (['self.grid.shape'], {}), '(self.grid.shape)\n', (12221, 12238), False, 'from landlab.grid.structured_quad import links\n'), ((17719, 17739), 'numpy.where', 'np.where', (['(self.q > 0)'], {}), '(self.q > 0)\n', (17727, 17739), True, 'import numpy as np\n'), ((17802, 17822), 'numpy.where', 'np.where', (['(self.q < 0)'], {}), '(self.q < 0)\n', (17810, 17822), True, 'import numpy as np\n'), ((18080, 18107), 'numpy.where', 'np.where', (['(calculated_q > Fr)'], {}), '(calculated_q > Fr)\n', (18088, 18107), True, 'import numpy as np\n'), ((18510, 18543), 'numpy.where', 'np.where', (['(q_courant > water_div_4)'], {}), '(q_courant > water_div_4)\n', (18518, 18543), True, 'import numpy as np\n'), ((18935, 18977), 'numpy.intersect1d', 'np.intersect1d', (['positive_q', 'Froude_logical'], {}), '(positive_q, Froude_logical)\n', (18949, 18977), True, 'import numpy as np\n'), ((19012, 19058), 'numpy.intersect1d', 'np.intersect1d', (['negative_q', 'Froude_abs_logical'], {}), '(negative_q, Froude_abs_logical)\n', (19026, 19058), True, 'import numpy as np\n'), ((19142, 19183), 'numpy.intersect1d', 'np.intersect1d', (['positive_q', 'water_logical'], {}), '(positive_q, water_logical)\n', (19156, 19183), True, 'import numpy as np\n'), ((19218, 19263), 'numpy.intersect1d', 'np.intersect1d', (['negative_q', 'water_abs_logical'], {}), '(negative_q, water_abs_logical)\n', (19232, 19263), True, 'import numpy as np\n'), ((10298, 10338), 
'landlab.grid.structured_quad.links.vertical_link_ids', 'links.vertical_link_ids', (['self.grid.shape'], {}), '(self.grid.shape)\n', (10321, 10338), False, 'from landlab.grid.structured_quad import links\n'), ((17300, 17330), 'numpy.sqrt', 'np.sqrt', (['(self.g * self.h_links)'], {}), '(self.g * self.h_links)\n', (17307, 17330), True, 'import numpy as np\n'), ((21099, 21129), 'numpy.where', 'np.where', (['(self.h < self.h_init)'], {}), '(self.h < self.h_init)\n', (21107, 21129), True, 'import numpy as np\n'), ((9232, 9275), 'numpy.amax', 'np.amax', (["self._grid.at_node['water__depth']"], {}), "(self._grid.at_node['water__depth'])\n", (9239, 9275), True, 'import numpy as np\n'), ((19445, 19496), 'numpy.sqrt', 'np.sqrt', (['(self.g * self.h_links[self.if_statement_1])'], {}), '(self.g * self.h_links[self.if_statement_1])\n', (19452, 19496), True, 'import numpy as np\n'), ((19629, 19680), 'numpy.sqrt', 'np.sqrt', (['(self.g * self.h_links[self.if_statement_2])'], {}), '(self.g * self.h_links[self.if_statement_2])\n', (19636, 19680), True, 'import numpy as np\n')] |
#!/opt/local/bin/python3
import random
from math import log, sqrt, cos, sin, pi
from collections import Counter, namedtuple
from multiprocessing import Pool
from toolz import concat
import numpy as np
import scipy
import statsmodels.api as sm
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('seaborn-white')
plt.rcParams['font.size'] = 7
plt.rcParams['axes.labelweight'] = 'normal'
class Landscape(object):
    """A set of N cities with populations and 2-D positions inside the unit circle.

    City 0 (the "capital") sits at the origin. Subclasses must implement
    generate_position() -> (r, theta) to define the spatial distribution.
    """
    def __init__(self, N, n_pop):
        """Distribute n_pop people over N cities and lay the cities out radially.

        Each city gets at least one inhabitant; the remainder are assigned by
        preferential attachment (random.choice over the growing list), giving
        a skewed population distribution sorted in decreasing order.
        """
        self.N = N
        pop = list(range(N))
        for _ in range(n_pop - N):
            pop.append(random.choice(pop))
        self.pop = np.array(sorted(Counter(pop).values(), reverse=True))
        # lay out locations: city 0 stays at the origin, the rest are placed
        # at polar coordinates drawn from the subclass's distribution
        self.x, self.y = np.zeros(N), np.zeros(N)
        for i in range(1, N):
            r, theta = self.generate_position()
            self.x[i] = r * cos(theta)
            self.y[i] = r * sin(theta)
    def plot(self, filename, scores=None):
        """Scatter-plot the cities (marker size ~ population) and save to filename.

        Fix: scores is tested with `is not None` — the previous truthiness
        test raised "truth value of an array is ambiguous" for ndarray scores.
        """
        plt.figure(figsize=(2.5, 2.5))
        plt.tick_params(labelbottom='off', labelleft='off')
        if scores is not None:
            plt.scatter(self.x, self.y, s=self.pop/10, alpha=0.3, c=scores, cmap='viridis')
        else:
            plt.scatter(self.x, self.y, s=self.pop/10, alpha=0.3)
        plt.xlim(-1, 1)
        plt.ylim(-1, 1)
        plt.savefig(filename)
        plt.close()
class UniformLandscape(Landscape):
    def generate_position(self):
        """Draw (r, theta) uniformly over the area of the unit circle.

        Taking sqrt of a uniform draw for the radius compensates for the fact
        that area grows with r, so points are uniform in area.
        """
        radius = sqrt(np.random.random())
        angle = np.random.random() * 2 * pi
        return radius, angle
class NonUniformLandscape(Landscape):
    def generate_position(self):
        """Draw (r, theta) non-uniformly: radius from a power(4) law, angle uniform."""
        radius = np.random.power(4)
        angle = np.random.random() * 2 * pi
        return radius, angle
class Simulation(object):
    """Base class for dialect-diffusion simulations on a Landscape.

    Subclasses must set ``self.p`` (per-city destination weights used by
    ``run``) in their __init__.
    """
    def __init__(self, L, V):
        """L: a Landscape instance; V: number of linguistic variants per agent."""
        self.L = L
        self.V = V
        # initialize agents: one (population x V) binary matrix per city;
        # the capital (city 0) starts with every variant adopted.
        self.agents = [ np.zeros((pop, V)) for pop in L.pop ]
        self.agents[0][:] = 1.0
        # pre-compute distances from the capital (city 0) to every city
        self.distance = np.zeros((L.N))
        for i in range(L.N):
            self.distance[i] = sqrt((L.x[0]-L.x[i])**2+(L.y[0]-L.y[i])**2)
    def run(self, R, q):
        """Run R diffusion rounds; q is the per-variant adoption probability.

        Each round picks a destination city from ``self.p`` (set by the
        subclass), picks a random agent there, and flips each of its variants
        on with probability q.
        """
        for _ in range(R):
            # choose a destination city (other than the capital), weighted by distance from capital
            c = multinomial(self.p)
            a = np.random.randint(0, self.L.pop[c])
            # diffuse
            for i in range(self.V):
                if np.random.random() < q:
                    self.agents[c][a, i] = 1
        # dialect difference = 1 - cosine similarity (dot of normalized
        # vectors) between each city's aggregated variants and the capital's
        self.dialects = [ 1-np.dot(normalize(np.sum(self.agents[0], axis=0)),
                          normalize(np.sum(a, axis=0)))
                          for a in self.agents ]
    def results(self):
        """Return (distances, populations, dialect differences), capital excluded."""
        return self.distance[1:], self.L.pop[1:], self.dialects[1:]
    def plot_distance(self, filename):
        """Scatter dialect difference vs geographic distance with a LOWESS trend; save to filename."""
        plt.figure(figsize=(4,4))
        # drop degenerate points (dialect difference >= 2.0)
        x, y = zip(*[(a,b) for (a,b) in zip(self.distance[1:], self.dialects[1:]) if b < 2.0])
        plt.scatter(x, y, alpha=0.3)
        x, y = zip(*sm.nonparametric.lowess(y, x, frac=.5))
        plt.plot(x,y)
        plt.xlabel('Geographic distance')
        plt.ylabel('Dialect difference')
        plt.xlim(0, 1)
        plt.ylim(0, 1)
        plt.tight_layout()
        plt.savefig(filename)
        plt.close()
    def plot_population(self, filename):
        """Scatter dialect difference vs log population with a LOWESS trend; save to filename."""
        plt.figure(figsize=(4,4))
        x, y = zip(*[(log(a),b) for (a,b) in zip(self.L.pop[1:], self.dialects[1:]) if b < 2.0])
        # NOTE(review): drops one more leading point after filtering —
        # possibly to skip the largest city; confirm this is intentional.
        x = x[1:]
        y = y[1:]
        plt.scatter(x, y, alpha=0.3)
        x1, y1 = zip(*sm.nonparametric.lowess(y, x, frac=.5))
        plt.plot(x1, y1)
        plt.xlabel('Population (log)')
        plt.ylabel('Dialect difference')
        plt.xlim(0, max(x))
        plt.ylim(0, 1)
        plt.tight_layout()
        plt.savefig(filename)
        plt.close()
class Gravity(Simulation):
    """Gravity diffusion model: destination weight ~ population / distance from capital."""
    def __init__(self, L, V):
        super().__init__(L, V)
        # distance[0] is zero (the capital itself), so pop[0]/0 triggers a
        # divide-by-zero RuntimeWarning; silence it — the resulting inf is
        # overwritten immediately below.
        with np.errstate(divide='ignore'):
            self.p = self.L.pop / self.distance
        # the capital is never a destination
        self.p[0] = 0.0
class Radiation(Simulation):
    """Radiation diffusion model: destination weight based on intervening opportunities."""
    def __init__(self, L, V):
        super().__init__(L, V)
        # s[i] = population living at least as close to the capital as city i,
        # excluding the capital itself and city i.
        s = np.zeros(L.N)
        for i in range(L.N):
            nearer = sum(self.L.pop[j] for j in range(L.N)
                         if self.distance[j] <= self.distance[i])
            s[i] = nearer - self.L.pop[0] - self.L.pop[i]
        s[0] = 0.0
        capital_pop = self.L.pop[0]
        self.p = (capital_pop * self.L.pop) / (
            (capital_pop + s) * (capital_pop + self.L.pop + s))
        # the capital is never a destination
        self.p[0] = 0.0
def multinomial(p):
    """Draw a single category index from (possibly unnormalized) weights p.

    :param p: 1-D array-like of non-negative weights (list or ndarray).
    :return: int index of the sampled category.
    """
    p = np.asarray(p, dtype=float)  # also accepts plain lists now
    p = p / p.sum()
    # argmax of the one-hot multinomial draw is the sampled index; avoids
    # int() on a size-1 array, a conversion deprecated in recent NumPy.
    return int(np.random.multinomial(1, p).argmax())
def normalize(v):
    """Return v scaled to unit Euclidean length; a zero vector is returned unchanged."""
    magnitude = np.sqrt(np.sum(v ** 2))
    return v / magnitude if magnitude > 0 else v
def main():
    """Run the uniform-landscape experiments and save the figures as PDFs.

    The non-uniform landscape experiments (fig6-fig10) are present but
    disabled by the `if False:` guard below.
    """
    P = 0.01   # per-variant adoption probability passed to Simulation.run
    R = 50000  # number of diffusion rounds
    uniform = UniformLandscape(500, 100000)
    print('fig1.pdf : uniform landscape')
    uniform.plot('fig1.pdf')
    expr1 = Gravity(uniform, 100)
    expr1.run(R, P)
    print('fig2.pdf : distance (gravity, uniform)')
    expr1.plot_distance('fig2.pdf')
    print('fig3.pdf : population (gravity, uniform)')
    expr1.plot_population('fig3.pdf')
    expr2 = Radiation(uniform, 100)
    expr2.run(R, P)
    print('fig4.pdf : distance (radiation, uniform)')
    expr2.plot_distance('fig4.pdf')
    print('fig5.pdf : distance (population, uniform)')
    expr2.plot_population('fig5.pdf')
    # NOTE(review): intentionally disabled; flip to True to reproduce the
    # non-uniform landscape figures.
    if False:
        non_uniform = NonUniformLandscape(500, 100000)
        print('fig6.pdf : non-uniform landscape')
        non_uniform.plot('fig6.pdf')
        expr3 = Gravity(non_uniform, 100)
        expr3.run(R, P)
        print('fig7.pdf : distance (gravity, non_uniform)')
        expr3.plot_distance('fig7.pdf')
        print('fig8.pdf : population (gravity, non_uniform)')
        expr3.plot_population('fig8.pdf')
        expr4 = Radiation(non_uniform, 100)
        expr4.run(R, P)
        print('fig9.pdf : distance (radiation, non_uniform)')
        expr4.plot_distance('fig9.pdf')
        print('fig10.pdf : distance (population, non_uniform)')
        expr4.plot_population('fig10.pdf')
main()
| [
"statsmodels.api.nonparametric.lowess",
"numpy.sum",
"numpy.random.multinomial",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.close",
"math.cos",
"collections.Counter",
"... | [((265, 286), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (279, 286), False, 'import matplotlib\n'), ((342, 372), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-white"""'], {}), "('seaborn-white')\n", (355, 372), True, 'import matplotlib.pyplot as plt\n'), ((4732, 4741), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (4738, 4741), True, 'import numpy as np\n'), ((1041, 1071), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2.5, 2.5)'}), '(figsize=(2.5, 2.5))\n', (1051, 1071), True, 'import matplotlib.pyplot as plt\n'), ((1079, 1130), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelbottom': '"""off"""', 'labelleft': '"""off"""'}), "(labelbottom='off', labelleft='off')\n", (1094, 1130), True, 'import matplotlib.pyplot as plt\n'), ((1332, 1347), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1)', '(1)'], {}), '(-1, 1)\n', (1340, 1347), True, 'import matplotlib.pyplot as plt\n'), ((1356, 1371), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1)', '(1)'], {}), '(-1, 1)\n', (1364, 1371), True, 'import matplotlib.pyplot as plt\n'), ((1380, 1401), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (1391, 1401), True, 'import matplotlib.pyplot as plt\n'), ((1410, 1421), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1419, 1421), True, 'import matplotlib.pyplot as plt\n'), ((2122, 2135), 'numpy.zeros', 'np.zeros', (['L.N'], {}), '(L.N)\n', (2130, 2135), True, 'import numpy as np\n'), ((3009, 3035), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (3019, 3035), True, 'import matplotlib.pyplot as plt\n'), ((3138, 3166), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'alpha': '(0.3)'}), '(x, y, alpha=0.3)\n', (3149, 3166), True, 'import matplotlib.pyplot as plt\n'), ((3235, 3249), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (3243, 3249), True, 'import matplotlib.pyplot as plt\n'), ((3257, 3290), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Geographic distance"""'], {}), "('Geographic distance')\n", (3267, 3290), True, 'import matplotlib.pyplot as plt\n'), ((3299, 3331), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Dialect difference"""'], {}), "('Dialect difference')\n", (3309, 3331), True, 'import matplotlib.pyplot as plt\n'), ((3340, 3354), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (3348, 3354), True, 'import matplotlib.pyplot as plt\n'), ((3363, 3377), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (3371, 3377), True, 'import matplotlib.pyplot as plt\n'), ((3386, 3404), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3402, 3404), True, 'import matplotlib.pyplot as plt\n'), ((3413, 3434), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (3424, 3434), True, 'import matplotlib.pyplot as plt\n'), ((3443, 3454), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3452, 3454), True, 'import matplotlib.pyplot as plt\n'), ((3510, 3536), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (3520, 3536), True, 'import matplotlib.pyplot as plt\n'), ((3677, 3705), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'alpha': '(0.3)'}), '(x, y, alpha=0.3)\n', (3688, 3705), True, 'import matplotlib.pyplot as plt\n'), ((3776, 3792), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', 'y1'], {}), '(x1, y1)\n', (3784, 3792), True, 'import matplotlib.pyplot as plt\n'), ((3801, 3831), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Population (log)"""'], {}), "('Population (log)')\n", (3811, 3831), True, 'import matplotlib.pyplot as plt\n'), ((3840, 3872), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Dialect difference"""'], {}), "('Dialect difference')\n", (3850, 3872), True, 'import matplotlib.pyplot as plt\n'), ((3909, 3923), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (3917, 
3923), True, 'import matplotlib.pyplot as plt\n'), ((3932, 3950), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3948, 3950), True, 'import matplotlib.pyplot as plt\n'), ((3959, 3980), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (3970, 3980), True, 'import matplotlib.pyplot as plt\n'), ((3989, 4000), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3998, 4000), True, 'import matplotlib.pyplot as plt\n'), ((4274, 4287), 'numpy.zeros', 'np.zeros', (['L.N'], {}), '(L.N)\n', (4282, 4287), True, 'import numpy as np\n'), ((4875, 4889), 'numpy.sum', 'np.sum', (['(v ** 2)'], {}), '(v ** 2)\n', (4881, 4889), True, 'import numpy as np\n'), ((799, 810), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (807, 810), True, 'import numpy as np\n'), ((814, 825), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (822, 825), True, 'import numpy as np\n'), ((1164, 1250), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.x', 'self.y'], {'s': '(self.pop / 10)', 'alpha': '(0.3)', 'c': 'scores', 'cmap': '"""viridis"""'}), "(self.x, self.y, s=self.pop / 10, alpha=0.3, c=scores, cmap=\n 'viridis')\n", (1175, 1250), True, 'import matplotlib.pyplot as plt\n'), ((1270, 1325), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.x', 'self.y'], {'s': '(self.pop / 10)', 'alpha': '(0.3)'}), '(self.x, self.y, s=self.pop / 10, alpha=0.3)\n', (1281, 1325), True, 'import matplotlib.pyplot as plt\n'), ((1779, 1797), 'numpy.random.power', 'np.random.power', (['(4)'], {}), '(4)\n', (1794, 1797), True, 'import numpy as np\n'), ((1994, 2012), 'numpy.zeros', 'np.zeros', (['(pop, V)'], {}), '((pop, V))\n', (2002, 2012), True, 'import numpy as np\n'), ((2198, 2251), 'math.sqrt', 'sqrt', (['((L.x[0] - L.x[i]) ** 2 + (L.y[0] - L.y[i]) ** 2)'], {}), '((L.x[0] - L.x[i]) ** 2 + (L.y[0] - L.y[i]) ** 2)\n', (2202, 2251), False, 'from math import log, sqrt, cos, sin, pi\n'), ((2461, 2496), 'numpy.random.randint', 'np.random.randint', (['(0)', 
'self.L.pop[c]'], {}), '(0, self.L.pop[c])\n', (2478, 2496), True, 'import numpy as np\n'), ((643, 661), 'random.choice', 'random.choice', (['pop'], {}), '(pop)\n', (656, 661), False, 'import random\n'), ((934, 944), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (937, 944), False, 'from math import log, sqrt, cos, sin, pi\n'), ((973, 983), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (976, 983), False, 'from math import log, sqrt, cos, sin, pi\n'), ((1575, 1593), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1591, 1593), True, 'import numpy as np\n'), ((3187, 3226), 'statsmodels.api.nonparametric.lowess', 'sm.nonparametric.lowess', (['y', 'x'], {'frac': '(0.5)'}), '(y, x, frac=0.5)\n', (3210, 3226), True, 'import statsmodels.api as sm\n'), ((3728, 3767), 'statsmodels.api.nonparametric.lowess', 'sm.nonparametric.lowess', (['y', 'x'], {'frac': '(0.5)'}), '(y, x, frac=0.5)\n', (3751, 3767), True, 'import statsmodels.api as sm\n'), ((1596, 1614), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1612, 1614), True, 'import numpy as np\n'), ((1799, 1817), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1815, 1817), True, 'import numpy as np\n'), ((2574, 2592), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2590, 2592), True, 'import numpy as np\n'), ((4769, 4796), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', 'p'], {}), '(1, p)\n', (4790, 4796), True, 'import numpy as np\n'), ((698, 710), 'collections.Counter', 'Counter', (['pop'], {}), '(pop)\n', (705, 710), False, 'from collections import Counter, namedtuple\n'), ((2709, 2739), 'numpy.sum', 'np.sum', (['self.agents[0]'], {'axis': '(0)'}), '(self.agents[0], axis=0)\n', (2715, 2739), True, 'import numpy as np\n'), ((2788, 2805), 'numpy.sum', 'np.sum', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (2794, 2805), True, 'import numpy as np\n'), ((3558, 3564), 'math.log', 'log', (['a'], {}), '(a)\n', (3561, 3564), False, 'from math import log, sqrt, 
cos, sin, pi\n')] |
import json
import logging as log
import numbers
import numpy as np
import os
import shutil
import sys
import weakref
from io import StringIO
from bson import json_util
from tensorflow.python.client import device_lib
from constants import json_constant_map
def xstr(s):
    """Return str(s), or the empty string when s is None."""
    if s is None:
        return ''
    return str(s)
def get_available_gpus():
    """Return the local devices reported by TensorFlow's device_lib.

    NOTE(review): despite the name, this appears to return *all* local devices
    (CPUs included), not just GPUs — callers may need to filter on
    device_type; confirm against TensorFlow's documentation.
    """
    return device_lib.list_local_devices()
def clean_dir(folder: str):
    """Delete every entry inside *folder* (files, symlinks and subtrees).

    The folder itself is kept; a missing folder is a no-op.
    """
    if not os.path.exists(folder):
        return
    for entry in os.listdir(folder):
        entry_path = os.path.join(folder, entry)
        if os.path.isfile(entry_path) or os.path.islink(entry_path):
            os.unlink(entry_path)
        elif os.path.isdir(entry_path):
            shutil.rmtree(entry_path, ignore_errors=True)
def assign(tgt, src):
    """Shallow-copy every field of *src* onto *tgt*.

    Both sides may be a dict (keys) or an object (attributes). For a non-dict
    source, all non-callable attributes found via dir() are copied.
    """
    def _put(obj, key, value):
        # write as a key for dicts, as an attribute otherwise
        if isinstance(obj, dict):
            obj[key] = value
        else:
            setattr(obj, key, value)

    if isinstance(src, dict):
        for key, value in src.items():
            _put(tgt, key, value)
    else:
        for key in dir(src):
            value = getattr(src, key, None)
            if not callable(value):
                _put(tgt, key, value)
class FileRemover(object):
    """Delete a file or directory once the object it is tied to is garbage-collected."""

    def __init__(self):
        # weakref -> filesystem path scheduled for deletion
        self.weak_references = dict()

    def cleanup_once_done(self, response, filepath):
        """Schedule *filepath* for removal when *response* is garbage-collected."""
        ref = weakref.ref(response, self._do_cleanup)
        self.weak_references[ref] = filepath

    def _do_cleanup(self, wr):
        """Weakref callback: remove the path registered for the dead reference."""
        path = self.weak_references[wr]
        try:
            if os.path.isdir(path):
                shutil.rmtree(path, ignore_errors=True)
            else:
                os.remove(path)
        except Exception as ex:
            # best-effort cleanup: log and move on
            log.debug('Error deleting {}: {}'.format(path, str(ex)))
file_remover = FileRemover()
class Tee(object):
    """Duplicate writes to a wrapped stream while capturing them in a StringIO.

    When the wrapped stream is sys.stdout or sys.stderr, the Tee installs
    itself in its place and restores the original on context-manager exit.
    Any other stream is simply tee'd without global redirection.
    """
    def __init__(self, stream):
        self.stream = stream
        self._str = StringIO()
        # Fix: default to None so wrapping an arbitrary stream (neither
        # stdout nor stderr) no longer raises AttributeError below.
        self.stream_type = None
        if stream == sys.stdout:
            self.stream_type = 'stdout'
        elif stream == sys.stderr:
            self.stream_type = 'stderr'
        if self.stream_type == 'stdout':
            sys.stdout = self
        elif self.stream_type == 'stderr':
            sys.stderr = self
    def write(self, data):
        """Write to both the capture buffer and the underlying stream."""
        self._str.write(data)
        self.stream.write(data)
    def flush(self):
        self._str.flush()
        self.stream.flush()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # restore the original stream if we hijacked one
        if self.stream_type == 'stdout':
            sys.stdout = self.stream
        elif self.stream_type == 'stderr':
            sys.stderr = self.stream
    def getvalue(self):
        """Return everything captured so far."""
        return self._str.getvalue()
def custom_split(*arrays, test_size=0.25, random_state=42, stratify=None):
    """Train/test split that also returns the index arrays.

    Mirrors sklearn.model_selection.train_test_split: each input array is
    split into (train, test) pieces, and the train/test index arrays are
    appended at the end of the result list. If the first positional argument
    is an integer, it is treated as a sample count and two arange arrays are
    split instead.

    NOTE(review): relies on sklearn private helpers (_num_samples,
    _safe_indexing, _validate_shuffle_split) — these can break across sklearn
    versions.
    """
    from sklearn.utils import indexable
    from sklearn.utils.validation import _num_samples
    from itertools import chain
    from sklearn.utils import _safe_indexing
    from sklearn.model_selection._split import _validate_shuffle_split
    if isinstance(arrays[0], numbers.Integral):
        # integer shortcut: split index arrays of the given length
        n_samples = arrays[0]
        arrays = [np.arange(n_samples), np.arange(n_samples)]
    else:
        arrays = indexable(*arrays)
        n_samples = _num_samples(arrays[0])
    n_train, n_test = _validate_shuffle_split(n_samples, test_size, None, default_test_size=0.25)
    if stratify is not None:
        from sklearn.model_selection import StratifiedShuffleSplit
        CVClass = StratifiedShuffleSplit
    else:
        from sklearn.model_selection import ShuffleSplit
        CVClass = ShuffleSplit
    cv = CVClass(test_size=n_test, train_size=n_train, random_state=random_state)
    train, test = next(cv.split(X=arrays[0], y=stratify))
    # interleave (train, test) slices of every array, then append the indices
    res = list(chain.from_iterable((_safe_indexing(a, train), _safe_indexing(a, test)) for a in arrays))
    res.extend([train, test])
    return res
def mongo_to_object(mongo_object):
    """Deserialize a mongo/BSON object into plain Python data via a JSON round-trip.

    Objects exposing a callable ``to_json`` are serialized through it;
    everything else goes through bson.json_util.dumps. JSON constants
    (NaN/Infinity) are mapped through json_constant_map.
    """
    to_json = getattr(mongo_object, 'to_json', None)
    if to_json and callable(to_json):
        payload = to_json()
    else:
        payload = json_util.dumps(mongo_object)
    return json.loads(payload, parse_constant=lambda constant: json_constant_map[constant])
| [
"io.StringIO",
"os.remove",
"os.unlink",
"os.path.isdir",
"bson.json_util.dumps",
"os.path.exists",
"sklearn.utils.validation._num_samples",
"sklearn.model_selection._split._validate_shuffle_split",
"tensorflow.python.client.device_lib.list_local_devices",
"os.path.isfile",
"os.path.islink",
"... | [((352, 383), 'tensorflow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', ([], {}), '()\n', (381, 383), False, 'from tensorflow.python.client import device_lib\n'), ((421, 443), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (435, 443), False, 'import os\n'), ((3345, 3420), 'sklearn.model_selection._split._validate_shuffle_split', '_validate_shuffle_split', (['n_samples', 'test_size', 'None'], {'default_test_size': '(0.25)'}), '(n_samples, test_size, None, default_test_size=0.25)\n', (3368, 3420), False, 'from sklearn.model_selection._split import _validate_shuffle_split\n'), ((469, 487), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (479, 487), False, 'import os\n'), ((1419, 1458), 'weakref.ref', 'weakref.ref', (['response', 'self._do_cleanup'], {}), '(response, self._do_cleanup)\n', (1430, 1458), False, 'import weakref\n'), ((1984, 1994), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1992, 1994), False, 'from io import StringIO\n'), ((3259, 3277), 'sklearn.utils.indexable', 'indexable', (['*arrays'], {}), '(*arrays)\n', (3268, 3277), False, 'from sklearn.utils import indexable\n'), ((3298, 3321), 'sklearn.utils.validation._num_samples', '_num_samples', (['arrays[0]'], {}), '(arrays[0])\n', (3310, 3321), False, 'from sklearn.utils.validation import _num_samples\n'), ((513, 543), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (525, 543), False, 'import os\n'), ((1607, 1630), 'os.path.isdir', 'os.path.isdir', (['filepath'], {}), '(filepath)\n', (1620, 1630), False, 'import os\n'), ((3188, 3208), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (3197, 3208), True, 'import numpy as np\n'), ((3210, 3230), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (3219, 3230), True, 'import numpy as np\n'), ((4225, 4254), 'bson.json_util.dumps', 'json_util.dumps', (['mongo_object'], {}), '(mongo_object)\n', (4240, 4254), False, 'from 
bson import json_util\n'), ((559, 584), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (573, 584), False, 'import os\n'), ((588, 613), 'os.path.islink', 'os.path.islink', (['file_path'], {}), '(file_path)\n', (602, 613), False, 'import os\n'), ((631, 651), 'os.unlink', 'os.unlink', (['file_path'], {}), '(file_path)\n', (640, 651), False, 'import os\n'), ((669, 693), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), '(file_path)\n', (682, 693), False, 'import os\n'), ((1648, 1691), 'shutil.rmtree', 'shutil.rmtree', (['filepath'], {'ignore_errors': '(True)'}), '(filepath, ignore_errors=True)\n', (1661, 1691), False, 'import shutil\n'), ((1726, 1745), 'os.remove', 'os.remove', (['filepath'], {}), '(filepath)\n', (1735, 1745), False, 'import os\n'), ((711, 755), 'shutil.rmtree', 'shutil.rmtree', (['file_path'], {'ignore_errors': '(True)'}), '(file_path, ignore_errors=True)\n', (724, 755), False, 'import shutil\n'), ((3836, 3860), 'sklearn.utils._safe_indexing', '_safe_indexing', (['a', 'train'], {}), '(a, train)\n', (3850, 3860), False, 'from sklearn.utils import _safe_indexing\n'), ((3862, 3885), 'sklearn.utils._safe_indexing', '_safe_indexing', (['a', 'test'], {}), '(a, test)\n', (3876, 3885), False, 'from sklearn.utils import _safe_indexing\n')] |
"""
This code processes balance sheet data in order to fit company data into a normalised balance sheet.
This code outputs a CSV file with the first row corresponding to the lines that make up a balance sheet financial statement
Progress
3/11/2020 - Wrote code so that it outputs a spreadsheet that works for the first company , 3DX Industries Inc
4/11/2020 - Added more terms to the CATEGORISE_BALANCE_SHEET_TERMS list
09/11/2020 - added some terms to classify income statement
10/11/2020 - Streamlined the code, added more terms to the CATEGORISE_INCOME_STATEMENT_TERMS
27/11/2020 - added more terms to the CATEGORISE_INCOME_STATEMENT_TERMS, added a section which sums values together if the key is repeated.
28/11/2020 - Ensured that all revenue, net income and total asset terms were captured by the code
24/12/2020 - Ensured dates are in correct format. Ensured code only captures latest filing if duplicate date filings exist
28/02/2021 - Changed code methodology, switched to a cosine distance + KNN methodology to capture filings
Prepared by <NAME>
"""
# Importing our modules
# Standard library
import os
import re
import traceback

# Third-party
import numpy as np
import pandas as pd
from ftfy import fix_text
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.neighbors import NearestNeighbors
THRESHOLD = 0.6
def main():
    """Match every company's raw statement line items to the clean templates
    and write consolidated income-statement and balance-sheet CSVs.

    NOTE(review): the source directory is a hard-coded local Windows path and
    DataFrame.append is deprecated in recent pandas — both worth revisiting.
    """
    # Obtaining the company data information for example the income statement
    source_directory = (
        r"C:\Users\blow\Desktop\Work\Quant_project\Scrape_Code\Data Directory"
    )
    # This lists out all the company folders
    list_of_company_folders = os.listdir(source_directory)
    # This allows us to obtain the terms that will appear in a normal income statement
    clean_income_statement_terms = pd.read_excel(
        os.getcwd() + "\\Clean income statement.xlsx"
    )
    clean_income_statement_terms = clean_income_statement_terms.iloc[:, 0:1]
    clean_unique_income_statement_terms = clean_income_statement_terms[
        "income statement terms"
    ].unique()
    # This allows us to obtain the terms that will appear in a normal balance sheet
    clean_balance_sheet_terms = pd.read_excel(
        os.getcwd() + "\\Clean balance sheet.xlsx"
    )
    clean_balance_sheet_terms = clean_balance_sheet_terms.iloc[:, 0:1]
    clean_unique_balance_sheet_terms = clean_balance_sheet_terms[
        "balance sheet terms"
    ].unique()
    # empty result frames: metadata columns + statement term columns
    df_income_statement = initialise_dataframe(clean_unique_income_statement_terms)
    df_balance_sheet = initialise_dataframe(clean_unique_balance_sheet_terms)
    for company_folder in list_of_company_folders:
        company_files = os.listdir(os.path.join(source_directory, company_folder))
        for file in company_files:
            code, cik, sic, file_type = get_file_name(file)
            if file_type == "statements of operations and comprehensive income (loss)":
                # This allows us to get the names of each term in the respective report
                names = pd.read_csv(
                    os.path.join(source_directory, company_folder, file)
                )
                # This uses the algorithm to get the matches
                matches = match_data(
                    names,
                    clean_income_statement_terms,
                    clean_unique_income_statement_terms,
                )
                # This filters all matches by the threshold value, lower is better
                matches = matches[(matches["Match confidence"] < THRESHOLD)]
                # This creates a dataframe with all the matches values
                merged_df = clean_data(matches, company_folder, names, code, cik, sic)
                # This appends it to the income statement dataframe
                df_income_statement = df_income_statement.append(
                    merged_df, ignore_index=True
                )
            if file_type == "balance sheet":
                # This allows us to get the names of each term in the respective report
                names = pd.read_csv(
                    os.path.join(source_directory, company_folder, file)
                )
                # This uses the algorithm to get the matches
                matches = match_data(
                    names,
                    clean_balance_sheet_terms,
                    clean_unique_balance_sheet_terms,
                )
                # This filters all matches by the threshold value, lower is better
                matches = matches[(matches["Match confidence"] < THRESHOLD)]
                # This creates a dataframe with all the matches values
                merged_df = clean_data(matches, company_folder, names, code, cik, sic)
                # This appends it to the income statement dataframe
                df_balance_sheet = df_balance_sheet.append(merged_df, ignore_index=True)
    # Combining duplicated data
    df_income_statement = combine_duplicated_income_statement_data(df_income_statement)
    df_income_statement = drop_duplicated_dates(df_income_statement)
    df_balance_sheet = drop_duplicated_dates(df_balance_sheet)
    print("Converting data to csv")
    df_income_statement.to_csv("./output income statement.csv", index=False)
    df_balance_sheet.to_csv("./output balance sheet.csv", index=False)
def initialise_dataframe(clean_unique_income_statement_terms):
    """Build an empty results DataFrame: six metadata columns followed by the
    given statement line-item columns.

    :param clean_unique_income_statement_terms: iterable of statement term
        names (works equally for income-statement and balance-sheet terms).
    :return: empty DataFrame with metadata columns first.
    """
    df = pd.DataFrame(columns=clean_unique_income_statement_terms)
    # Explicit metadata list instead of the fragile cols[-6:] slice-and-rotate,
    # which silently broke whenever the metadata column count changed.
    metadata_columns = ["Company Name", "Dates", "Latest filing date", "Code", "CIK", "SIC"]
    for column in metadata_columns:
        df[column] = ""
    return df[metadata_columns + list(clean_unique_income_statement_terms)]
def get_file_name(file):
    """Parse an underscore-delimited statement filename.

    Expected form: "<code>_<date>_<cik>_<sic>_<statement type>",
    e.g. "10-K_2020-01-01_12345_6789_Consolidated Balance Sheet".

    :param file: filename of an individual company statement file.
    :return: (code, cik, sic, file_type) where file_type is the lower-cased
        statement type with any "Consolidated " prefix removed, or
        (None, None, None, None) when the name does not split into five parts.
    """
    try:
        code, _datestring, cik, sic, sec_type = file.split("_")
    except ValueError:
        # Malformed filename: report it and let the caller skip this file.
        # (The original bare except referenced `traceback` without importing
        # it, so it raised NameError instead of reporting the problem.)
        traceback.print_exc()
        return None, None, None, None
    file_type = sec_type.replace("Consolidated ", "").lower()
    return code, cik, sic, file_type
def match_data(names, clean_statement_terms, clean_unique_statement_terms):
    """Fuzzy-match raw statement line names to the clean template terms.

    Builds a char-bigram TF-IDF index over the template terms and finds each
    raw "Category" string's nearest template term. Lower "Match confidence"
    (the neighbour distance) means a better match.

    :param names: DataFrame of the raw filing with a "Category" column.
    :param clean_statement_terms: DataFrame of template terms.
    :param clean_unique_statement_terms: unique template term values.
    :return: DataFrame with columns Match confidence / Matched name / Original name.
    """
    print("Vecorizing the data - this could take a few minutes for large datasets...")
    # ngrams() (defined below) tokenizes each term into character bigrams
    vectorizer = TfidfVectorizer(min_df=1, analyzer=ngrams, lowercase=False)
    tfidf = vectorizer.fit_transform(clean_unique_statement_terms)
    print("Vecorizing completed...")
    nbrs = NearestNeighbors(n_neighbors=2, n_jobs=-1).fit(tfidf)
    org_column = "Category"  # column to match against in the messy data
    names.dropna(
        subset=[org_column], inplace=True
    )  # Drops the rows of the income statement where the 'category' is blank
    unique_org = set(names[org_column].values)  # set used for increased performance
    print("getting nearest n...")
    distances, indices = getNearestN(unique_org, vectorizer, nbrs)
    unique_org = list(unique_org)  # need to convert back to a list
    print("finding matches...")
    matches = []
    for i, j in enumerate(indices):
        # keep only the closest neighbour (index 0) for each raw name
        temp = [
            distances[i][0],
            clean_statement_terms.values[j][0][0],
            unique_org[i],
        ]
        matches.append(temp)
    print("Building data frame...")
    matches = pd.DataFrame(
        matches,
        columns=["Match confidence", "Matched name", "Original name"],
    )
    print("Done")
    return matches
def clean_data(matches, company_folder, names, code, cik, sic):
    """Turn the fuzzy-match table into one transposed row set per filing date,
    annotated with company metadata.

    :param matches: DataFrame from match_data (Match confidence / Matched name
        / Original name).
    :param company_folder: company folder name, used as "Company Name".
    :param names: raw filing DataFrame (has a "Category" column plus one
        column per reporting date).
    :param code, cik, sic: metadata parsed from the filename.
    :return: DataFrame with one row per date column of the filing.

    NOTE(review): depends on sort_dates(), defined elsewhere in this module.
    """
    # keep only the best (lowest-distance) match per template term
    matches = matches.sort_values(
        by=["Matched name", "Match confidence"]
    ).drop_duplicates(subset=["Matched name"])
    sliced_names = names.loc[names["Category"].isin(matches["Original name"])]
    # join on 'Category' and 'Original name
    merged_df = matches.merge(
        sliced_names,
        how="left",
        left_on="Original name",
        right_on="Category",
    )
    # Sometimes the original statement will have multiple lines of the same name. This drops all the duplicated names and keeps the first occurance
    merged_df = merged_df.drop_duplicates(subset=["Matched name"])
    # drop the original name column, sets index to 'Matched name', the information we are interested in, and transposes
    merged_df = (
        merged_df.drop(columns=["Original name", "Category", "Match confidence"])
        .set_index("Matched name")
        .T
    )
    # This adds the company name to the dataframe
    merged_df["Company Name"] = company_folder
    # This adds the CIK number to the dataframe
    merged_df["Code"] = code
    merged_df["CIK"] = cik
    merged_df["SIC"] = sic
    merged_df = merged_df.reset_index()
    merged_df.rename(columns={"index": "Dates"}, inplace=True)
    # This adds the Latest filing date to the dataframe, allows us to remove filings and keep the most recent filing
    # Sort dates in descending order
    merged_df = sort_dates(merged_df)
    # Add latest filing dates, the date of the most recent filing
    merged_df["Latest filing date"] = merged_df.at[0, "Dates"]
    print(company_folder)
    print(merged_df)
    return merged_df
def ngrams(string, n=2):
    """Normalise *string* and split it into overlapping character n-grams.

    The text is unicode-repaired, ASCII-stripped, punctuation-cleaned and
    title-cased before being padded and sliced, so equivalent company
    names produce identical n-gram sets.

    :param string: value to normalise (coerced to str)
    :param int n: gram length (default bigrams)
    :returns: list of n-character strings
    """
    text = str(string)
    text = fix_text(text)  # repair mojibake / broken unicode
    text = text.encode("ascii", errors="ignore").decode()  # drop non-ascii chars
    text = text.lower()
    # Strip punctuation that carries no matching signal.
    chars_to_remove = [")", "(", ".", "|", "[", "]", "{", "}", "'"]
    rx = "[" + re.escape("".join(chars_to_remove)) + "]"
    text = re.sub(rx, "", text)
    text = text.replace("&", "and")
    text = text.replace(",", " ")
    text = text.replace("-", " ")
    text = text.title()  # normalise case: capital at the start of each word
    # Collapse runs of whitespace into a single space.
    text = re.sub(" +", " ", text).strip()
    text = " " + text + " "  # pad so word boundaries form n-grams too
    text = re.sub(r"[,-./]|\sBD", r"", text)
    grams = zip(*[text[i:] for i in range(n)])
    return ["".join(gram) for gram in grams]
def getNearestN(query, vectorizer, nbrs):
    """Vectorise *query* strings and look up their nearest neighbours.

    :param query: iterable of strings to match
    :param vectorizer: fitted object exposing ``transform``
    :param nbrs: fitted neighbours model exposing ``kneighbors``
    :returns: ``(distances, indices)`` as returned by ``kneighbors``
    """
    tfidf_matrix = vectorizer.transform(query)
    return nbrs.kneighbors(tfidf_matrix)
def combine_duplicated_income_statement_data(df_income_statement):
    """Coalesce synonymous income-statement line items into canonical columns.

    Filings name the same concept differently ("Revenue", "Net sales",
    "Total operating revenue", ...).  For every (target, source) pair
    below, a NaN in the target column is filled from the source column;
    the redundant source columns are then dropped.  Pair order is
    significant: e.g. "Net revenue" is filled from "Total revenue" only
    after "Total revenue" itself has been filled from "Revenue".

    :param df_income_statement: frame containing all the raw line-item
        columns listed below (float-typed, NaN for missing)
    :returns: the frame with only the canonical columns kept
    """
    def _fill_missing(target, source):
        # Keep the target value unless it is NaN, in which case take the
        # source value (same coalescing lambda the original used).
        df_income_statement[target] = df_income_statement[target].combine(
            df_income_statement[source], lambda a, b: b if np.isnan(a) else a
        )

    # (target, source) pairs, applied in order.
    coalesce_pairs = [
        # Revenue
        ("Total revenue", "Revenue"),
        ("Net revenue", "Total revenue"),
        ("Net sales", "Sales"),
        ("Net revenue", "Net sales"),
        ("Total operating revenue", "Operating revenue"),
        ("Net revenue", "Total operating revenue"),
        # Cost of goods sold
        ("Cost of goods sold", "Cost of products sold"),
        ("Cost of goods sold", "Cost of revenue"),
        ("Cost of goods sold", "Cost of sales"),
        # Gross profit
        ("Gross profit", "Gross margin"),
        # Operating expenses
        ("Total operating expenses", "Operating expenses"),
        # Operating profit
        ("Operating profit", "Operating earnings"),
        ("Operating profit", "Operating income"),
        ("Operating profit", "Operating loss"),
        ("Operating profit", "Income from operations"),
        # Income tax
        ("Income tax expense", "Provision for income tax"),
        # Net income
        ("Net income", "Net loss"),
        ("Net income", "Net income loss"),
    ]
    for target, source in coalesce_pairs:
        _fill_missing(target, source)

    # Drop the source columns that have been folded into their targets.
    df_income_statement = df_income_statement.drop(
        columns=[
            "Revenue",
            "Sales",
            "Net sales",
            "Total revenue",
            "Operating revenue",
            "Total operating revenue",
            "Cost of products sold",
            "Cost of revenue",
            "Cost of sales",
            "Gross margin",
            "Operating expenses",
            "Operating earnings",
            "Operating income",
            "Operating loss",
            "Income from operations",
            "Provision for income tax",
            "Net loss",
            "Net income loss",
        ]
    )
    return df_income_statement
def sort_dates(df):
    """Parse the free-text "Dates" column and sort the frame newest-first.

    Fix: the extract/replace patterns were plain strings containing ``\\w``
    and ``\\s`` escapes, which is a SyntaxWarning in modern Python (and a
    future error); they are now raw strings.  Matching behaviour is
    unchanged.

    :param df: frame with a string "Dates" column like "Feb. 3, 2019"
    :returns: the frame with "Dates" as datetimes, sorted descending
    """
    # Extract just the date portion, e.g. "Feb. 31, 2019" out of longer text.
    df["Dates"] = df["Dates"].str.extract(r"([\w]{3}[.]{0,1} [\w]{1,2}[,]{0,1} [\w]{4})")
    # Drop the "." after the month abbreviation: "Feb. 31, 2019" -> "Feb 31, 2019".
    df["Dates"] = df["Dates"].str.replace(".", "", regex=False)
    # Convert to a proper datetime dtype.
    df["Dates"] = pd.to_datetime(df["Dates"])
    # Newest filing first.  NOTE(review): the index is not reset here, so
    # label 0 is not necessarily the newest row after sorting.
    df = df.sort_values(by=["Dates"], ascending=False)
    # # Repeat the same for the "Latest filing date" column
    # df["Latest filing date"] = df["Latest filing date"].str.extract(
    #     r"([\w]{3}[.]{0,1} [\w]{1,2}[,]{0,1} [\w]{4})"
    # )
    # df["Latest filing date"] = df["Latest filing date"].str.replace(
    #     ".", "", regex=False
    # )
    # df["Latest filing date"] = pd.to_datetime(df["Latest filing date"])
    return df
def drop_duplicated_dates(df):
    """Keep one row per (company, statement date): the most recent, most
    complete filing.

    A quarterly and an annual filing can share the same statement date;
    the annual one is usually more complete, so rows are ranked by filing
    date (newest first) and then by how few cells are missing.

    :param df: frame with "Company Name", "Dates", "Latest filing date"
    :returns: deduplicated frame without the helper columns
    """
    # Completeness score: number of missing cells per row (lower is better).
    df["Null"] = df.isnull().sum(axis=1)
    # Within each (company, date) group the preferred row sorts first.
    ordered = df.sort_values(
        by=["Company Name", "Dates", "Latest filing date", "Null"],
        ascending=[True, False, False, True],
    )
    deduped = ordered.drop_duplicates(subset=["Company Name", "Dates"], keep="first")
    return deduped.drop(columns=["Latest filing date", "Null"])
if __name__ == "__main__":
main() | [
"pandas.DataFrame",
"os.path.join",
"ftfy.fix_text",
"sklearn.feature_extraction.text.TfidfVectorizer",
"os.getcwd",
"numpy.isnan",
"pandas.to_datetime",
"sklearn.neighbors.NearestNeighbors",
"re.sub",
"os.listdir"
] | [((1632, 1660), 'os.listdir', 'os.listdir', (['source_directory'], {}), '(source_directory)\n', (1642, 1660), False, 'import os\n'), ((5513, 5570), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'clean_unique_income_statement_terms'}), '(columns=clean_unique_income_statement_terms)\n', (5525, 5570), True, 'import pandas as pd\n'), ((6738, 6797), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'min_df': '(1)', 'analyzer': 'ngrams', 'lowercase': '(False)'}), '(min_df=1, analyzer=ngrams, lowercase=False)\n', (6753, 6797), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((7736, 7824), 'pandas.DataFrame', 'pd.DataFrame', (['matches'], {'columns': "['Match confidence', 'Matched name', 'Original name']"}), "(matches, columns=['Match confidence', 'Matched name',\n 'Original name'])\n", (7748, 7824), True, 'import pandas as pd\n'), ((9626, 9642), 'ftfy.fix_text', 'fix_text', (['string'], {}), '(string)\n', (9634, 9642), False, 'from ftfy import fix_text\n'), ((9909, 9931), 're.sub', 're.sub', (['rx', '""""""', 'string'], {}), "(rx, '', string)\n", (9915, 9931), False, 'import re\n'), ((10315, 10349), 're.sub', 're.sub', (['"""[,-./]|\\\\sBD"""', '""""""', 'string'], {}), "('[,-./]|\\\\sBD', '', string)\n", (10321, 10349), False, 'import re\n'), ((15706, 15733), 'pandas.to_datetime', 'pd.to_datetime', (["df['Dates']"], {}), "(df['Dates'])\n", (15720, 15733), True, 'import pandas as pd\n'), ((1807, 1818), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1816, 1818), False, 'import os\n'), ((2196, 2207), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2205, 2207), False, 'import os\n'), ((2677, 2723), 'os.path.join', 'os.path.join', (['source_directory', 'company_folder'], {}), '(source_directory, company_folder)\n', (2689, 2723), False, 'import os\n'), ((6914, 6956), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': '(2)', 'n_jobs': '(-1)'}), '(n_neighbors=2, n_jobs=-1)\n', (6930, 6956), 
False, 'from sklearn.neighbors import NearestNeighbors\n'), ((10139, 10164), 're.sub', 're.sub', (['""" +"""', '""" """', 'string'], {}), "(' +', ' ', string)\n", (10145, 10164), False, 'import re\n'), ((10884, 10895), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (10892, 10895), True, 'import numpy as np\n'), ((11058, 11069), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (11066, 11069), True, 'import numpy as np\n'), ((11220, 11231), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (11228, 11231), True, 'import numpy as np\n'), ((11390, 11401), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (11398, 11401), True, 'import numpy as np\n'), ((11606, 11617), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (11614, 11617), True, 'import numpy as np\n'), ((11798, 11809), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (11806, 11809), True, 'import numpy as np\n'), ((12073, 12084), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (12081, 12084), True, 'import numpy as np\n'), ((12278, 12289), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (12286, 12289), True, 'import numpy as np\n'), ((12480, 12491), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (12488, 12491), True, 'import numpy as np\n'), ((12705, 12716), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (12713, 12716), True, 'import numpy as np\n'), ((12980, 12991), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (12988, 12991), True, 'import numpy as np\n'), ((13237, 13248), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (13245, 13248), True, 'import numpy as np\n'), ((13438, 13449), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (13446, 13449), True, 'import numpy as np\n'), ((13637, 13648), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (13645, 13648), True, 'import numpy as np\n'), ((13852, 13863), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (13860, 13863), True, 'import numpy as np\n'), ((14122, 14133), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (14130, 14133), True, 'import numpy 
as np\n'), ((14338, 14349), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (14346, 14349), True, 'import numpy as np\n'), ((14513, 14524), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (14521, 14524), True, 'import numpy as np\n'), ((3054, 3106), 'os.path.join', 'os.path.join', (['source_directory', 'company_folder', 'file'], {}), '(source_directory, company_folder, file)\n', (3066, 3106), False, 'import os\n'), ((4090, 4142), 'os.path.join', 'os.path.join', (['source_directory', 'company_folder', 'file'], {}), '(source_directory, company_folder, file)\n', (4102, 4142), False, 'import os\n')] |
# Copyright (c) 2020 fortiss GmbH
#
# Authors: <NAME>, <NAME>, <NAME>, and
# <NAME>
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import unittest
import numpy as np
import os
import matplotlib
import time
from bark_ml.behaviors.cont_behavior import BehaviorContinuousML
from bark_ml.behaviors.discrete_behavior import BehaviorDiscreteMotionPrimitivesML, \
BehaviorDiscreteMacroActionsML
from bark.runtime.commons.parameters import ParameterServer
from bark.core.models.dynamic import SingleTrackModel
from bark.core.world import World, MakeTestWorldHighway
class PyBehaviorTests(unittest.TestCase):
  """Smoke tests for the BARK-ML behavior wrappers."""

  def test_discrete_behavior(self):
    """A discrete macro-action behavior accepts an integer action index."""
    parameter_server = ParameterServer()
    behavior = BehaviorDiscreteMacroActionsML(parameter_server)
    # Activate the 0-th motion primitive.
    behavior.ActionToBehavior(0)
    print(behavior.action_space)

  def test_cont_behavior(self):
    """A continuous behavior accepts a numpy array as the next action."""
    parameter_server = ParameterServer()
    behavior = BehaviorContinuousML(parameter_server)
    # Set a zero action vector as the next action.
    behavior.ActionToBehavior(np.array([0., 0.]))
    print(behavior.action_space)
if __name__ == '__main__':
unittest.main() | [
"unittest.main",
"bark.runtime.commons.parameters.ParameterServer",
"bark_ml.behaviors.discrete_behavior.BehaviorDiscreteMacroActionsML",
"numpy.array",
"bark_ml.behaviors.cont_behavior.BehaviorContinuousML"
] | [((1190, 1205), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1203, 1205), False, 'import unittest\n'), ((706, 723), 'bark.runtime.commons.parameters.ParameterServer', 'ParameterServer', ([], {}), '()\n', (721, 723), False, 'from bark.runtime.commons.parameters import ParameterServer\n'), ((748, 786), 'bark_ml.behaviors.discrete_behavior.BehaviorDiscreteMacroActionsML', 'BehaviorDiscreteMacroActionsML', (['params'], {}), '(params)\n', (778, 786), False, 'from bark_ml.behaviors.discrete_behavior import BehaviorDiscreteMotionPrimitivesML, BehaviorDiscreteMacroActionsML\n'), ((961, 978), 'bark.runtime.commons.parameters.ParameterServer', 'ParameterServer', ([], {}), '()\n', (976, 978), False, 'from bark.runtime.commons.parameters import ParameterServer\n'), ((999, 1027), 'bark_ml.behaviors.cont_behavior.BehaviorContinuousML', 'BehaviorContinuousML', (['params'], {}), '(params)\n', (1019, 1027), False, 'from bark_ml.behaviors.cont_behavior import BehaviorContinuousML\n'), ((1101, 1121), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (1109, 1121), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Contains functionality that doesn't fit elsewhere
"""
import collections.abc
import inspect
import functools
import shelve
import struct
import logging
import pickle
import copy
import ast
import operator
import abc
import inspect
import io
import pprint
import traceback
import numpy
import progressbar
from typhon.utils.cache import mutable_cache
# Default progressbar widget layout used throughout typhon: a "=" bar,
# a percentage, an adaptive ETA estimate and the absolute finish time.
my_pb_widget = [progressbar.Bar("=", "[", "]"), " ",
                progressbar.Percentage(), " (",
                progressbar.AdaptiveETA(), " -> ",
                progressbar.AbsoluteETA(), ') ']
class switch(object):
    """Simulate a switch-case statement.

    http://code.activestate.com/recipes/410692/

    Usage::

        for case in switch(value):
            if case('a'):
                ...
                break
            if case():  # default
                ...
    """

    def __init__(self, value):
        # The subject being switched on.
        self.value = value
        # Once a case matched, subsequent cases also match (C-style
        # fall-through until a ``break``).
        self.fall = False

    def __iter__(self):
        """Return the match method once, then stop."""
        yield self.match
        # Fix: ``raise StopIteration`` inside a generator is converted to
        # RuntimeError by PEP 479 (Python >= 3.7), which crashed any loop
        # that exhausted the iterator without matching.  A plain return
        # ends iteration cleanly.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite."""
        if self.fall or not args:
            return True
        elif self.value in args:
            self.fall = True
            return True
        else:
            return False
# Following inspired by http://stackoverflow.com/a/7811344/974555
def validate(func, locals):
    """Validate a function's arguments against its annotations.

    Inside an annotated function (see PEP-3107), do type checking on the
    arguments.  An annotation may be a type, a callable predicate, or a
    sequence of either.  Use like this::

        def f(x: str, b: int):
            validate(f, locals())
            ...  # proceed

    or use the :func:`validator` decorator::

        @validator
        def f(x: str, b: int):
            ...  # proceed
    """
    for name, requirement in func.__annotations__.items():
        _validate_one(name, requirement, locals[name], func)
def _validate_one(var, test, value, func):
"""Verify that var=value passes test
Internal function for validate
"""
if isinstance(test, type): # check for subclass
if not isinstance(value, test):
raise TypeError(("Wrong type for argument '{}'. "
"Expected: {}. Got: {}.").format(
var, test, type(value)))
elif callable(test):
if not test(value):
raise TypeError(("Failed test for argument '{0}'. "
"Value: {1}. Test {2.__name__} "
"failed.").format(
var, value if len(repr(value)) < 10000 else "(too long)", test))
elif isinstance(test, collections.abc.Sequence): # at least one should be true
passed = False
for t in test:
try:
_validate_one(var, t, value, func)
except TypeError:
pass
else:
passed = True
if not passed:
raise TypeError(("All tests failed for function {0}, argument '{1}'. "
"Value: {2}, type {3}. Tests: {4!s}").format(
func.__qualname__, var, value, type(value), test))
else:
raise RuntimeError("I don't know how to validate test {}!".format(test))
def validator(func):
    """Decorator that validates a call against the function's annotations.

    Uses :func:`validate`; required types/values are taken from the
    annotations.  Example::

        @validator
        def f(x: numbers.Number, y: numbers.Number, mode: str):
            return x + y

    Does not currently work for ``*args`` and ``**kwargs`` style arguments.
    """
    @functools.wraps(func)
    def inner(*args, **kwargs):
        signature = inspect.signature(func)
        params = list(signature.parameters.values())
        # Start from declared defaults, then overlay positional arguments,
        # then keyword arguments.
        bound = {p.name: p.default for p in params}
        bound.update(zip((p.name for p in params), args))
        bound.update(kwargs)
        validate(func, bound)
        return func(*args, **kwargs)
    return inner
def cat(*args):
    """Concatenate either ndarray or ma.MaskedArray.

    Arguments as for numpy.concatenate / numpy.ma.concatenate.  The type
    of the first array in the first argument decides which one is used.
    """
    first = args[0][0]
    joiner = (numpy.ma.concatenate
              if isinstance(first, numpy.ma.MaskedArray)
              else numpy.concatenate)
    return joiner(*args)
def disk_lru_cache(path):
    """Like functools.lru_cache, but stored on disk.

    Returns a decorator.  Results are persisted in a shelve database at
    *path*, keyed by the stringified argument key, so they survive across
    processes.

    :param str path: File to use for caching.
    :returns function: Decorator
    """
    missing = object()  # sentinel distinguishing "absent" from cached None

    def decorating_function(user_function):
        # NOTE(review): relies on the private functools._make_key helper,
        # same as the original implementation.
        store = shelve.open(path, protocol=4, writeback=True)

        def wrapper(*args, **kwds):
            key = str(functools._make_key(args, kwds, False, kwd_mark=(42,)))
            cached = store.get(key, missing)
            if cached is not missing:
                logging.debug(("Getting result from cache "
                               "{!s}, (key {!s}").format(path, key))
                return cached
            logging.debug("No result in cache")
            result = user_function(*args, **kwds)
            logging.debug("Storing result in cache")
            store[key] = result
            store.sync()
            return result

        return functools.update_wrapper(wrapper, user_function)

    return decorating_function
#
# def __init__(self, fn):
# self.fn = fn
# self.memo = {}
# self.keys = [] # don't store too many
#
# def __call__(self, *args, **kwds):
# str = pickle.dumps(args, 1)+pickle.dumps(kwds, 1)
# if not str in self.memo:
# self.memo[str] = self.fn(*args, **kwds)
# self.keys.append(str)
# if len(self.keys) > maxsize:
# del self.memo[self.keys[0]]
# del self.keys[0]
#
# return self.memo[str]
#
class NotTrueNorFalseType:
    """Not true, nor false.

    A singleton whose instance stands in for a boolean flag whose value is
    unknown or undefined: using it in a boolean context raises TypeError
    instead of silently acting as True or False.

    By Stack Overflow user shx2, http://stackoverflow.com/a/25330344/974555
    """

    def __new__(cls, *args, **kwargs):
        # Singleton: reuse the cached instance if one was already created.
        existing = getattr(cls, "_obj", None)
        if existing is None:
            existing = object.__new__(cls, *args, **kwargs)
            cls._obj = existing
        return existing

    def __bool__(self):
        raise TypeError('%s: Value is neither True nor False' % self)

    def __repr__(self):
        return 'NotTrueNorFalse'


NotTrueNorFalse = NotTrueNorFalseType()
def rec_concatenate(seqs, ax=0):
    """Concatenate record arrays even if name order differs.

    Takes the first record array and appends data from the rest, by name,
    even if the name order or the specific dtype differs.

    Fix: the fallback path matched numpy's error message with an exact
    string comparison (``e.args[0] != "invalid type promotion"``); modern
    numpy phrases it "invalid type promotion with structured
    datatype(s).", which made the liberal fallback unreachable.  Match on
    the stable substring instead.

    :param seqs: sequence of 1-d structured ndarrays sharing field names
    :param int ax: axis; only 0 is supported for the liberal fallback
    :returns: concatenated structured ndarray using seqs[0]'s dtype
    """
    try:
        return numpy.concatenate(seqs)
    except TypeError as e:
        if "invalid type promotion" not in str(e):
            raise
    # Reached only on the "invalid type promotion" TypeError: perform a
    # liberal, field-by-field concatenation.
    if ax != 0 or any(s.ndim > 1 for s in seqs):
        raise ValueError("Liberal concatenations must be 1-d")
    if len(seqs) < 2:
        raise ValueError("Must concatenate at least 2")
    M = numpy.empty(shape=sum(s.shape[0] for s in seqs),
                    dtype=seqs[0].dtype)
    for nm in M.dtype.names:
        if M.dtype[nm].names is not None:
            # Nested structured field: recurse per sub-field.
            M[nm] = rec_concatenate([s[nm] for s in seqs])
        else:
            M[nm] = numpy.concatenate([s[nm] for s in seqs])
    return M
def array_equal_with_equal_nans(A, B):
    """Like numpy.array_equal, but NaNs in matching positions compare equal."""
    both_nan = numpy.isnan(A) & numpy.isnan(B)
    return numpy.all((A == B) | both_nan)
def mark_for_disk_cache(**kwargs):
    """Mark a method for later caching by :func:`setmem`.

    The keyword arguments are stored on the function and forwarded to
    ``memory.cache`` when :func:`setmem` processes the instance.
    """
    def _mark(meth, d):
        meth.disk_cache = True
        meth.disk_cache_args = d
        return meth
    return functools.partial(_mark, d=kwargs)
def setmem(obj, memory):
    """Wrap methods marked by :func:`mark_for_disk_cache` with ``memory.cache``.

    Meant to be called from ``__init__`` as ``setmem(self, memory)``.
    A ``memory`` of None disables caching and leaves the object untouched.
    """
    if memory is None:
        return
    for attr_name in dir(obj):
        candidate = getattr(obj, attr_name)
        # Marked methods carry a truthy ``disk_cache`` attribute plus the
        # kwargs destined for memory.cache.
        if getattr(candidate, "disk_cache", False):
            setattr(obj, attr_name,
                    memory.cache(candidate, **candidate.disk_cache_args))
def find_next_time_instant(dt1, month=None, day=None, hour=None,
        minute=None, second=None):
    """First following time-instant given fields.

    Given a datetime object and fields
    (year/month/day/hour/minute/second), find the first instant /after/
    the given datetime meeting those fields. For example, datetime(2009,
    2, 28, 22, 0, 0), with hour=1, minute=15, will yield datetime(2009, 3,
    1, 1, 15).

    Fixes: the module never imports ``datetime`` and the day-rollover
    branch called an undefined ``mod`` function, so any call raised
    NameError; a local import and the ``%`` operator repair that.

    WARNING: the rollover logic is largely untested (as the original
    noted); only the simple cases — candidate already at/after dt1, or a
    single smaller-unit bump — are exercised.

    :param datetime dt1: Starting time
    :param int month:
    :param int day:
    :param int hour:
    :param int minute:
    :param int second:
    """
    import datetime  # not imported at module level; keep the fix local

    dt2 = datetime.datetime(dt1.year,
                            month or dt1.month,
                            day or dt1.day,
                            hour or dt1.hour,
                            minute or dt1.minute,
                            second or dt1.second)
    while dt2 < dt1:
        if dt2.month < dt1.month:
            dt2 = dt2.replace(year=dt2.year+1)
            continue
        if dt2.day < dt1.day:
            # if this makes the month go from 12 to 1, the first if-block
            # will correct the year on the next pass
            dt2 = dt2.replace(month=((dt2.month+1) % 12 or 1))
            continue
        if dt2.hour < dt1.hour:
            dt2 += datetime.timedelta(days=1)
            continue
        if dt2.minute < dt1.minute:
            dt2 += datetime.timedelta(hours=1)
            continue
        if dt2.second < dt1.second:
            dt2 += datetime.timedelta(minutes=1)
            continue
    return dt2
# Next part from http://stackoverflow.com/a/9558001/974555
# Mapping from AST operator node types to their arithmetic implementation.
operators = {ast.Add: operator.add, ast.Sub: operator.sub, ast.Mult: operator.mul,
             ast.Div: operator.truediv, ast.Pow: operator.pow, ast.BitXor: operator.xor,
             ast.USub: operator.neg}


def safe_eval(expr):
    """Safely evaluate a string that may contain basic arithmetic.

    Only numeric literals and the operators listed in ``operators`` are
    allowed; anything else raises TypeError.
    """
    return _safe_eval_node(ast.parse(expr, mode="eval").body)


def _safe_eval_node(node):
    # ast.Num was deprecated in Python 3.8 in favour of ast.Constant;
    # check the Constant node and restrict it to numbers so strings and
    # other literals are still rejected.
    if isinstance(node, ast.Constant):  # <number>
        if isinstance(node.value, (int, float, complex)):
            return node.value
        raise TypeError(node)
    elif isinstance(node, ast.BinOp):  # <left> <operator> <right>
        return operators[type(node.op)](_safe_eval_node(node.left),
                                        _safe_eval_node(node.right))
    elif isinstance(node, ast.UnaryOp):  # <operator> <operand> e.g., -1
        return operators[type(node.op)](_safe_eval_node(node.operand))
    else:
        raise TypeError(node)
def get_verbose_stack_description(first=2, last=-1, include_source=True,
        include_locals=True, include_globals=False):
    """Return a verbose, human-readable dump of the current call stack.

    The result starts with the formatted traceback and then, for every
    selected frame, a separator followed by the frame's source code and
    (optionally) its local and global variables.

    :param int first: index of the first frame to describe (0 = innermost)
    :param int last: slice end for the frame list
    :param bool include_source: dump each frame's source code
    :param bool include_locals: pprint each frame's locals
    :param bool include_globals: pprint each frame's globals
    :returns str: the assembled description
    """
    out = io.StringIO()
    out.write("".join(traceback.format_stack()))
    for frame_info in inspect.stack()[first:last]:
        frame = frame_info.frame
        try:
            out.write("-" * 60 + "\n")
            if include_source:
                try:
                    out.write(inspect.getsource(frame) + "\n")
                except OSError:
                    # Source unavailable (interactive shell, exec'd
                    # string, ...): fall back to the frame summary.
                    out.write(str(inspect.getframeinfo(frame)) +
                              "\n(no source code)\n")
            if include_locals:
                out.write(pprint.pformat(frame.f_locals) + "\n")
            if include_globals:
                out.write(pprint.pformat(frame.f_globals) + "\n")
        finally:
            # Drop frame references to avoid reference cycles; frames
            # that are still executing refuse to be cleared, which is fine.
            try:
                frame.clear()
            except RuntimeError:
                pass
    return out.getvalue()
| [
"pprint.pformat",
"numpy.isnan",
"progressbar.Percentage",
"inspect.getframeinfo",
"progressbar.Bar",
"inspect.signature",
"progressbar.AbsoluteETA",
"traceback.format_stack",
"inspect.getsource",
"ast.parse",
"functools.partial",
"io.StringIO",
"shelve.open",
"numpy.ma.concatenate",
"fu... | [((396, 426), 'progressbar.Bar', 'progressbar.Bar', (['"""="""', '"""["""', '"""]"""'], {}), "('=', '[', ']')\n", (411, 426), False, 'import progressbar\n'), ((449, 473), 'progressbar.Percentage', 'progressbar.Percentage', ([], {}), '()\n', (471, 473), False, 'import progressbar\n'), ((497, 522), 'progressbar.AdaptiveETA', 'progressbar.AdaptiveETA', ([], {}), '()\n', (520, 522), False, 'import progressbar\n'), ((548, 573), 'progressbar.AbsoluteETA', 'progressbar.AbsoluteETA', ([], {}), '()\n', (571, 573), False, 'import progressbar\n'), ((3562, 3583), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (3577, 3583), False, 'import functools\n'), ((8050, 8083), 'functools.partial', 'functools.partial', (['mark'], {'d': 'kwargs'}), '(mark, d=kwargs)\n', (8067, 8083), False, 'import functools\n'), ((11227, 11240), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (11238, 11240), False, 'import io\n'), ((3631, 3654), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (3648, 3654), False, 'import inspect\n'), ((4291, 4318), 'numpy.ma.concatenate', 'numpy.ma.concatenate', (['*args'], {}), '(*args)\n', (4311, 4318), False, 'import numpy\n'), ((4344, 4368), 'numpy.concatenate', 'numpy.concatenate', (['*args'], {}), '(*args)\n', (4361, 4368), False, 'import numpy\n'), ((4682, 4727), 'shelve.open', 'shelve.open', (['path'], {'protocol': '(4)', 'writeback': '(True)'}), '(path, protocol=4, writeback=True)\n', (4693, 4727), False, 'import shelve\n'), ((5345, 5393), 'functools.update_wrapper', 'functools.update_wrapper', (['wrapper', 'user_function'], {}), '(wrapper, user_function)\n', (5369, 5393), False, 'import functools\n'), ((6967, 6990), 'numpy.concatenate', 'numpy.concatenate', (['seqs'], {}), '(seqs)\n', (6984, 6990), False, 'import numpy\n'), ((11306, 11321), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (11319, 11321), False, 'import inspect\n'), ((5107, 5142), 'logging.debug', 'logging.debug', (['"""No result in 
cache"""'], {}), "('No result in cache')\n", (5120, 5142), False, 'import logging\n'), ((5205, 5245), 'logging.debug', 'logging.debug', (['"""Storing result in cache"""'], {}), "('Storing result in cache')\n", (5218, 5245), False, 'import logging\n'), ((7633, 7673), 'numpy.concatenate', 'numpy.concatenate', (['[s[nm] for s in seqs]'], {}), '([s[nm] for s in seqs])\n', (7650, 7673), False, 'import numpy\n'), ((10588, 10616), 'ast.parse', 'ast.parse', (['expr'], {'mode': '"""eval"""'}), "(expr, mode='eval')\n", (10597, 10616), False, 'import ast\n'), ((11261, 11285), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (11283, 11285), False, 'import traceback\n'), ((7817, 7831), 'numpy.isnan', 'numpy.isnan', (['A'], {}), '(A)\n', (7828, 7831), False, 'import numpy\n'), ((7834, 7848), 'numpy.isnan', 'numpy.isnan', (['B'], {}), '(B)\n', (7845, 7848), False, 'import numpy\n'), ((11727, 11757), 'pprint.pformat', 'pprint.pformat', (['frame.f_locals'], {}), '(frame.f_locals)\n', (11741, 11757), False, 'import pprint\n'), ((11822, 11853), 'pprint.pformat', 'pprint.pformat', (['frame.f_globals'], {}), '(frame.f_globals)\n', (11836, 11853), False, 'import pprint\n'), ((11494, 11518), 'inspect.getsource', 'inspect.getsource', (['frame'], {}), '(frame)\n', (11511, 11518), False, 'import inspect\n'), ((11591, 11618), 'inspect.getframeinfo', 'inspect.getframeinfo', (['frame'], {}), '(frame)\n', (11611, 11618), False, 'import inspect\n')] |
import numpy as np
from numpy.linalg import inv
from bokeh.models import ColumnDataSource
class Structure:
    """Visual model of a three-storey shear frame for a Bokeh app.

    Holds one ColumnDataSource per drawable element (storey masses, the
    horizontal bars supporting them, six truss members, the base), the
    3x3 system matrices, and several indicator/label sources that the
    plot callbacks below update in place.
    """
    def __init__(self, masses, massSupports, trusses, trussLength, base):
        """Wrap the raw glyph data dicts in ColumnDataSources.

        :param masses: list of data dicts, one per storey mass glyph
        :param massSupports: list of data dicts for the horizontal bars
            carrying each mass
        :param trusses: list of data dicts, one per truss member
        :param trussLength: horizontal distance between the two columns
        :param base: data dict for the fixed base glyph
        """
        masslist = list()
        for i in range(len(masses)):
            masslist.append( ColumnDataSource(data=masses[i]) )
        self.masses = masslist
        massSupportlist = list()
        for i in range(len(massSupports)):
            massSupportlist.append( ColumnDataSource(data=massSupports[i]) )
        self.massSupports = massSupportlist
        trusslist = list()
        for i in range(len(trusses)):
            trusslist.append( ColumnDataSource(data=trusses[i]) )
        self.trusses = trusslist
        self.trussLength = trussLength
        self.base = ColumnDataSource(base)
        # System matrices (mass, damping, stiffness), filled in elsewhere.
        self.M = np.zeros((3,3))
        self.C = np.zeros((3,3))
        self.K = np.zeros((3,3))
        # Mass locations
        self.massLocations = np.zeros((3,2))
        # Force indicator (forces indicate in the plotting domain the force
        # carried by each of the truss members besides the location where to
        # display the force values) ((Here default values are given))
        self.forces = ColumnDataSource(
            data=dict(
                x=[0,0,0],
                y=[0,0,0],
                force=['Force = ','Force = ','Force = ']
            )
        )
        # Per-storey mass labels drawn above each mass glyph.
        self.massIndicators = ColumnDataSource(
            data=dict(
                x=[0,0,0],
                y=[0,0,0],
                mass=['','','']
            )
        )
        # Per-storey stiffness labels drawn next to the supports.
        self.stiffnessIndicators = ColumnDataSource(
            data=dict(
                x=[0,0,0],
                y=[0,0,0],
                stiffness=['','','']
            )
        )
        self.maximumDisplacement = ColumnDataSource(data=dict(storey=["First","Seconds","Third"],maxDisp=[0.0,0.0,0.0]))
    def update_system(self, displacement):
        """Push a new 3-storey displacement vector into all glyph sources."""
        self.update_masses(displacement)
        self.update_mass_indicator_locaiton()
        self.update_massSupprts(displacement)
        self.update_stiffness_indicator_locaiton()
        self.update_truss_sources()
    def update_masses(self, displacement):
        """Move each storey mass horizontally to its displacement value."""
        self.masses[0].data = dict(x=[displacement[0]] , y=self.masses[0].data['y'])
        self.masses[1].data = dict(x=[displacement[1]] , y=self.masses[1].data['y'])
        self.masses[2].data = dict(x=[displacement[2]] , y=self.masses[2].data['y'])
    def update_massSupprts(self, displacement):
        """Shift each horizontal support bar along with its storey displacement."""
        self.massSupports[0].data['x'] = self.massSupports[0].data['x']*0 + [-self.trussLength/2+displacement[0], self.trussLength/2+displacement[0]]
        self.massSupports[1].data['x'] = self.massSupports[1].data['x']*0 + [-self.trussLength/2+displacement[1], self.trussLength/2+displacement[1]]
        self.massSupports[2].data['x'] = self.massSupports[2].data['x']*0 + [-self.trussLength/2+displacement[2], self.trussLength/2+displacement[2]]
    def update_truss_sources(self):
        """Recompute the deformed shape of all six truss members.

        Each truss is sampled at noNodes points: x follows the cubic
        bending shape, y is linear between the end heights.
        """
        noNodes = 10
        # truss1
        x1 = - self.trussLength/2
        x2 = self.masses[0].data['x'][0] - self.trussLength/2
        y1 = 0.0
        y2 = self.masses[0].data['y'][0]
        ys = linIntepolate(y1,y2,y1,y2,noNodes,self.trussLength)
        xs = cubicInterpolate(x1,x2,y1,y2,noNodes,self.trussLength)
        self.trusses[0].data = dict( x=xs, y=ys )
        # truss2
        x1 = self.trussLength/2
        x2 = self.masses[0].data['x'][0] + self.trussLength/2
        y1 = 0.0
        y2 = self.masses[0].data['y'][0]
        xs = cubicInterpolate(x1,x2,y1,y2,noNodes,self.trussLength)
        ys = linIntepolate(y1,y2,y1,y2,noNodes,self.trussLength)
        self.trusses[1].data = dict( x=xs, y=ys )
        # truss3
        x1 = self.masses[0].data['x'][0] - self.trussLength/2
        x2 = self.masses[1].data['x'][0] - self.trussLength/2
        y1 = self.masses[0].data['y'][0]
        y2 = self.masses[1].data['y'][0]
        xs = cubicInterpolate(x1,x2,y1,y2,noNodes,self.trussLength)
        ys = linIntepolate(y1,y2,y1,y2,noNodes,self.trussLength)
        self.trusses[2].data =dict( x=xs, y=ys )
        # truss4
        x1 = self.masses[0].data['x'][0] + self.trussLength/2
        x2 = self.masses[1].data['x'][0] + self.trussLength/2
        y1 = self.masses[0].data['y'][0]
        y2 = self.masses[1].data['y'][0]
        xs = cubicInterpolate(x1,x2,y1,y2,noNodes,self.trussLength)
        ys = linIntepolate(y1,y2,y1,y2,noNodes,self.trussLength)
        self.trusses[3].data =dict( x=xs, y=ys )
        # truss5
        x1 = self.masses[1].data['x'][0] - self.trussLength/2
        x2 = self.masses[2].data['x'][0] - self.trussLength/2
        y1 = self.masses[1].data['y'][0]
        y2 = self.masses[2].data['y'][0]
        xs = cubicInterpolate(x1,x2,y1,y2,noNodes,self.trussLength)
        ys = linIntepolate(y1,y2,y1,y2,noNodes,self.trussLength)
        self.trusses[4].data =dict( x=xs, y=ys )
        # truss6
        x1 = self.masses[1].data['x'][0] + self.trussLength/2
        x2 = self.masses[2].data['x'][0] + self.trussLength/2
        y1 = self.masses[1].data['y'][0]
        y2 = self.masses[2].data['y'][0]
        xs = cubicInterpolate(x1,x2,y1,y2,noNodes,self.trussLength)
        ys = linIntepolate(y1,y2,y1,y2,noNodes,self.trussLength)
        self.trusses[5].data =dict( x=xs, y=ys )
    def update_force_indicator_location(self):
        """Place the per-truss force labels beside trusses 2, 4 and 6."""
        # first force indicator
        x1 = (self.trusses[1].data['x'][0] + self.trusses[1].data['x'][1]) / 2 + 2.5 # where 2.5 is an offset value
        y1 = (self.trusses[1].data['y'][1] + self.trusses[1].data['y'][0]) / 2
        # second force indicator
        x2 = (self.trusses[3].data['x'][0] + self.trusses[3].data['x'][1]) / 2 + 2.5
        y2 = (self.trusses[3].data['y'][1] + self.trusses[3].data['y'][0]) / 2
        # third force indicator
        x3 = (self.trusses[5].data['x'][0] + self.trusses[5].data['x'][1]) / 2 + 2.5
        y3 = (self.trusses[5].data['y'][1] + self.trusses[5].data['y'][0]) / 2
        # update the source file
        self.forces.data = dict(x=[x1,x2,x3],y=[y1,y2,y3],force=self.forces.data['force'])
    def update_mass_indicator_locaiton(self):
        """Keep the mass labels just above each mass; refresh the max-displacement table."""
        updateLocation = list()
        for i in self.masses:
            updateLocation.append( i.data['y'][0] + 0.5 )
        self.massIndicators.data = dict(
            x=[self.masses[0].data['x'][0], self.masses[1].data['x'][0], self.masses[2].data['x'][0]],
            y=updateLocation,
            mass=self.massIndicators.data['mass']
        )
        # Update the value of the maximum displacement of the structure in the table
        self.update_maximum_displacement()
    def update_stiffness_indicator_locaiton(self):
        """Place the stiffness labels beside the right end of each support bar."""
        updateLocationX = list()
        updateLocationY = list()
        counter = 0
        for i in self.massSupports:
            updateLocationY.append( (i.data['y'][0] + self.trussLength*counter) / 2 )
            updateLocationX.append( (i.data['x'][1] + 1.0) )
            counter += 1
        self.stiffnessIndicators.data = dict(
            x=updateLocationX,
            y=updateLocationY,
            stiffness=self.stiffnessIndicators.data['stiffness']
        )
    def update_maximum_displacement(self):
        """Write the current storey displacements (rounded to 3 dp) to the table source."""
        self.maximumDisplacement.data = dict(
            storey=["First","Seconds","Third"],
            maxDisp=[
                round(self.masses[0].data['x'][0],3),
                round(self.masses[1].data['x'][0],3),
                round(self.masses[2].data['x'][0],3)
            ]
        )
def cubic_N1(xi):
    """First cubic Hermite shape function: 1 at xi = -1, 0 at xi = +1."""
    one_minus = 1 - xi
    return 0.25 * one_minus * one_minus * (2 + xi)
def cubic_N2(xi):
    """Second cubic Hermite shape function: 0 at xi = -1, 1 at xi = +1."""
    one_plus = 1 + xi
    return 0.25 * one_plus * one_plus * (2 - xi)
def cubicInterpolate(y1, y2, x1, x2, noNodes, length):
    """Blend two end values with cubic Hermite shape functions.

    Parameters
    ----------
    y1, y2 : float
        End values blended by the shape functions.  NOTE(review): callers in
        this file pass x-coordinates in these first two slots.
    x1, x2, length : float
        Accepted for signature compatibility with ``linIntepolate`` but not
        used by the cubic blend.
    noNodes : int
        Number of interpolation points; must be >= 2 (``noNodes == 1`` would
        divide by zero, as in the original).

    Returns
    -------
    numpy.ndarray
        Array of ``noNodes`` blended values.
    """
    nodes = np.ones(noNodes)
    # Idiom fix: the original used a float-valued while-loop counter with
    # float->int indexing; an integer for-loop produces identical values.
    for i in range(noNodes):
        # Map node index 0..noNodes-1 onto the natural coordinate xi in [-1, 1].
        xi = i * 2.0 / (float(noNodes) - 1.0) - 1.0
        nodes[i] = cubic_N1(xi) * y1 + cubic_N2(xi) * y2
    return nodes
def linear_N1(y, a, b):
    """Linear shape function that equals 1 at ``y == a`` and 0 at ``y == b``."""
    span = a - b
    return (y - b) / span
def linear_N2(y, a, b):
    """Linear shape function that equals 0 at ``y == a`` and 1 at ``y == b``."""
    span = b - a
    return (y - a) / span
def linIntepolate(y1, y2, x1, x2, noNodes, length):
    """Linearly interpolate ``noNodes`` values between ``y1`` and ``y2``.

    Parameters
    ----------
    y1, y2 : float
        End values blended by the linear shape functions.
    x1, x2 : float
        Coordinates of the two interpolation ends; must differ, otherwise the
        shape functions divide by zero (same as the original).
    noNodes : int
        Number of interpolation points; must be >= 2.
    length : float
        Physical span covered by the nodes, measured from ``x1``.

    Returns
    -------
    numpy.ndarray
        Array of ``noNodes`` interpolated values.
    """
    nodes = np.ones(noNodes)
    # Idiom fix: the original used a float while-loop counter and indexed
    # with int(i); an integer for-loop yields the same values.
    for i in range(noNodes):
        x = i / (noNodes - 1) * length + x1
        nodes[i] = linear_N1(x, x1, x2) * y1 + linear_N2(x, x1, x2) * y2
    return nodes
def solve_time_domain(structure, seismicInput):
    """Integrate the 3-DOF system response to a ground-motion record.

    Uses the generalized-alpha time integration scheme.  ``structure`` must
    provide mass/damping/stiffness matrices ``M``, ``C``, ``K`` (3x3);
    ``seismicInput.data`` must provide equally spaced ``'time'`` samples and
    ``'amplitude'`` accelerations.  Returns a (3, N) array of displacement
    histories, one row per storey.
    """
    # Uniform time step inferred from the first two samples.
    dt = seismicInput.data['time'][1] - seismicInput.data['time'][0]
    N = len(seismicInput.data['amplitude'])
    M = structure.M
    C = structure.C
    K = structure.K
    #I = np.array([[1,0,0],[0,1,0],[0,0,1]])
    # Forcing: the record is applied to the first DOF only; the loop below
    # converts it to an effective force via -M (ground-acceleration loading).
    F = np.zeros((3,N))
    F[0,:] = seismicInput.data['amplitude']
    x0 = np.array([0.0,0.0,0.0])
    v0 = np.array([0.0,0.0,0.0])
    ###########################################################################
    ######### Generalized-alpha (high order time integration method) ##########
    ###### Credit to inplementation of <NAME> from Statik Lehrstuhl ######
    ###########################################################################
    y = np.zeros((3,len( F[0,:] ))) # 3 refers to the number of dofs (3 storeys)
    y[:,0] = x0
    # NOTE(review): this a0 is computed and then immediately overwritten by
    # the zero initialisation three lines below — dead computation.
    a0 = np.dot(inv(M),( np.dot(-M,F[:,0]) - np.dot(C,v0) - np.dot(K,x0) ))
    # Zero initial displacement, velocity and acceleration.
    u0 = np.array([0.0,0.0,0.0])
    v0 = np.array([0.0,0.0,0.0])
    a0 = np.array([0.0,0.0,0.0])
    f0 = F[:,0]
    u1 = u0
    v1 = v0
    a1 = a0
    f1 = f0
    # pInf is presumably the spectral radius rho_inf of the generalized-alpha
    # scheme (controls numerical dissipation) — TODO confirm against the
    # original implementation this was ported from.
    pInf = 0.15
    alphaM = (2.0 * pInf - 1.0) / (pInf + 1.0)
    alphaF = pInf / (pInf + 1.0)
    beta = 0.25 * (1 - alphaM + alphaF)**2
    gamma = 0.5 - alphaM + alphaF
    # coefficients for LHS
    a1h = (1.0 - alphaM) / (beta * dt**2)
    a2h = (1.0 - alphaF) * gamma / (beta * dt)
    a3h = 1.0 - alphaF
    # coefficients for mass
    a1m = a1h
    a2m = a1h * dt
    a3m = (1.0 - alphaM - 2.0 * beta) / (2.0 * beta)
    #coefficients for damping
    a1b = (1.0 - alphaF) * gamma / (beta * dt)
    a2b = (1.0 - alphaF) * gamma / beta - 1.0
    a3b = (1.0 - alphaF) * (0.5 * gamma / beta - 1.0) * dt
    # coefficient for stiffness
    a1k = -1.0 * alphaF
    # coefficients for velocity update
    a1v = gamma / (beta * dt)
    a2v = 1.0 - gamma / beta
    a3v = (1.0 - gamma / (2 * beta)) * dt
    # coefficients for acceleration update
    a1a = a1v / (dt * gamma)
    a2a = -1.0 / (beta * dt)
    a3a = 1.0 - 1.0 / (2.0 * beta)
    #y0 = x0 - dt*v0 + (dt*dt/2)*a0
    #y[:,1] = y0
    #A = M + dt*gamma2*C + dt*dt*beta2*K
    #invA = inv(A)
    # Time-stepping: at each step assemble the effective LHS/RHS and solve
    # for the new displacement, then update velocity/acceleration.
    for i in range(1,len(F[0,:])):
        #A = (M/(dt*dt) + C/(2*dt))
        #tempVec = y[:,i-1] + 0.5*(-np.dot(M,F[:,i-1])-np.dot(C,y_dot[:,i-1])-np.dot(K,y[:,i-1]))*dt
        #y[:,i] = y[:,i-1] + tempVec*dt
        #y_dot[:,i] = np.dot(inv(I+C) , tempVec + 0.5*(-np.dot(M,F[:,i])-np.dot(K,y[:,i])))
        #B = np.dot( 2*M/(dt*dt) - K, y[:,i-1]) + np.dot((C/(2*dt) - M/(dt*dt)) , y[:,i-2] + np.dot(-M,F[:,i-1]))
        #print('np.dot(-M,F[:,i]) = ',np.dot(-M,F[:,i]))
        # Force evaluated at the alphaF-shifted time point.
        Ff = (1.0 - alphaF) * np.dot(-M,F[:,i]) + alphaF * f0
        #print('F = ',F)
        LHS = a1h * M + a2h * C + a3h * K
        RHS = np.dot(M,(a1m * u0 + a2m * v0 + a3m * a0))
        RHS += np.dot(C,(a1b * u0 + a2b * v0 + a3b * a0))
        RHS += np.dot(a1k * K, u0) + Ff
        # update self.f1
        f1 = np.dot(-M,F[:,i])
        # updates self.u1,v1,a1
        u1 = np.linalg.solve(LHS, RHS)
        y[:,i] = u1
        v1 = a1v * (u1 - u0) + a2v * v0 + a3v * a0
        a1 = a1a * (u1 - u0) + a2a * v0 + a3a * a0
        # Shift state for the next step.
        u0 = u1
        v0 = v1
        a0 = a1
        # update the force
        f0 = f1
    '''
    ###########################################################################
    ################## Low-order time integration method ######################
    ###########################################################################
    y = np.zeros((3,len( F[0,:] )))
    y[:,0] = x0
    a0 = np.dot(inv(M),( F[:,0] - np.dot(C,v0) - np.dot(K,x0) ))
    y0 = x0 - dt*v0 + (dt*dt/2)*a0
    y[:,1] = y0
    for i in range(2,len(F[0,:])):
        A = M/(dt**2) + C/dt + K
        B = -np.dot(M,F[:,i]) + np.dot(M/(dt**2) , 2*y[:,i-1]-y[:,i-2]) + np.dot(C/dt,y[:,i-1])
        yNew = np.dot(inv(A) , B)
        y[:,i] = yNew
    '''
    return y
def construct_truss_sources(massOne, massTwo, massThree, length):
    """Build the six truss data dicts, two per storey.

    Each dict holds two-node ``x``/``y`` lists; by convention index 0 is the
    lower node and index 1 the upper node.  Trusses 1-2 connect the base to
    mass one, 3-4 mass one to mass two, 5-6 mass two to mass three.
    """
    half = length / 2

    def vertical_truss(center_x, offset, y_bottom, y_top):
        # A truss is a vertical segment offset left/right of its mass.
        x = center_x + offset
        return dict(x=[x, x], y=[y_bottom, y_top])

    ground = massOne['y'][0] - length  # the lowest trusses reach the base
    return [
        vertical_truss(massOne['x'][0], -half, ground, massOne['y'][0]),
        vertical_truss(massOne['x'][0], +half, ground, massOne['y'][0]),
        vertical_truss(massTwo['x'][0], -half, massOne['y'][0], massTwo['y'][0]),
        vertical_truss(massTwo['x'][0], +half, massOne['y'][0], massTwo['y'][0]),
        vertical_truss(massThree['x'][0], -half, massTwo['y'][0], massThree['y'][0]),
        vertical_truss(massThree['x'][0], +half, massTwo['y'][0], massThree['y'][0]),
    ]
def construct_masses_and_supports(length):
    """Create the three storey masses and their horizontal support lines.

    Masses sit on the vertical axis at heights ``length``, ``2 * length`` and
    ``3 * length``; each support is a horizontal segment of width ``length``
    centred on its mass.  Returns ``(masses, massSupports)``.
    """
    masses = [dict(x=[0.0], y=[storey * length]) for storey in (1, 2, 3)]
    half = length / 2
    massSupports = [
        dict(
            x=[mass['x'][0] - half, mass['x'][0] + half],
            y=[mass['y'][0], mass['y'][0]]
        )
        for mass in masses
    ]
    return masses, massSupports
def construct_system(structure, mass, massRatio, bendingStiffness, stiffnessRatio, trussLength):
    """Assemble the M, C, K matrices of the 3-storey shear frame.

    Mutates ``structure`` in place: refreshes the mass/stiffness label
    sources and sets ``structure.M``, ``structure.K`` and ``structure.C``.
    """
    # Refresh the on-plot labels with the current ratios.
    structure.massIndicators.data = dict(
        x=structure.massIndicators.data['x'],
        y=structure.massIndicators.data['y'],
        mass=[f"{ratio}m" for ratio in massRatio[:3]]
    )
    structure.stiffnessIndicators.data = dict(
        x=structure.stiffnessIndicators.data['x'],
        y=structure.stiffnessIndicators.data['y'],
        stiffness=[f"{ratio}EI" for ratio in stiffnessRatio[:3]]
    )
    # Lumped (diagonal) mass matrix, scaled by the reference mass.
    structure.M = np.diag(massRatio[:3]) * mass
    # Tri-diagonal shear-building stiffness pattern; each storey stiffness
    # is 12EI/L^3 (fixed-fixed column).
    k0, k1, k2 = stiffnessRatio[0], stiffnessRatio[1], stiffnessRatio[2]
    structure.K = np.array([
        [k0 + k1, -k1,      0.0],
        [-k1,     k1 + k2, -k2],
        [0.0,     -k2,      k2]
    ]) * 12 * bendingStiffness / trussLength**3
    # Rayleigh damping C = alpha*M + beta*K, with
    #   alpha = 2*w1*w2/(w2**2 - w1**2) * (w2*xsi1 - w1*xsi2)
    #   beta  = 2/(w2**2 - w1**2) * (w2*xsi2 - w1*xsi1)
    # (equation from "EVALUATION OF ADDED MASS MODELING APPROACHES FOR THE
    # DESIGN OF MEMBRANE STRUCTURES BY FULLY COUPLED FSI SIMULATIONS").
    # For this structure w1 = 12.498 rad/s and w2 = 26.722 rad/s; with a 5 %
    # damping ratio (xsi1 = xsi2 = 0.05) this gives alpha = 0.8515 and
    # beta = 0.0026.
    structure.C = 0.8515*structure.M + 0.0026*structure.K
def plot(plot_name, subject, radius, color):
    """Draw the whole structure on ``plot_name``: supports, masses, trusses
    and the fixed base line (always black)."""
    # Three horizontal mass supports.
    for support in subject.massSupports[:3]:
        plot_name.line(x='x', y='y', source=support, color=color, line_width=5)
    # The three storey masses.
    for mass_source in subject.masses[:3]:
        plot_name.circle(x='x', y='y', radius=radius, color=color, source=mass_source)
    # The six connecting trusses.
    for truss in subject.trusses[:6]:
        plot_name.line(x='x', y='y', color=color, source=truss, line_width=2)
    # The base line is drawn last, in black, regardless of ``color``.
    plot_name.line(x='x', y='y', source=subject.base, color='#000000', line_width=5)
def read_seismic_input(file, scale):
    """Parse a strong-motion record file into a ColumnDataSource.

    Line 3 (0-based) is treated as the header: field 2 is the sample count
    (NPTS) and field 6 the time step (DT) — presumably a PEER-style header,
    TODO confirm against the data files.  Lines 4 onward hold accelerations
    in g, which are converted to m/s^2 and multiplied by ``scale``.
    """
    amplitude = []
    npts = 0
    with open(file, 'r') as record:
        for line_no, line in enumerate(record):
            fields = line.split()
            if line_no == 3:
                if len(fields) > 2:
                    npts = int(fields[2])
                if len(fields) > 6:
                    dt = float(fields[6])
            elif line_no >= 4:
                amplitude.extend(float(value) * 9.81 * scale for value in fields)
    # Build the matching time axis from the header values.
    time = [step * dt for step in range(npts)]
    return ColumnDataSource(data=dict(amplitude=np.array(amplitude), time=np.array(time)))
def read_ERS_data(file):
    """Read the elastic response spectrum from a comma-separated data file.

    The tabulated spectrum occupies lines 46..156 (0-based): field 0 is the
    period and field 5 the spectral acceleration in g, converted to m/s^2.
    Returns a ColumnDataSource with ``period`` and ``acceleration`` columns.
    """
    period = []
    acceleration = []
    with open(file, 'r') as stream:
        for line_no, line in enumerate(stream):
            if 45 < line_no < 157:
                fields = line.split(',')
                period.append(float(fields[0]))
                if len(fields) > 5:
                    # multiplication by 9.81 converts g to m/sec/sec
                    acceleration.append(float(fields[5]) * 9.81)
    return ColumnDataSource(data=dict(period=period, acceleration=acceleration))
def read_KOKE_ERS_data(file):
    """Read the Kobe elastic response spectrum from a comma-separated file.

    The tabulated spectrum occupies lines 67..177 (0-based): field 0 is the
    period and field 10 the spectral acceleration in g, converted to m/s^2.
    Returns a ColumnDataSource with ``period`` and ``acceleration`` columns.
    """
    period = []
    acceleration = []
    with open(file, 'r') as stream:
        for line_no, line in enumerate(stream):
            if 66 < line_no < 178:
                fields = line.split(',')
                period.append(float(fields[0]))
                if len(fields) > 10:
                    # multiplication by 9.81 converts g to m/sec/sec
                    acceleration.append(float(fields[10]) * 9.81)
    return ColumnDataSource(data=dict(period=period, acceleration=acceleration))
def Read_ERS_info(file):
    """Return metadata fields from line 34 (0-based) of an ERS data file.

    The original word counter started at -1, so the fields passing the
    ``9 <= counter <= 15`` test are the comma-separated fields at indices
    10 through 16 — reproduced here as a slice.
    """
    with open(file, 'r') as stream:
        for line_no, line in enumerate(stream):
            if line_no == 34:
                return line.split(',')[10:17]
    return []
def Read_Kobe_ERS_info(file):
    """Return metadata fields from line 40 (0-based) of a Kobe ERS file.

    Same counter quirk as ``Read_ERS_info``: the -1 starting counter means
    fields at indices 10 through 16 are collected — reproduced as a slice.
    """
    with open(file, 'r') as stream:
        for line_no, line in enumerate(stream):
            if line_no == 40:
                return line.split(',')[10:17]
    return []
"bokeh.models.ColumnDataSource",
"numpy.zeros",
"numpy.ones",
"numpy.array",
"numpy.linalg.inv",
"numpy.dot",
"numpy.linalg.solve"
] | [((9365, 9381), 'numpy.ones', 'np.ones', (['noNodes'], {}), '(noNodes)\n', (9372, 9381), True, 'import numpy as np\n'), ((9728, 9744), 'numpy.ones', 'np.ones', (['noNodes'], {}), '(noNodes)\n', (9735, 9744), True, 'import numpy as np\n'), ((10207, 10223), 'numpy.zeros', 'np.zeros', (['(3, N)'], {}), '((3, N))\n', (10215, 10223), True, 'import numpy as np\n'), ((10276, 10301), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (10284, 10301), True, 'import numpy as np\n'), ((10309, 10334), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (10317, 10334), True, 'import numpy as np\n'), ((10845, 10870), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (10853, 10870), True, 'import numpy as np\n'), ((10878, 10903), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (10886, 10903), True, 'import numpy as np\n'), ((10911, 10936), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (10919, 10936), True, 'import numpy as np\n'), ((821, 843), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['base'], {}), '(base)\n', (837, 843), False, 'from bokeh.models import ColumnDataSource\n'), ((896, 912), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (904, 912), True, 'import numpy as np\n'), ((929, 945), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (937, 945), True, 'import numpy as np\n'), ((962, 978), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (970, 978), True, 'import numpy as np\n'), ((1041, 1057), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {}), '((3, 2))\n', (1049, 1057), True, 'import numpy as np\n'), ((10771, 10777), 'numpy.linalg.inv', 'inv', (['M'], {}), '(M)\n', (10774, 10777), False, 'from numpy.linalg import inv\n'), ((12726, 12767), 'numpy.dot', 'np.dot', (['M', '(a1m * u0 + a2m * v0 + a3m * a0)'], {}), '(M, a1m * u0 + a2m * v0 + a3m * a0)\n', (12732, 12767), True, 'import numpy as np\n'), 
((12784, 12825), 'numpy.dot', 'np.dot', (['C', '(a1b * u0 + a2b * v0 + a3b * a0)'], {}), '(C, a1b * u0 + a2b * v0 + a3b * a0)\n', (12790, 12825), True, 'import numpy as np\n'), ((12907, 12926), 'numpy.dot', 'np.dot', (['(-M)', 'F[:, i]'], {}), '(-M, F[:, i])\n', (12913, 12926), True, 'import numpy as np\n'), ((12979, 13004), 'numpy.linalg.solve', 'np.linalg.solve', (['LHS', 'RHS'], {}), '(LHS, RHS)\n', (12994, 13004), True, 'import numpy as np\n'), ((17816, 17892), 'numpy.array', 'np.array', (['[[massRatio[0], 0, 0], [0, massRatio[1], 0], [0, 0, massRatio[2]]]'], {}), '([[massRatio[0], 0, 0], [0, massRatio[1], 0], [0, 0, massRatio[2]]])\n', (17824, 17892), True, 'import numpy as np\n'), ((10815, 10828), 'numpy.dot', 'np.dot', (['K', 'x0'], {}), '(K, x0)\n', (10821, 10828), True, 'import numpy as np\n'), ((12843, 12862), 'numpy.dot', 'np.dot', (['(a1k * K)', 'u0'], {}), '(a1k * K, u0)\n', (12849, 12862), True, 'import numpy as np\n'), ((288, 320), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', ([], {'data': 'masses[i]'}), '(data=masses[i])\n', (304, 320), False, 'from bokeh.models import ColumnDataSource\n'), ((481, 519), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', ([], {'data': 'massSupports[i]'}), '(data=massSupports[i])\n', (497, 519), False, 'from bokeh.models import ColumnDataSource\n'), ((670, 703), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', ([], {'data': 'trusses[i]'}), '(data=trusses[i])\n', (686, 703), False, 'from bokeh.models import ColumnDataSource\n'), ((10780, 10799), 'numpy.dot', 'np.dot', (['(-M)', 'F[:, 0]'], {}), '(-M, F[:, 0])\n', (10786, 10799), True, 'import numpy as np\n'), ((10800, 10813), 'numpy.dot', 'np.dot', (['C', 'v0'], {}), '(C, v0)\n', (10806, 10813), True, 'import numpy as np\n'), ((12609, 12628), 'numpy.dot', 'np.dot', (['(-M)', 'F[:, i]'], {}), '(-M, F[:, i])\n', (12615, 12628), True, 'import numpy as np\n'), ((18068, 18277), 'numpy.array', 'np.array', (['[[stiffnessRatio[0] + stiffnessRatio[1], 
-stiffnessRatio[1], 0], [-\n stiffnessRatio[1], stiffnessRatio[1] + stiffnessRatio[2], -\n stiffnessRatio[2]], [0, -stiffnessRatio[2], stiffnessRatio[2]]]'], {}), '([[stiffnessRatio[0] + stiffnessRatio[1], -stiffnessRatio[1], 0], [\n -stiffnessRatio[1], stiffnessRatio[1] + stiffnessRatio[2], -\n stiffnessRatio[2]], [0, -stiffnessRatio[2], stiffnessRatio[2]]])\n', (18076, 18277), True, 'import numpy as np\n'), ((21544, 21563), 'numpy.array', 'np.array', (['amplitude'], {}), '(amplitude)\n', (21552, 21563), True, 'import numpy as np\n'), ((21569, 21583), 'numpy.array', 'np.array', (['time'], {}), '(time)\n', (21577, 21583), True, 'import numpy as np\n')] |
import os
import re
import pandas as pd
import numpy as np
if __name__ == '__main__':
    # Build a (sparse) bag-of-words dataframe from the musiXmatch dataset.
    # NOTE(review): these paths contain unrecognised escape sequences
    # ('\..', '\D', '\l', '\m') — they happen to survive as literal
    # characters today, but raw strings or os.path.join would be safer,
    # and backslashes make the script Windows-only.
    wordListPath = os.path.dirname(__file__)+'\..\Data\Treated\list_of_words.txt'
    datasetPath = os.path.dirname(__file__)+'\..\Data\Treated\mxm_dataset_full.txt'
    outputPath = os.path.dirname(__file__)+'\..\Data\Treated\dataset_dataframe.csv'
    #numberOfRows = 237662 #preallocate space to speed up feeding process
    numberOfRows = 100 #preallocate space to speed up feeding process
    numberOfColumns = 100 #TEST REMOVE
    existingColumnSize = 2 #non-bow column count
    with open(wordListPath) as stream:
        print('Open word list')
        for line in stream: #only one line in LOW
            words = re.split(',', line) [0:numberOfColumns]
            print('Pre-allocate index')
            dex=np.arange(0, numberOfRows)
            print('Pre-allocate dataframe: ' + str(numberOfRows) + ' rows, ' + str(numberOfColumns) + ' columns')
            # NOTE(review): pd.SparseDataFrame was removed in pandas 1.0;
            # this script requires pandas < 1.0 (or a rewrite to
            # pd.DataFrame with sparse dtypes).
            df = pd.SparseDataFrame(index=dex, columns=['track_id','mxm_track_id', *words])
            print(df)
        # NOTE(review): redundant — the with-block already closes the file.
        stream.close()
    index = 0
    with open(datasetPath) as stream:
        print('Open full dataset')
        for line in stream:
            words = re.split(',', line)
            print(df.loc[index])
            print([words[0], words[1], *([0] * numberOfColumns)])
            s = pd.Series([words[0], words[1], *([0] * numberOfColumns)])
            #df.loc[index] = [words[0], words[1], *([0] * numberOfColumns)]
            #print(words[2:len(words)])
            for word in words[2:len(words)]: #word is in format id:count, where id starts at 1, not 0
                tup = re.split(':', word)
                #print(tup)
                # Shift the 1-based word id past the two non-BOW columns.
                columnIndex = int(tup[0])+existingColumnSize-1
                s[columnIndex] = int(tup[1])
            # NOTE(review): df[:,index] is not valid DataFrame indexing and
            # raises TypeError — presumably df.loc[index] = s was intended
            # (see the commented-out line above).
            df[:,index] = s
            print(df)
            #print(df.shape)
            index += 1
    df.to_csv(outputPath, encoding='utf-8')
| [
"re.split",
"os.path.dirname",
"numpy.arange",
"pandas.Series",
"pandas.SparseDataFrame"
] | [((111, 136), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (126, 136), False, 'import os\n'), ((192, 217), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (207, 217), False, 'import os\n'), ((275, 300), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (290, 300), False, 'import os\n'), ((817, 843), 'numpy.arange', 'np.arange', (['(0)', 'numberOfRows'], {}), '(0, numberOfRows)\n', (826, 843), True, 'import numpy as np\n'), ((975, 1050), 'pandas.SparseDataFrame', 'pd.SparseDataFrame', ([], {'index': 'dex', 'columns': "['track_id', 'mxm_track_id', *words]"}), "(index=dex, columns=['track_id', 'mxm_track_id', *words])\n", (993, 1050), True, 'import pandas as pd\n'), ((1235, 1254), 're.split', 're.split', (['""","""', 'line'], {}), "(',', line)\n", (1243, 1254), False, 'import re\n'), ((1396, 1453), 'pandas.Series', 'pd.Series', (['[words[0], words[1], *([0] * numberOfColumns)]'], {}), '([words[0], words[1], *([0] * numberOfColumns)])\n', (1405, 1453), True, 'import pandas as pd\n'), ((721, 740), 're.split', 're.split', (['""","""', 'line'], {}), "(',', line)\n", (729, 740), False, 'import re\n'), ((1707, 1726), 're.split', 're.split', (['""":"""', 'word'], {}), "(':', word)\n", (1715, 1726), False, 'import re\n')] |
import torch
from parameters import *
from PIL import Image
import numpy as np
from typing import Union
# Public transform API re-exported by ``from <this module> import *``.
__all__ = ['Normalize', 'ToTensor', 'Resize', 'AlphaKill',
           'DictNormalize', 'Dict2Tensor',
           'DictResize', 'DictAlphaKill']
class Normalize(object):
    """Divide an ``[y, x, c]`` RGB image by ``scale`` and standardise it
    with ``mean``/``std``.  Returns ``{tag_image: float32 array}``."""
    def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.),
                 scale=255.0):
        self.mean = mean
        self.std = std
        self.scale = scale

    @staticmethod
    def _as_float32(picture):
        """Convert a PIL image or ndarray to float32; other types pass through."""
        if isinstance(picture, Image.Image):
            return np.array(picture).astype(np.float32)
        if isinstance(picture, np.ndarray):
            return picture.astype(np.float32)
        return picture

    def __call__(self, input_dict : {str : Union[Image.Image, np.ndarray]}):
        image = self._as_float32(input_dict[tag_image])
        # In-place ops keep the float32 dtype (the tuple operands would
        # otherwise promote the result to float64).
        image /= self.scale
        image -= self.mean
        image /= self.std
        return {tag_image : image}
class DictNormalize(Normalize):
    """Normalize the image exactly like :class:`Normalize` and also cast the
    label (``[y, x]``, palette or grayscale) to float32.

    With ``gray=True`` the label is additionally divided by ``scale``.
    """
    def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.),
                 scale=255.0, gray=False):
        super(DictNormalize, self).__init__(mean=mean, std=std, scale=scale)
        self.gray_ = gray

    def __call__(self, input_dict : {str : Union[Image.Image, np.ndarray]}):
        normalized = super().__call__(input_dict)
        label = input_dict[tag_label]
        if isinstance(label, Image.Image):
            label = np.array(label).astype(np.float32)
        elif isinstance(label, np.ndarray):
            label = label.astype(np.float32)
        if self.gray_:
            # Grayscale labels share the image value range, so rescale too.
            label /= self.scale
        return {tag_image : normalized[tag_image],
                tag_label : label}
class ToTensor(object):
    """Convert an ``[H, W, C]`` RGB image to a float ``[C, H, W]`` torch tensor."""
    def __init__(self):
        pass

    def __call__(self, input_dict : {str : Union[Image.Image, np.ndarray]}):
        image = input_dict[tag_image]
        if isinstance(image, Image.Image):
            image = np.array(image).astype(np.float32)
        elif isinstance(image, np.ndarray):
            image = image.astype(np.float32)
        # HWC -> CHW, the layout torch modules expect.
        chw = image.transpose((2, 0, 1))
        return {tag_image: torch.from_numpy(chw).float()}
class Dict2Tensor(ToTensor):
    """Tensor-ify both image and label.

    The label is expected as ``[y, x]`` (palette or grayscale).  With
    ``two_dim`` a trailing channel axis is added first; with
    ``boundary_white`` the 255-valued boundary pixels are zeroed.
    """
    def __init__(self, boundary_white=False, two_dim=False):
        super(Dict2Tensor, self).__init__()
        self.boundary_white = boundary_white
        self.two_dim = two_dim

    def __call__(self, input_dict : {str : Union[Image.Image, np.ndarray]}):
        torch_image = super().__call__(input_dict)[tag_image]
        label = input_dict[tag_label]
        if isinstance(label, Image.Image):
            label = np.array(label).astype(np.float32)
        elif isinstance(label, np.ndarray):
            label = label.astype(np.float32)
        if self.two_dim:
            # 2-D label -> 3-D so it can be transposed like an image.
            label = np.expand_dims(label, axis=-1)
        if self.boundary_white:
            # Map the white (255) boundary marker to the background class.
            label[label == 255] = 0
        # HWC -> CHW for torch.Tensor.
        label = label.transpose((2, 0, 1))
        return {tag_image : torch_image,
                tag_label : torch.from_numpy(label).float()}
class Resize(object):
    """Resize the image to ``size`` given as ``(height, width)``."""
    def __init__(self, size:Union[tuple, list], image_mode=Image.BICUBIC):
        # PIL wants (width, height); the caller passes (height, width).
        self.size = tuple(reversed(size))
        self.image_mode = image_mode

    def __call__(self, input_dict : {str : Union[Image.Image, np.ndarray]}):
        image = input_dict[tag_image]
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)
        if isinstance(image, Image.Image):
            image = image.resize(self.size, self.image_mode)
        return {tag_image : image}
class DictResize(Resize):
    """Resize image and label together; the label uses its own resample mode
    (bilinear by default, vs bicubic for the image)."""
    def __init__(self, size:Union[tuple, list], image_mode=Image.BICUBIC, label_mode=Image.BILINEAR):
        super(DictResize, self).__init__(size=size, image_mode=image_mode)
        self.label_mode = label_mode

    def __call__(self, input_dict : {str : Union[Image.Image, np.ndarray]}):
        resized = super().__call__(input_dict=input_dict)
        label = input_dict[tag_label]
        if isinstance(label, np.ndarray):
            label = Image.fromarray(label)
        label = label.resize(self.size, self.label_mode)
        return {tag_image : resized[tag_image],
                tag_label : label}
class AlphaKill(object):
    """Drop the alpha channel: convert 4-channel (RGBA) images to RGB."""
    def __init__(self):
        pass

    def __call__(self, input_dict : {str : Union[Image.Image, np.ndarray]}):
        image = input_dict[tag_image]
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)
        if isinstance(image, Image.Image):
            image = image.convert('RGB')
        return {tag_image : image}
class DictAlphaKill(AlphaKill):
    """Alpha-strip the image; ndarray labels become grayscale PIL images."""
    def __init__(self):
        pass

    def __call__(self, input_dict : {str : Union[Image.Image, np.ndarray]}):
        stripped = super().__call__(input_dict=input_dict)
        label = input_dict[tag_label]
        if isinstance(label, np.ndarray):
            # Only raw arrays are converted; PIL labels pass through as-is.
            label = Image.fromarray(label).convert('L')
        return {tag_image : stripped[tag_image],
                tag_label : label}
| [
"PIL.Image.fromarray",
"numpy.array",
"numpy.expand_dims",
"torch.from_numpy"
] | [((3238, 3268), 'numpy.expand_dims', 'np.expand_dims', (['label'], {'axis': '(-1)'}), '(label, axis=-1)\n', (3252, 3268), True, 'import numpy as np\n'), ((2386, 2409), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (2402, 2409), False, 'import torch\n'), ((3503, 3526), 'torch.from_numpy', 'torch.from_numpy', (['label'], {}), '(label)\n', (3519, 3526), False, 'import torch\n'), ((4167, 4189), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (4182, 4189), False, 'from PIL import Image\n'), ((4836, 4858), 'PIL.Image.fromarray', 'Image.fromarray', (['label'], {}), '(label)\n', (4851, 4858), False, 'from PIL import Image\n'), ((5350, 5372), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (5365, 5372), False, 'from PIL import Image\n'), ((703, 718), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (711, 718), True, 'import numpy as np\n'), ((1551, 1566), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (1559, 1566), True, 'import numpy as np\n'), ((2197, 2212), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2205, 2212), True, 'import numpy as np\n'), ((3043, 3058), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (3051, 3058), True, 'import numpy as np\n'), ((5766, 5788), 'PIL.Image.fromarray', 'Image.fromarray', (['label'], {}), '(label)\n', (5781, 5788), False, 'from PIL import Image\n')] |
# Copyright (c) 2017 The Verde Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
.. _model_selection:
Model Selection
===============
In :ref:`model_evaluation`, we saw how to check the performance of an
interpolator using cross-validation. We found that the default parameters for
:class:`verde.Spline` are not good for predicting our sample air temperature
data. Now, let's see how we can tune the :class:`~verde.Spline` to improve the
cross-validation performance.
Once again, we'll start by importing the required packages and loading our
sample data.
"""
import itertools
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import numpy as np
import pyproj
import verde as vd
data = vd.datasets.fetch_texas_wind()
# Use Mercator projection because Spline is a Cartesian gridder
# (lat_ts sets the latitude of true scale to the data's mean latitude).
projection = pyproj.Proj(proj="merc", lat_ts=data.latitude.mean())
proj_coords = projection(data.longitude.values, data.latitude.values)
region = vd.get_region((data.longitude, data.latitude))
# The desired grid spacing in degrees
# (15 arc-minutes = 0.25 degrees)
spacing = 15 / 60
###############################################################################
# Before we begin tuning, let's reiterate what the results were with the
# default parameters.
spline_default = vd.Spline()
# Average the per-fold cross-validation scores into a single R² value.
score_default = np.mean(
    vd.cross_val_score(spline_default, proj_coords, data.air_temperature_c)
)
spline_default.fit(proj_coords, data.air_temperature_c)
print("R² with defaults:", score_default)
###############################################################################
# Tuning
# ------
#
# :class:`~verde.Spline` has many parameters that can be set to modify the
# final result. Mainly the ``damping`` regularization parameter and the
# ``mindist`` "fudge factor" which smooths the solution. Would changing the
# default values give us a better score?
#
# We can answer these questions by changing the values in our ``spline`` and
# re-evaluating the model score repeatedly for different values of these
# parameters. Let's test the following combinations:
# Candidate values: 4 dampings x 4 mindists = 16 parameter combinations.
dampings = [None, 1e-4, 1e-3, 1e-2]
mindists = [5e3, 10e3, 50e3, 100e3]
# Use itertools to create a list with all combinations of parameters to test
parameter_sets = [
    dict(damping=combo[0], mindist=combo[1])
    for combo in itertools.product(dampings, mindists)
]
print("Number of combinations:", len(parameter_sets))
print("Combinations:", parameter_sets)
###############################################################################
# Now we can loop over the combinations and collect the scores for each
# parameter set.
spline = vd.Spline()
scores = []
for params in parameter_sets:
    spline.set_params(**params)
    score = np.mean(vd.cross_val_score(spline, proj_coords, data.air_temperature_c))
    scores.append(score)
print(scores)
###############################################################################
# The largest score will yield the best parameter combination.
best = np.argmax(scores)
print("Best score:", scores[best])
print("Score with defaults:", score_default)
print("Best parameters:", parameter_sets[best])
###############################################################################
# **That is a nice improvement over our previous score!**
#
# This type of tuning is important and should always be performed when using a
# new gridder or a new dataset. However, the above implementation requires a
# lot of coding. Fortunately, Verde provides convenience classes that perform
# the cross-validation and tuning automatically when fitting a dataset.
###############################################################################
# Cross-validated gridders
# ------------------------
#
# The :class:`verde.SplineCV` class provides a cross-validated version of
# :class:`verde.Spline`. It has almost the same interface but does all of the
# above automatically when fitting a dataset. The only difference is that you
# must provide a list of ``damping`` and ``mindist`` parameters to try instead
# of only a single value:
# SplineCV takes lists of candidate values instead of single parameters.
spline = vd.SplineCV(
    dampings=dampings,
    mindists=mindists,
)
###############################################################################
# Calling :meth:`~verde.SplineCV.fit` will run a grid search over all parameter
# combinations to find the one that maximizes the cross-validation score.
spline.fit(proj_coords, data.air_temperature_c)
###############################################################################
# The estimated best ``damping`` and ``mindist``, as well as the
# cross-validation scores, are stored in class attributes:
print("Highest score:", spline.scores_.max())
print("Best damping:", spline.damping_)
print("Best mindist:", spline.mindist_)
###############################################################################
# The cross-validated gridder can be used like any other gridder (including in
# :class:`verde.Chain` and :class:`verde.Vector`):
grid = spline.grid(
    region=region,
    spacing=spacing,
    projection=projection,
    dims=["latitude", "longitude"],
    data_names="temperature",
)
print(grid)
###############################################################################
# Like :func:`verde.cross_val_score`, :class:`~verde.SplineCV` can also run the
# grid search in parallel using `Dask <https://dask.org/>`__ by specifying the
# ``delayed`` attribute:
# delayed=True schedules the grid search with Dask for parallel execution.
spline = vd.SplineCV(dampings=dampings, mindists=mindists, delayed=True)
###############################################################################
# Unlike :func:`verde.cross_val_score`, calling :meth:`~verde.SplineCV.fit`
# does **not** result in :func:`dask.delayed` objects. The full grid search is
# executed and the optimal parameters are found immediately.
spline.fit(proj_coords, data.air_temperature_c)
print("Best damping:", spline.damping_)
print("Best mindist:", spline.mindist_)
###############################################################################
# The one caveat is the that the ``scores_`` attribute will be a list of
# :func:`dask.delayed` objects instead because the scores are only computed as
# intermediate values in the scheduled computations.
print("Delayed scores:", spline.scores_)
###############################################################################
# Calling :func:`dask.compute` on the scores will calculate their values but
# will unfortunately run the entire grid search again. So using
# ``delayed=True`` is not recommended if you need the scores of each parameter
# combination.
###############################################################################
# The importance of tuning
# ------------------------
#
# To see the difference that tuning has on the results, we can make a grid
# with the best configuration and see how it compares to the default result.
grid_default = spline_default.grid(
region=region,
spacing=spacing,
projection=projection,
dims=["latitude", "longitude"],
data_names="temperature",
)
###############################################################################
# Let's plot our grids side-by-side:
mask = vd.distance_mask(
(data.longitude, data.latitude),
maxdist=3 * spacing * 111e3,
coordinates=vd.grid_coordinates(region, spacing=spacing),
projection=projection,
)
grid = grid.where(mask)
grid_default = grid_default.where(mask)
plt.figure(figsize=(14, 8))
for i, title, grd in zip(range(2), ["Defaults", "Tuned"], [grid_default, grid]):
ax = plt.subplot(1, 2, i + 1, projection=ccrs.Mercator())
ax.set_title(title)
pc = grd.temperature.plot.pcolormesh(
ax=ax,
cmap="plasma",
transform=ccrs.PlateCarree(),
vmin=data.air_temperature_c.min(),
vmax=data.air_temperature_c.max(),
add_colorbar=False,
add_labels=False,
)
plt.colorbar(pc, orientation="horizontal", aspect=50, pad=0.05).set_label("C")
ax.plot(
data.longitude, data.latitude, ".k", markersize=1, transform=ccrs.PlateCarree()
)
vd.datasets.setup_texas_wind_map(ax)
plt.show()
###############################################################################
# Notice that, for sparse data like these, **smoother models tend to be better
# predictors**. This is a sign that you should probably not trust many of the
# short wavelength features that we get from the defaults.
| [
"verde.datasets.setup_texas_wind_map",
"verde.Spline",
"matplotlib.pyplot.show",
"cartopy.crs.Mercator",
"verde.datasets.fetch_texas_wind",
"numpy.argmax",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"verde.grid_coordinates",
"verde.SplineCV",
"verde.cross_val_score",
"itertools.... | [((849, 879), 'verde.datasets.fetch_texas_wind', 'vd.datasets.fetch_texas_wind', ([], {}), '()\n', (877, 879), True, 'import verde as vd\n'), ((1092, 1138), 'verde.get_region', 'vd.get_region', (['(data.longitude, data.latitude)'], {}), '((data.longitude, data.latitude))\n', (1105, 1138), True, 'import verde as vd\n'), ((1389, 1400), 'verde.Spline', 'vd.Spline', ([], {}), '()\n', (1398, 1400), True, 'import verde as vd\n'), ((2718, 2729), 'verde.Spline', 'vd.Spline', ([], {}), '()\n', (2727, 2729), True, 'import verde as vd\n'), ((3080, 3097), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (3089, 3097), True, 'import numpy as np\n'), ((4156, 4205), 'verde.SplineCV', 'vd.SplineCV', ([], {'dampings': 'dampings', 'mindists': 'mindists'}), '(dampings=dampings, mindists=mindists)\n', (4167, 4205), True, 'import verde as vd\n'), ((5487, 5550), 'verde.SplineCV', 'vd.SplineCV', ([], {'dampings': 'dampings', 'mindists': 'mindists', 'delayed': '(True)'}), '(dampings=dampings, mindists=mindists, delayed=True)\n', (5498, 5550), True, 'import verde as vd\n'), ((7454, 7481), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 8)'}), '(figsize=(14, 8))\n', (7464, 7481), True, 'import matplotlib.pyplot as plt\n'), ((8144, 8154), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8152, 8154), True, 'import matplotlib.pyplot as plt\n'), ((1430, 1501), 'verde.cross_val_score', 'vd.cross_val_score', (['spline_default', 'proj_coords', 'data.air_temperature_c'], {}), '(spline_default, proj_coords, data.air_temperature_c)\n', (1448, 1501), True, 'import verde as vd\n'), ((8107, 8143), 'verde.datasets.setup_texas_wind_map', 'vd.datasets.setup_texas_wind_map', (['ax'], {}), '(ax)\n', (8139, 8143), True, 'import verde as vd\n'), ((2405, 2442), 'itertools.product', 'itertools.product', (['dampings', 'mindists'], {}), '(dampings, mindists)\n', (2422, 2442), False, 'import itertools\n'), ((2824, 2887), 'verde.cross_val_score', 
'vd.cross_val_score', (['spline', 'proj_coords', 'data.air_temperature_c'], {}), '(spline, proj_coords, data.air_temperature_c)\n', (2842, 2887), True, 'import verde as vd\n'), ((7313, 7357), 'verde.grid_coordinates', 'vd.grid_coordinates', (['region'], {'spacing': 'spacing'}), '(region, spacing=spacing)\n', (7332, 7357), True, 'import verde as vd\n'), ((7608, 7623), 'cartopy.crs.Mercator', 'ccrs.Mercator', ([], {}), '()\n', (7621, 7623), True, 'import cartopy.crs as ccrs\n'), ((7747, 7765), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (7763, 7765), True, 'import cartopy.crs as ccrs\n'), ((7917, 7980), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['pc'], {'orientation': '"""horizontal"""', 'aspect': '(50)', 'pad': '(0.05)'}), "(pc, orientation='horizontal', aspect=50, pad=0.05)\n", (7929, 7980), True, 'import matplotlib.pyplot as plt\n'), ((8078, 8096), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (8094, 8096), True, 'import cartopy.crs as ccrs\n')] |
from functools import lru_cache
from typing import Union, Tuple, Optional
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle as mpl_Circle
from skimage.measure._regionprops import _RegionProperties
from .geometry import Circle, Point, Rectangle
def bbox_center(region: _RegionProperties) -> Point:
"""Return the center of the bounding box of an scikit-image region.
Parameters
----------
region
A scikit-image region as calculated by skimage.measure.regionprops().
Returns
-------
point : :class:`~pylinac.core.geometry.Point`
"""
bbox = region.bbox
y = abs(bbox[0] - bbox[2]) / 2 + min(bbox[0], bbox[2])
x = abs(bbox[1] - bbox[3]) / 2 + min(bbox[1], bbox[3])
return Point(x, y)
class DiskROI(Circle):
"""An class representing a disk-shaped Region of Interest."""
def __init__(self, array: np.ndarray, angle: Union[float, int], roi_radius: Union[float, int],
dist_from_center: Union[float, int], phantom_center: Union[Tuple, Point]):
"""
Parameters
----------
array : ndarray
The 2D array representing the image the disk is on.
angle : int, float
The angle of the ROI in degrees from the phantom center.
roi_radius : int, float
The radius of the ROI from the center of the phantom.
dist_from_center : int, float
The distance of the ROI from the phantom center.
phantom_center : tuple
The location of the phantom center.
"""
center = self._get_shifted_center(angle, dist_from_center, phantom_center)
super().__init__(center_point=center, radius=roi_radius)
self._array = array
@staticmethod
def _get_shifted_center(angle: Union[float, int], dist_from_center: Union[float, int], phantom_center: Point):
"""The center of the ROI; corrects for phantom dislocation and roll."""
y_shift = np.sin(np.deg2rad(angle)) * dist_from_center
x_shift = np.cos(np.deg2rad(angle)) * dist_from_center
return Point(phantom_center.x + x_shift, phantom_center.y + y_shift)
@property
@lru_cache()
def pixel_value(self) -> np.ndarray:
"""The median pixel value of the ROI."""
masked_img = self.circle_mask()
return np.nanmedian(masked_img)
@property
def std(self) -> np.ndarray:
"""The standard deviation of the pixel values."""
masked_img = self.circle_mask()
return np.nanstd(masked_img)
@lru_cache()
def circle_mask(self) -> np.ndarray:
"""Return a mask of the image, only showing the circular ROI."""
# http://scikit-image.org/docs/dev/auto_examples/plot_camera_numpy.html
masked_array = np.copy(self._array).astype(np.float)
l_x, l_y = self._array.shape[0], self._array.shape[1]
X, Y = np.ogrid[:l_x, :l_y]
outer_disk_mask = (X - self.center.y) ** 2 + (Y - self.center.x) ** 2 > self.radius ** 2
masked_array[outer_disk_mask] = np.NaN
return masked_array
def plot2axes(self, axes=None, edgecolor: str='black', fill: bool=False):
"""Plot the Circle on the axes.
Parameters
----------
axes : matplotlib.axes.Axes
An MPL axes to plot to.
edgecolor : str
The color of the circle.
fill : bool
Whether to fill the circle with color or leave hollow.
"""
if axes is None:
fig, axes = plt.subplots()
axes.imshow(self._array)
axes.add_patch(mpl_Circle((self.center.x, self.center.y), edgecolor=edgecolor, radius=self.radius, fill=fill))
class LowContrastDiskROI(DiskROI):
"""A class for analyzing the low-contrast disks."""
contrast_threshold: Optional[float]
cnr_threshold: Optional[float]
background: Optional[float]
def __init__(self, array, angle, roi_radius, dist_from_center, phantom_center, contrast_threshold=None, background=None,
cnr_threshold=None):
"""
Parameters
----------
contrast_threshold : float, int
The threshold for considering a bubble to be "seen".
"""
super().__init__(array, angle, roi_radius, dist_from_center, phantom_center)
self.contrast_threshold = contrast_threshold
self.cnr_threshold = cnr_threshold
self.background = background
@property
def contrast_to_noise(self) -> float:
"""The contrast to noise ratio of the bubble: (Signal - Background)/Stdev."""
return abs(self.pixel_value - self.background) / self.std
@property
def contrast(self) -> float:
"""The contrast of the bubble compared to background: (ROI - backg) / (ROI + backg)."""
return abs((self.pixel_value - self.background) / (self.pixel_value + self.background))
@property
def cnr_constant(self) -> float:
"""The contrast-to-noise value times the bubble diameter."""
return self.contrast_to_noise * self.diameter
@property
def contrast_constant(self) -> float:
"""The contrast value times the bubble diameter."""
return self.contrast * self.diameter
@property
def passed(self) -> bool:
"""Whether the disk ROI contrast passed."""
return self.contrast > self.contrast_threshold
@property
def passed_contrast_constant(self) -> bool:
"""Boolean specifying if ROI pixel value was within tolerance of the nominal value."""
return self.contrast_constant > self.contrast_threshold
@property
def passed_cnr_constant(self) -> bool:
"""Boolean specifying if ROI pixel value was within tolerance of the nominal value."""
return self.cnr_constant > self.cnr_threshold
@property
def plot_color(self) -> str:
"""Return one of two colors depending on if ROI passed."""
return 'blue' if self.passed else 'red'
@property
def plot_color_constant(self) -> str:
"""Return one of two colors depending on if ROI passed."""
return 'blue' if self.passed_contrast_constant else 'red'
@property
def plot_color_cnr(self) -> str:
"""Return one of two colors depending on if ROI passed."""
return 'blue' if self.passed_cnr_constant else 'red'
class HighContrastDiskROI(DiskROI):
"""A class for analyzing the high-contrast disks."""
contrast_threshold: Optional[float]
def __init__(self, array, angle, roi_radius, dist_from_center, phantom_center, contrast_threshold):
"""
Parameters
----------
contrast_threshold : float, int
The threshold for considering a bubble to be "seen".
"""
super().__init__(array, angle, roi_radius, dist_from_center, phantom_center)
self.contrast_threshold = contrast_threshold
@property
def max(self) -> np.ndarray:
"""The max pixel value of the ROI."""
masked_img = self.circle_mask()
return np.nanmax(masked_img)
@property
def min(self) -> np.ndarray:
"""The min pixel value of the ROI."""
masked_img = self.circle_mask()
return np.nanmin(masked_img)
class RectangleROI(Rectangle):
"""Class that represents a rectangular ROI."""
def __init__(self, array, width, height, angle, dist_from_center, phantom_center):
y_shift = np.sin(np.deg2rad(angle)) * dist_from_center
x_shift = np.cos(np.deg2rad(angle)) * dist_from_center
center = Point(phantom_center.x + x_shift, phantom_center.y + y_shift)
super().__init__(width, height, center, as_int=True)
self._array = array
@property
def pixel_array(self) -> np.ndarray:
"""The pixel array within the ROI."""
return self._array[self.bl_corner.x:self.tr_corner.x, self.bl_corner.y:self.tr_corner.y] | [
"numpy.nanmedian",
"numpy.deg2rad",
"numpy.copy",
"numpy.nanstd",
"numpy.nanmin",
"matplotlib.patches.Circle",
"functools.lru_cache",
"matplotlib.pyplot.subplots",
"numpy.nanmax"
] | [((2198, 2209), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (2207, 2209), False, 'from functools import lru_cache\n'), ((2569, 2580), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (2578, 2580), False, 'from functools import lru_cache\n'), ((2355, 2379), 'numpy.nanmedian', 'np.nanmedian', (['masked_img'], {}), '(masked_img)\n', (2367, 2379), True, 'import numpy as np\n'), ((2541, 2562), 'numpy.nanstd', 'np.nanstd', (['masked_img'], {}), '(masked_img)\n', (2550, 2562), True, 'import numpy as np\n'), ((7059, 7080), 'numpy.nanmax', 'np.nanmax', (['masked_img'], {}), '(masked_img)\n', (7068, 7080), True, 'import numpy as np\n'), ((7230, 7251), 'numpy.nanmin', 'np.nanmin', (['masked_img'], {}), '(masked_img)\n', (7239, 7251), True, 'import numpy as np\n'), ((3545, 3559), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3557, 3559), True, 'import matplotlib.pyplot as plt\n'), ((3620, 3719), 'matplotlib.patches.Circle', 'mpl_Circle', (['(self.center.x, self.center.y)'], {'edgecolor': 'edgecolor', 'radius': 'self.radius', 'fill': 'fill'}), '((self.center.x, self.center.y), edgecolor=edgecolor, radius=self\n .radius, fill=fill)\n', (3630, 3719), True, 'from matplotlib.patches import Circle as mpl_Circle\n'), ((2000, 2017), 'numpy.deg2rad', 'np.deg2rad', (['angle'], {}), '(angle)\n', (2010, 2017), True, 'import numpy as np\n'), ((2063, 2080), 'numpy.deg2rad', 'np.deg2rad', (['angle'], {}), '(angle)\n', (2073, 2080), True, 'import numpy as np\n'), ((2798, 2818), 'numpy.copy', 'np.copy', (['self._array'], {}), '(self._array)\n', (2805, 2818), True, 'import numpy as np\n'), ((7449, 7466), 'numpy.deg2rad', 'np.deg2rad', (['angle'], {}), '(angle)\n', (7459, 7466), True, 'import numpy as np\n'), ((7512, 7529), 'numpy.deg2rad', 'np.deg2rad', (['angle'], {}), '(angle)\n', (7522, 7529), True, 'import numpy as np\n')] |
import numpy as np
import os
import pickle
from torch.utils import data
class WikipediaDataset(data.Dataset):
def __init__(self, data_dir, split="train"):
self.path = data_dir
#self.path = '/Users/tyler/Desktop/lent/advanced_ml/from_git_mar_5/Neural-Statistician/SentEval/words/from_cluster'
self.file_index = 1
self.content_idx = 0
self.content = None
pass
def __getitem__(self, item):
if self.content is None:
this_file = self.path + '/%06i.pkl' % self.file_index
#print(this_file)
in_file = open(this_file, 'rb')
self.content = pickle.load(in_file)
batch = np.array(self.content[self.content_idx]).astype(np.float32)
self.content_idx += 1
if self.content_idx == 10000:
#print('done with file')
self.content = None
self.file_index += 1
self.content_idx = 0
if self.file_index == 500:
self.file_index = 1
return batch
def __len__(self):
return 2000000 #2 million total sentence embeddings
| [
"pickle.load",
"numpy.array"
] | [((567, 587), 'pickle.load', 'pickle.load', (['in_file'], {}), '(in_file)\n', (578, 587), False, 'import pickle\n'), ((599, 639), 'numpy.array', 'np.array', (['self.content[self.content_idx]'], {}), '(self.content[self.content_idx])\n', (607, 639), True, 'import numpy as np\n')] |
import cv2
from scipy.fftpack import fftn
import numpy as np
from matplotlib import pyplot as plt
#img = cv2.imread('131317.jpg',0)
cap = cv2.VideoCapture('vtest.mp4')
fourcc = cv2.VideoWriter_fourcc('X','V','I','D')
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
self_out = cv2.VideoWriter('self_output.mp4',fourcc, 20,(frame_width,frame_height),True)
per_frame_color_fft_out = cv2.VideoWriter('per_frame_color_fft_output.mp4', fourcc, 20,(frame_width,frame_height),True)
per_frame_grey_fft_out = cv2.VideoWriter('per_frame_grey_fft_output.mp4', fourcc, 20,(frame_width,frame_height),True)
t = 0
while(cap.isOpened()) and t < 50:
ret, frame = cap.read()
if ret==True:
frame = cv2.flip(frame,0)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
dft = cv2.dft(np.float32(frame),flags = cv2.DFT_COMPLEX_OUTPUT)
dft_shift = np.fft.fftshift(dft)
magnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))
gray_dft = cv2.dft(np.float32(gray),flags = cv2.DFT_COMPLEX_OUTPUT)
gray_dft_shift = np.fft.fftshift(gray_dft)
gray_magnitude_spectrum = 20*np.log(cv2.magnitude(gray_dft_shift[:,:,0],gray_dft_shift[:,:,1]))
print(t)
t+=1
self_out.write(frame)
per_frame_color_fft_out.write(magnitude_spectrum)
per_frame_grey_fft_out.write(gray_magnitude_spectrum)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
# Release everything if job is finished
cap.release()
out.release()
cv2.destroyAllWindows()
#f = np.fft.fft2(frame)
#fshift = np.fft.fftshift(f)
#magnitude_spectrum = 20*np.log(np.abs(fshift))
#magnitude_spectrum *= 255.0/magnitude_spectrum.max()
#print(np.unique(magnitude_spectrum))
#cv2.imshow('magnitude_spectrum',magnitude_spectrum)
#t+=1
#f = np.fft.fft2(img)
#fshift = np.fft.fftshift(f)
#magnitude_spectrum = 20*np.log(np.abs(fshift))
#plt.subplot(121),plt.imshow(img, cmap = 'gray')
#plt.title('Input Image'), plt.xticks([]), plt.yticks([])
#plt.subplot(122),plt.imshow(magnitude_spectrum, cmap = 'gray')
#plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
#plt.show()
| [
"cv2.magnitude",
"cv2.VideoWriter_fourcc",
"cv2.cvtColor",
"cv2.waitKey",
"numpy.float32",
"cv2.VideoCapture",
"numpy.fft.fftshift",
"cv2.VideoWriter",
"cv2.flip",
"cv2.destroyAllWindows"
] | [((151, 180), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""vtest.mp4"""'], {}), "('vtest.mp4')\n", (167, 180), False, 'import cv2\n'), ((193, 235), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""X"""', '"""V"""', '"""I"""', '"""D"""'], {}), "('X', 'V', 'I', 'D')\n", (215, 235), False, 'import cv2\n'), ((310, 395), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""self_output.mp4"""', 'fourcc', '(20)', '(frame_width, frame_height)', '(True)'], {}), "('self_output.mp4', fourcc, 20, (frame_width, frame_height),\n True)\n", (325, 395), False, 'import cv2\n'), ((415, 515), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""per_frame_color_fft_output.mp4"""', 'fourcc', '(20)', '(frame_width, frame_height)', '(True)'], {}), "('per_frame_color_fft_output.mp4', fourcc, 20, (frame_width,\n frame_height), True)\n", (430, 515), False, 'import cv2\n'), ((539, 638), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""per_frame_grey_fft_output.mp4"""', 'fourcc', '(20)', '(frame_width, frame_height)', '(True)'], {}), "('per_frame_grey_fft_output.mp4', fourcc, 20, (frame_width,\n frame_height), True)\n", (554, 638), False, 'import cv2\n'), ((1624, 1647), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1645, 1647), False, 'import cv2\n'), ((745, 763), 'cv2.flip', 'cv2.flip', (['frame', '(0)'], {}), '(frame, 0)\n', (753, 763), False, 'import cv2\n'), ((785, 824), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (797, 824), False, 'import cv2\n'), ((921, 941), 'numpy.fft.fftshift', 'np.fft.fftshift', (['dft'], {}), '(dft)\n', (936, 941), True, 'import numpy as np\n'), ((1137, 1162), 'numpy.fft.fftshift', 'np.fft.fftshift', (['gray_dft'], {}), '(gray_dft)\n', (1152, 1162), True, 'import numpy as np\n'), ((850, 867), 'numpy.float32', 'np.float32', (['frame'], {}), '(frame)\n', (860, 867), True, 'import numpy as np\n'), ((1062, 1078), 'numpy.float32', 'np.float32', (['gray'], {}), '(gray)\n', (1072, 1078), True, 
'import numpy as np\n'), ((982, 1035), 'cv2.magnitude', 'cv2.magnitude', (['dft_shift[:, :, 0]', 'dft_shift[:, :, 1]'], {}), '(dft_shift[:, :, 0], dft_shift[:, :, 1])\n', (995, 1035), False, 'import cv2\n'), ((1208, 1271), 'cv2.magnitude', 'cv2.magnitude', (['gray_dft_shift[:, :, 0]', 'gray_dft_shift[:, :, 1]'], {}), '(gray_dft_shift[:, :, 0], gray_dft_shift[:, :, 1])\n', (1221, 1271), False, 'import cv2\n'), ((1469, 1483), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1480, 1483), False, 'import cv2\n')] |
"""
Tutorial 3: Null models for gradient significance
==================================================
In this tutorial we assess the significance of correlations between the first
canonical gradient and data from other modalities (curvature, cortical
thickness and T1w/T2w image intensity). A normal test of the significance of
the correlation cannot be used, because the spatial auto-correlation in MRI
data may bias the test statistic. In this tutorial we will show three
approaches for null hypothesis testing: spin permutations, Moran spectral
randomization, and autocorrelation-preserving surrogates based on variogram
matching.
.. note::
When using either approach to compare gradients to non-gradient markers,
we recommend randomizing the non-gradient markers as these randomizations
need not maintain the statistical independence between gradients.
"""
###############################################################################
# Spin Permutations
# ------------------------------
#
# Here, we use the spin permutations approach previously proposed in
# `(<NAME> al., 2018)
# <https://www.sciencedirect.com/science/article/pii/S1053811918304968>`_,
# which preserves the auto-correlation of the permuted feature(s) by rotating
# the feature data on the spherical domain.
# We will start by loading the conte69 surfaces for left and right hemispheres,
# their corresponding spheres, midline mask, and t1w/t2w intensity as well as
# cortical thickness data, and a template functional gradient.
import numpy as np
from brainspace.datasets import load_gradient, load_marker, load_conte69
# load the conte69 hemisphere surfaces and spheres
surf_lh, surf_rh = load_conte69()
sphere_lh, sphere_rh = load_conte69(as_sphere=True)
# Load the data
t1wt2w_lh, t1wt2w_rh = load_marker('t1wt2w')
t1wt2w = np.concatenate([t1wt2w_lh, t1wt2w_rh])
thickness_lh, thickness_rh = load_marker('thickness')
thickness = np.concatenate([thickness_lh, thickness_rh])
# Template functional gradient
embedding = load_gradient('fc', idx=0, join=True)
###############################################################################
# Let’s first generate some null data using spintest.
from brainspace.null_models import SpinPermutations
from brainspace.plotting import plot_hemispheres
# Let's create some rotations
n_rand = 1000
sp = SpinPermutations(n_rep=n_rand, random_state=0)
sp.fit(sphere_lh, points_rh=sphere_rh)
t1wt2w_rotated = np.hstack(sp.randomize(t1wt2w_lh, t1wt2w_rh))
thickness_rotated = np.hstack(sp.randomize(thickness_lh, thickness_rh))
###############################################################################
# As an illustration of the rotation, let’s plot the original t1w/t2w data
# Plot original data
plot_hemispheres(surf_lh, surf_rh, array_name=t1wt2w, size=(1200, 200), cmap='viridis',
nan_color=(0.5, 0.5, 0.5, 1), color_bar=True, zoom=1.65)
###############################################################################
# as well as a few rotated versions.
# sphinx_gallery_thumbnail_number = 2
# Plot some rotations
plot_hemispheres(surf_lh, surf_rh, array_name=t1wt2w_rotated[:3], size=(1200, 600),
cmap='viridis', nan_color=(0.5, 0.5, 0.5, 1), color_bar=True,
zoom=1.55, label_text=['Rot0', 'Rot1', 'Rot2'])
###############################################################################
#
# .. warning::
#
# With spin permutations, midline vertices (i.e,, NaNs) from both the
# original and rotated data are discarded. Depending on the overlap of
# midlines in the, statistical comparisons between them may compare
# different numbers of features. This can bias your test statistics.
# Therefore, if a large portion of the sphere is not used, we recommend
# using Moran spectral randomization instead.
#
# Now we simply compute the correlations between the first gradient and the
# original data, as well as all rotated data.
from matplotlib import pyplot as plt
from scipy.stats import spearmanr
fig, axs = plt.subplots(1, 2, figsize=(9, 3.5))
feats = {'t1wt2w': t1wt2w, 'thickness': thickness}
rotated = {'t1wt2w': t1wt2w_rotated, 'thickness': thickness_rotated}
r_spin = np.empty(n_rand)
mask = ~np.isnan(thickness)
for k, (fn, feat) in enumerate(feats.items()):
r_obs, pv_obs = spearmanr(feat[mask], embedding[mask])
# Compute perm pval
for i, perm in enumerate(rotated[fn]):
mask_rot = mask & ~np.isnan(perm) # Remove midline
r_spin[i] = spearmanr(perm[mask_rot], embedding[mask_rot])[0]
pv_spin = np.mean(np.abs(r_spin) >= np.abs(r_obs))
# Plot null dist
axs[k].hist(r_spin, bins=25, density=True, alpha=0.5, color=(.8, .8, .8))
axs[k].axvline(r_obs, lw=2, ls='--', color='k')
axs[k].set_xlabel(f'Correlation with {fn}')
if k == 0:
axs[k].set_ylabel('Density')
print(f'{fn.capitalize()}:\n Obs : {pv_obs:.5e}\n Spin: {pv_spin:.5e}\n')
fig.tight_layout()
plt.show()
###############################################################################
# It is interesting to see that both p-values increase when taking into
# consideration the auto-correlation present in the surfaces. Also, we can see
# that the correlation with thickness is no longer statistically significant
# after spin permutations.
#
#
#
# Moran Spectral Randomization
# ------------------------------
#
# Moran Spectral Randomization (MSR) computes Moran's I, a metric for spatial
# auto-correlation and generates normally distributed data with similar
# auto-correlation. MSR relies on a weight matrix denoting the spatial
# proximity of features to one another. Within neuroimaging, one
# straightforward example of this is inverse geodesic distance i.e. distance
# along the cortical surface.
#
# In this example we will show how to use MSR to assess statistical
# significance between cortical markers (here curvature and cortical t1wt2w
# intensity) and the first functional connectivity gradient. We will start by
# loading the left temporal lobe mask, t1w/t2w intensity as well as cortical
# thickness data, and a template functional gradient
from brainspace.datasets import load_mask
n_pts_lh = surf_lh.n_points
mask_tl, _ = load_mask(name='temporal')
# Keep only the temporal lobe.
embedding_tl = embedding[:n_pts_lh][mask_tl]
t1wt2w_tl = t1wt2w_lh[mask_tl]
curv_tl = load_marker('curvature')[0][mask_tl]
###############################################################################
# We will now compute the Moran eigenvectors. This can be done either by
# providing a weight matrix of spatial proximity between each vertex, or by
# providing a cortical surface. Here we’ll use a cortical surface.
from brainspace.null_models import MoranRandomization
from brainspace.mesh import mesh_elements as me
# compute spatial weight matrix
w = me.get_ring_distance(surf_lh, n_ring=1, mask=mask_tl)
w.data **= -1
msr = MoranRandomization(n_rep=n_rand, procedure='singleton', tol=1e-6,
random_state=0)
msr.fit(w)
###############################################################################
# Using the Moran eigenvectors we can now compute the randomized data.
curv_rand = msr.randomize(curv_tl)
t1wt2w_rand = msr.randomize(t1wt2w_tl)
###############################################################################
# Now that we have the randomized data, we can compute correlations between
# the gradient and the real/randomised data and generate the non-parametric
# p-values.
fig, axs = plt.subplots(1, 2, figsize=(9, 3.5))
feats = {'t1wt2w': t1wt2w_tl, 'curvature': curv_tl}
rand = {'t1wt2w': t1wt2w_rand, 'curvature': curv_rand}
for k, (fn, data) in enumerate(rand.items()):
r_obs, pv_obs = spearmanr(feats[fn], embedding_tl, nan_policy='omit')
# Compute perm pval
r_rand = np.asarray([spearmanr(embedding_tl, d)[0] for d in data])
pv_rand = np.mean(np.abs(r_rand) >= np.abs(r_obs))
# Plot null dist
axs[k].hist(r_rand, bins=25, density=True, alpha=0.5, color=(.8, .8, .8))
axs[k].axvline(r_obs, lw=2, ls='--', color='k')
axs[k].set_xlabel(f'Correlation with {fn}')
if k == 0:
axs[k].set_ylabel('Density')
print(f'{fn.capitalize()}:\n Obs : {pv_obs:.5e}\n Moran: {pv_rand:.5e}\n')
fig.tight_layout()
plt.show()
###############################################################################
#
#
#
#
# Variogram Matching
# ------------------
#
# Here, we will repeat the same analysis using the variogram matching approach
# presented in `(Burt et al., 2020)
# <https://www.sciencedirect.com/science/article/pii/S1053811920305243>`_,
# which generates novel brainmaps with similar spatial autocorrelation to the
# input data.
#
#
# We will need a distance matrix that tells us what the spatial distance
# between our datapoints is. For this example, we will use geodesic distance.
from brainspace.mesh.mesh_elements import get_immediate_distance
from scipy.sparse.csgraph import dijkstra
# Compute geodesic distance
gd = get_immediate_distance(surf_lh, mask=mask_tl)
gd = dijkstra(gd, directed=False)
idx_sorted = np.argsort(gd, axis=1)
###############################################################################
# Now we've got everything we need to generate our surrogate datasets. By
# default, BrainSpace will use all available data to generate surrogate maps.
# However, this process is extremely computationally and memory intensive. When
# using this method with more than a few hundred regions, we recommend
# subsampling the data. This can be done using SampledSurrogateMaps instead of
# the SurrogateMaps.
from brainspace.null_models import SampledSurrogateMaps
n_surrogate_datasets = 1000
# Note: number samples must be greater than number neighbors
num_samples = 100
num_neighbors = 50
ssm = SampledSurrogateMaps(ns=num_samples, knn=num_neighbors, random_state=0)
ssm.fit(gd, idx_sorted)
t1wt2w_surrogates = ssm.randomize(t1wt2w_tl, n_rep=n_surrogate_datasets)
curv_surrogates = ssm.randomize(curv_tl, n_rep=n_surrogate_datasets)
###############################################################################
# Similar to the previous case, we can now plot the results:
import matplotlib.pyplot as plt
from scipy.stats import spearmanr
fig, axs = plt.subplots(1, 2, figsize=(9, 3.5))
feats = {'t1wt2w': t1wt2w_tl, 'curvature': curv_tl}
rand = {'t1wt2w': t1wt2w_surrogates, 'curvature': curv_surrogates}
for k, (fn, data) in enumerate(rand.items()):
r_obs, pv_obs = spearmanr(feats[fn], embedding_tl, nan_policy='omit')
# Compute perm pval
r_rand = np.asarray([spearmanr(embedding_tl, d)[0] for d in data])
pv_rand = np.mean(np.abs(r_rand) >= np.abs(r_obs))
# Plot null dist
axs[k].hist(r_rand, bins=25, density=True, alpha=0.5, color=(.8, .8, .8))
axs[k].axvline(r_obs, lw=2, ls='--', color='k')
axs[k].set_xlabel(f'Correlation with {fn}')
if k == 0:
axs[k].set_ylabel('Density')
print(f'{fn.capitalize()}:\n Obs : {pv_obs:.5e}\n '
f'Variogram: {pv_rand:.5e}\n')
fig.tight_layout()
plt.show()
| [
"numpy.abs",
"brainspace.datasets.load_marker",
"numpy.empty",
"numpy.isnan",
"numpy.argsort",
"brainspace.plotting.plot_hemispheres",
"scipy.sparse.csgraph.dijkstra",
"brainspace.datasets.load_gradient",
"brainspace.datasets.load_mask",
"matplotlib.pyplot.subplots",
"brainspace.mesh.mesh_elemen... | [((1689, 1703), 'brainspace.datasets.load_conte69', 'load_conte69', ([], {}), '()\n', (1701, 1703), False, 'from brainspace.datasets import load_gradient, load_marker, load_conte69\n'), ((1727, 1755), 'brainspace.datasets.load_conte69', 'load_conte69', ([], {'as_sphere': '(True)'}), '(as_sphere=True)\n', (1739, 1755), False, 'from brainspace.datasets import load_gradient, load_marker, load_conte69\n'), ((1796, 1817), 'brainspace.datasets.load_marker', 'load_marker', (['"""t1wt2w"""'], {}), "('t1wt2w')\n", (1807, 1817), False, 'from brainspace.datasets import load_gradient, load_marker, load_conte69\n'), ((1827, 1865), 'numpy.concatenate', 'np.concatenate', (['[t1wt2w_lh, t1wt2w_rh]'], {}), '([t1wt2w_lh, t1wt2w_rh])\n', (1841, 1865), True, 'import numpy as np\n'), ((1896, 1920), 'brainspace.datasets.load_marker', 'load_marker', (['"""thickness"""'], {}), "('thickness')\n", (1907, 1920), False, 'from brainspace.datasets import load_gradient, load_marker, load_conte69\n'), ((1933, 1977), 'numpy.concatenate', 'np.concatenate', (['[thickness_lh, thickness_rh]'], {}), '([thickness_lh, thickness_rh])\n', (1947, 1977), True, 'import numpy as np\n'), ((2022, 2059), 'brainspace.datasets.load_gradient', 'load_gradient', (['"""fc"""'], {'idx': '(0)', 'join': '(True)'}), "('fc', idx=0, join=True)\n", (2035, 2059), False, 'from brainspace.datasets import load_gradient, load_marker, load_conte69\n'), ((2349, 2395), 'brainspace.null_models.SpinPermutations', 'SpinPermutations', ([], {'n_rep': 'n_rand', 'random_state': '(0)'}), '(n_rep=n_rand, random_state=0)\n', (2365, 2395), False, 'from brainspace.null_models import SpinPermutations\n'), ((2750, 2898), 'brainspace.plotting.plot_hemispheres', 'plot_hemispheres', (['surf_lh', 'surf_rh'], {'array_name': 't1wt2w', 'size': '(1200, 200)', 'cmap': '"""viridis"""', 'nan_color': '(0.5, 0.5, 0.5, 1)', 'color_bar': '(True)', 'zoom': '(1.65)'}), "(surf_lh, surf_rh, array_name=t1wt2w, size=(1200, 200),\n 
cmap='viridis', nan_color=(0.5, 0.5, 0.5, 1), color_bar=True, zoom=1.65)\n", (2766, 2898), False, 'from brainspace.plotting import plot_hemispheres\n'), ((3092, 3295), 'brainspace.plotting.plot_hemispheres', 'plot_hemispheres', (['surf_lh', 'surf_rh'], {'array_name': 't1wt2w_rotated[:3]', 'size': '(1200, 600)', 'cmap': '"""viridis"""', 'nan_color': '(0.5, 0.5, 0.5, 1)', 'color_bar': '(True)', 'zoom': '(1.55)', 'label_text': "['Rot0', 'Rot1', 'Rot2']"}), "(surf_lh, surf_rh, array_name=t1wt2w_rotated[:3], size=(\n 1200, 600), cmap='viridis', nan_color=(0.5, 0.5, 0.5, 1), color_bar=\n True, zoom=1.55, label_text=['Rot0', 'Rot1', 'Rot2'])\n", (3108, 3295), False, 'from brainspace.plotting import plot_hemispheres\n'), ((4043, 4079), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(9, 3.5)'}), '(1, 2, figsize=(9, 3.5))\n', (4055, 4079), True, 'import matplotlib.pyplot as plt\n'), ((4211, 4227), 'numpy.empty', 'np.empty', (['n_rand'], {}), '(n_rand)\n', (4219, 4227), True, 'import numpy as np\n'), ((4966, 4976), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4974, 4976), True, 'import matplotlib.pyplot as plt\n'), ((6218, 6244), 'brainspace.datasets.load_mask', 'load_mask', ([], {'name': '"""temporal"""'}), "(name='temporal')\n", (6227, 6244), False, 'from brainspace.datasets import load_mask\n'), ((6838, 6891), 'brainspace.mesh.mesh_elements.get_ring_distance', 'me.get_ring_distance', (['surf_lh'], {'n_ring': '(1)', 'mask': 'mask_tl'}), '(surf_lh, n_ring=1, mask=mask_tl)\n', (6858, 6891), True, 'from brainspace.mesh import mesh_elements as me\n'), ((6914, 7000), 'brainspace.null_models.MoranRandomization', 'MoranRandomization', ([], {'n_rep': 'n_rand', 'procedure': '"""singleton"""', 'tol': '(1e-06)', 'random_state': '(0)'}), "(n_rep=n_rand, procedure='singleton', tol=1e-06,\n random_state=0)\n", (6932, 7000), False, 'from brainspace.null_models import MoranRandomization\n'), ((7518, 7554), 'matplotlib.pyplot.subplots', 
'plt.subplots', (['(1)', '(2)'], {'figsize': '(9, 3.5)'}), '(1, 2, figsize=(9, 3.5))\n', (7530, 7554), True, 'import matplotlib.pyplot as plt\n'), ((8288, 8298), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8296, 8298), True, 'import matplotlib.pyplot as plt\n'), ((9013, 9058), 'brainspace.mesh.mesh_elements.get_immediate_distance', 'get_immediate_distance', (['surf_lh'], {'mask': 'mask_tl'}), '(surf_lh, mask=mask_tl)\n', (9035, 9058), False, 'from brainspace.mesh.mesh_elements import get_immediate_distance\n'), ((9064, 9092), 'scipy.sparse.csgraph.dijkstra', 'dijkstra', (['gd'], {'directed': '(False)'}), '(gd, directed=False)\n', (9072, 9092), False, 'from scipy.sparse.csgraph import dijkstra\n'), ((9107, 9129), 'numpy.argsort', 'np.argsort', (['gd'], {'axis': '(1)'}), '(gd, axis=1)\n', (9117, 9129), True, 'import numpy as np\n'), ((9808, 9879), 'brainspace.null_models.SampledSurrogateMaps', 'SampledSurrogateMaps', ([], {'ns': 'num_samples', 'knn': 'num_neighbors', 'random_state': '(0)'}), '(ns=num_samples, knn=num_neighbors, random_state=0)\n', (9828, 9879), False, 'from brainspace.null_models import SampledSurrogateMaps\n'), ((10268, 10304), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(9, 3.5)'}), '(1, 2, figsize=(9, 3.5))\n', (10280, 10304), True, 'import matplotlib.pyplot as plt\n'), ((11072, 11082), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11080, 11082), True, 'import matplotlib.pyplot as plt\n'), ((4236, 4255), 'numpy.isnan', 'np.isnan', (['thickness'], {}), '(thickness)\n', (4244, 4255), True, 'import numpy as np\n'), ((4323, 4361), 'scipy.stats.spearmanr', 'spearmanr', (['feat[mask]', 'embedding[mask]'], {}), '(feat[mask], embedding[mask])\n', (4332, 4361), False, 'from scipy.stats import spearmanr\n'), ((7730, 7783), 'scipy.stats.spearmanr', 'spearmanr', (['feats[fn]', 'embedding_tl'], {'nan_policy': '"""omit"""'}), "(feats[fn], embedding_tl, nan_policy='omit')\n", (7739, 7783), False, 'from 
scipy.stats import spearmanr\n'), ((10492, 10545), 'scipy.stats.spearmanr', 'spearmanr', (['feats[fn]', 'embedding_tl'], {'nan_policy': '"""omit"""'}), "(feats[fn], embedding_tl, nan_policy='omit')\n", (10501, 10545), False, 'from scipy.stats import spearmanr\n'), ((6363, 6387), 'brainspace.datasets.load_marker', 'load_marker', (['"""curvature"""'], {}), "('curvature')\n", (6374, 6387), False, 'from brainspace.datasets import load_gradient, load_marker, load_conte69\n'), ((4510, 4556), 'scipy.stats.spearmanr', 'spearmanr', (['perm[mask_rot]', 'embedding[mask_rot]'], {}), '(perm[mask_rot], embedding[mask_rot])\n', (4519, 4556), False, 'from scipy.stats import spearmanr\n'), ((4582, 4596), 'numpy.abs', 'np.abs', (['r_spin'], {}), '(r_spin)\n', (4588, 4596), True, 'import numpy as np\n'), ((4600, 4613), 'numpy.abs', 'np.abs', (['r_obs'], {}), '(r_obs)\n', (4606, 4613), True, 'import numpy as np\n'), ((7902, 7916), 'numpy.abs', 'np.abs', (['r_rand'], {}), '(r_rand)\n', (7908, 7916), True, 'import numpy as np\n'), ((7920, 7933), 'numpy.abs', 'np.abs', (['r_obs'], {}), '(r_obs)\n', (7926, 7933), True, 'import numpy as np\n'), ((10664, 10678), 'numpy.abs', 'np.abs', (['r_rand'], {}), '(r_rand)\n', (10670, 10678), True, 'import numpy as np\n'), ((10682, 10695), 'numpy.abs', 'np.abs', (['r_obs'], {}), '(r_obs)\n', (10688, 10695), True, 'import numpy as np\n'), ((4457, 4471), 'numpy.isnan', 'np.isnan', (['perm'], {}), '(perm)\n', (4465, 4471), True, 'import numpy as np\n'), ((7834, 7860), 'scipy.stats.spearmanr', 'spearmanr', (['embedding_tl', 'd'], {}), '(embedding_tl, d)\n', (7843, 7860), False, 'from scipy.stats import spearmanr\n'), ((10596, 10622), 'scipy.stats.spearmanr', 'spearmanr', (['embedding_tl', 'd'], {}), '(embedding_tl, d)\n', (10605, 10622), False, 'from scipy.stats import spearmanr\n')] |
"""Functions for supporting image operations on a canvas (currently
only numpy arrays are supported).
"""
# standard imports
from typing import Optional
# thirdparty imports
import numpy as np
# toolbox imports
from ..base.image import Image, Imagelike
from ..base.image import Size, Sizelike
def canvas_create(image: Optional[Imagelike] = None, copy: bool = True,
                  size: Optional[Sizelike] = None,
                  channels: int = 3) -> np.ndarray:
    """Create a new canvas, optionally initialized from an image.

    If the given image already has the requested size (or no size was
    given) it is used as the canvas directly; otherwise a fresh zeroed
    canvas is allocated and the image is pasted onto it.

    Arguments
    ---------
    image:
        An image used to initialize the canvas.
    copy:
        If `False`, the image is reused without copying (if possible),
        so canvas operations act directly on that image.
    size:
        The desired canvas size.
    channels:
        Number of color channels for a newly allocated canvas.

    Raises
    ------
    ValueError:
        If neither `image` nor `size` is provided.
    """
    if size is not None:
        size = Size(size)
    if image is not None:
        image = Image.as_array(image, copy=copy)
    if image is None and size is None:
        raise ValueError("Neither image nor size for new canvas "
                         "was specified.")

    # Reuse the image itself as canvas when it already matches the
    # requested size (or when no explicit size was requested).
    needs_new_canvas = size is not None and (
        image is None or (size.height, size.width) != image.shape[:2])
    if not needs_new_canvas:
        return image

    canvas = np.zeros((size.height, size.width, channels), dtype=np.uint8)
    if image is not None:
        canvas_add_image(canvas, image)
    return canvas
def _adapt_slice(the_slice: slice, diff: int) -> slice:
diff1 = diff // 2
diff2 = diff - diff1
return slice(the_slice.start + diff1, the_slice.stop - diff2,
the_slice.step)
def canvas_add_image(canvas: np.ndarray, image: Imagelike,
                     rows: int = 1, columns: int = 1, index: int = 1) -> None:
    """Paste an image, centered, into one cell of a grid on the canvas.

    If the image is larger than the cell it is center-cropped; if it is
    smaller, it is centered inside the cell.

    Arguments
    ---------
    canvas:
        The canvas to which to add the image (modified in place).
    image:
        The image to add to the canvas.
    rows:
        Rows of the canvas grid.
    columns:
        Columns of the canvas grid.
    index:
        Index of the cell in the canvas grid, starting with 1 (like
        matplotlib subplots).
    """
    image = Image.as_array(image)
    cell = index - 1
    row, column = cell // columns, cell % columns
    cell_height = canvas.shape[0] // rows
    cell_width = canvas.shape[1] // columns

    def _fit(canvas_slice: slice, image_extent: int,
             cell_extent: int) -> tuple:
        """Pair a (possibly shrunk) canvas slice with an image slice of
        equal length: crop the image when too large, center it when too
        small."""
        image_slice = slice(0, image_extent)
        overhang = image_extent - cell_extent
        if overhang > 0:
            image_slice = _adapt_slice(image_slice, overhang)
        elif overhang < 0:
            canvas_slice = _adapt_slice(canvas_slice, -overhang)
        return canvas_slice, image_slice

    canvas_x, image_x = _fit(slice(column * cell_width,
                                   (column + 1) * cell_width),
                             image.shape[1], cell_width)
    canvas_y, image_y = _fit(slice(row * cell_height,
                                   (row + 1) * cell_height),
                             image.shape[0], cell_height)
    canvas[canvas_y, canvas_x] = image[image_y, image_x]
| [
"numpy.zeros"
] | [((1268, 1329), 'numpy.zeros', 'np.zeros', (['(size.height, size.width, channels)'], {'dtype': 'np.uint8'}), '((size.height, size.width, channels), dtype=np.uint8)\n', (1276, 1329), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import scipy.linalg
from pyscf.pbc.gto import Cell
from pyscf.pbc.tools import k2gamma
from pyscf.pbc.scf import rsjk
cell = Cell().build(
a = np.eye(3)*1.8,
atom = '''He 0. 0. 0.
He 0.4917 0.4917 0.4917''',
basis = {'He': [[0, [2.5, 1]]]})
cell1 = Cell().build(
a = np.eye(3)*2.6,
atom = '''He 0.4917 0.4917 0.4917''',
basis = {'He': [[0, [4.8, 1, -.1],
[1.1, .3, .5],
[0.15, .2, .8]],
[1, [0.8, 1]],]})
def tearDownModule():
    """Release the module-level test cells once the suite has finished."""
    global cell, cell1
    del cell
    del cell1
class KnowValues(unittest.TestCase):
    """Regression tests comparing rsjk.RangeSeparationJKBuilder against the
    reference KRHF get_jk implementation.

    The two public test methods were near-duplicates differing only in the
    cell; the shared logic now lives in :meth:`_check_get_jk`.  The unused
    reference energies (``ej``/``ek``) computed by the originals were dead
    code and have been removed.
    """

    def _check_get_jk(self, cel):
        """Build J/K matrices with the range-separated builder for *cel* and
        assert they agree with the KRHF reference.

        A random Hermitian density matrix is mapped to the supercell
        representation, its real part is kept, and it is mapped back, so
        the k-point density matrix corresponds to a real density.
        """
        kpts = cel.make_kpts([3, 1, 1])
        np.random.seed(1)
        nao = cel.nao
        dm = (np.random.rand(len(kpts), nao, nao) +
              np.random.rand(len(kpts), nao, nao) * 1j)
        dm = dm + dm.transpose(0, 2, 1).conj()

        # Symmetrize over the k-point mesh (real supercell density).
        kmesh = k2gamma.kpts_to_kmesh(cel, kpts)
        phase = k2gamma.get_phase(cel, kpts, kmesh)[1]
        dm = np.einsum('Rk,kuv,Sk->RSuv', phase.conj().T, dm, phase.T)
        dm = np.einsum('Rk,RSuv,Sk->kuv', phase, dm.real, phase.conj())

        mf = cel.KRHF(kpts=kpts)
        jref, kref = mf.get_jk(cel, dm, kpts=kpts)

        jk_builder = rsjk.RangeSeparationJKBuilder(cel, kpts)
        jk_builder.omega = 0.5
        # Exercise the default-hermi and hermi=0 paths, each with J+K,
        # J-only and K-only requests, as the original tests did.
        for extra in ({}, {'hermi': 0}):
            vj, vk = jk_builder.get_jk(dm, kpts=kpts, exxdiv=mf.exxdiv,
                                       **extra)
            self.assertAlmostEqual(abs(vj - jref).max(), 0, 7)
            self.assertAlmostEqual(abs(vk - kref).max(), 0, 7)
            vj, vk = jk_builder.get_jk(dm, kpts=kpts, exxdiv=mf.exxdiv,
                                       with_k=False, **extra)
            self.assertAlmostEqual(abs(vj - jref).max(), 0, 7)
            vj, vk = jk_builder.get_jk(dm, kpts=kpts, exxdiv=mf.exxdiv,
                                       with_j=False, **extra)
            self.assertAlmostEqual(abs(vk - kref).max(), 0, 7)

    def test_get_jk(self):
        self._check_get_jk(cell)

    def test_get_jk_high_cost(self):
        self._check_get_jk(cell1)
if __name__ == '__main__':
    # Run the full test suite when this module is executed as a script.
    print("Full Tests for rsjk")
    unittest.main()
| [
"unittest.main",
"numpy.random.seed",
"numpy.einsum",
"pyscf.pbc.tools.k2gamma.get_phase",
"pyscf.pbc.tools.k2gamma.kpts_to_kmesh",
"numpy.eye",
"pyscf.pbc.scf.rsjk.RangeSeparationJKBuilder",
"pyscf.pbc.gto.Cell"
] | [((4937, 4952), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4950, 4952), False, 'import unittest\n'), ((795, 801), 'pyscf.pbc.gto.Cell', 'Cell', ([], {}), '()\n', (799, 801), False, 'from pyscf.pbc.gto import Cell\n'), ((970, 976), 'pyscf.pbc.gto.Cell', 'Cell', ([], {}), '()\n', (974, 976), False, 'from pyscf.pbc.gto import Cell\n'), ((1403, 1420), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1417, 1420), True, 'import numpy as np\n'), ((1610, 1643), 'pyscf.pbc.tools.k2gamma.kpts_to_kmesh', 'k2gamma.kpts_to_kmesh', (['cell', 'kpts'], {}), '(cell, kpts)\n', (1631, 1643), False, 'from pyscf.pbc.tools import k2gamma\n'), ((1943, 1975), 'numpy.einsum', 'np.einsum', (['"""kij,kji->"""', 'jref', 'dm'], {}), "('kij,kji->', jref, dm)\n", (1952, 1975), True, 'import numpy as np\n'), ((2049, 2090), 'pyscf.pbc.scf.rsjk.RangeSeparationJKBuilder', 'rsjk.RangeSeparationJKBuilder', (['cell', 'kpts'], {}), '(cell, kpts)\n', (2078, 2090), False, 'from pyscf.pbc.scf import rsjk\n'), ((3176, 3193), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (3190, 3193), True, 'import numpy as np\n'), ((3387, 3421), 'pyscf.pbc.tools.k2gamma.kpts_to_kmesh', 'k2gamma.kpts_to_kmesh', (['cell1', 'kpts'], {}), '(cell1, kpts)\n', (3408, 3421), False, 'from pyscf.pbc.tools import k2gamma\n'), ((3724, 3756), 'numpy.einsum', 'np.einsum', (['"""kij,kji->"""', 'jref', 'dm'], {}), "('kij,kji->', jref, dm)\n", (3733, 3756), True, 'import numpy as np\n'), ((3830, 3872), 'pyscf.pbc.scf.rsjk.RangeSeparationJKBuilder', 'rsjk.RangeSeparationJKBuilder', (['cell1', 'kpts'], {}), '(cell1, kpts)\n', (3859, 3872), False, 'from pyscf.pbc.scf import rsjk\n'), ((818, 827), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (824, 827), True, 'import numpy as np\n'), ((994, 1003), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1000, 1003), True, 'import numpy as np\n'), ((1660, 1696), 'pyscf.pbc.tools.k2gamma.get_phase', 'k2gamma.get_phase', (['cell', 'kpts', 'kmesh'], {}), 
'(cell, kpts, kmesh)\n', (1677, 1696), False, 'from pyscf.pbc.tools import k2gamma\n'), ((1989, 2021), 'numpy.einsum', 'np.einsum', (['"""kij,kji->"""', 'kref', 'dm'], {}), "('kij,kji->', kref, dm)\n", (1998, 2021), True, 'import numpy as np\n'), ((3438, 3475), 'pyscf.pbc.tools.k2gamma.get_phase', 'k2gamma.get_phase', (['cell1', 'kpts', 'kmesh'], {}), '(cell1, kpts, kmesh)\n', (3455, 3475), False, 'from pyscf.pbc.tools import k2gamma\n'), ((3770, 3802), 'numpy.einsum', 'np.einsum', (['"""kij,kji->"""', 'kref', 'dm'], {}), "('kij,kji->', kref, dm)\n", (3779, 3802), True, 'import numpy as np\n')] |
#!/usr/bin/python
import sys
import numpy
import os
import argparse
from tabulate import tabulate
from pymicmac import utils_execution
def _dist_stats_row(name, ds):
    """Build one table row [name, min, max, mean, std, median] from the
    distances `ds`, or a row of dashes when there are no distances."""
    if not ds:
        return [name, '-', '-', '-', '-', '-']
    pattern = "%0.4f"
    return [name,
            pattern % numpy.min(ds),
            pattern % numpy.max(ds),
            pattern % numpy.mean(ds),
            pattern % numpy.std(ds),
            pattern % numpy.median(ds)]


def _parse_campari_log(logFileName, gcpsXYZ, cpsXYZ):
    """Parse the final-iteration residuals of a Campari.log.

    Returns (dsGCPs, dsCPs, gcpKOs): the 'Dist' values of GCPs and CPs in
    the last completed iteration, and the names of points flagged 'NOT OK'.
    Exits the process when a point name is neither a GCP nor a CP.
    """
    # Use a context manager so the log file handle is always closed
    # (the original leaked it).
    with open(logFileName, 'r') as logFile:
        lines = logFile.read().split('\n')
    # The residuals of the last completed iteration are the lines after the
    # second-to-last 'End Iter' marker.  Guard against short logs instead
    # of crashing with an IndexError.
    eiLinesIndexes = [j for j, line in enumerate(lines) if 'End Iter' in line]
    if len(eiLinesIndexes) >= 2:
        start = eiLinesIndexes[-2]
    elif eiLinesIndexes:
        start = eiLinesIndexes[-1]
    else:
        start = 0
    dsGCPs = []
    dsCPs = []
    gcpKOs = []
    for line in lines[start:]:
        if 'Dist' in line:
            gcp = line.split()[1]
            d = float(line.split('Dist')[-1].split()[0].split('=')[-1])
            if gcp in gcpsXYZ:
                dsGCPs.append(d)
            elif gcp in cpsXYZ:
                dsCPs.append(d)
            else:
                print('GCP/CP: ' + gcp + ' not found')
                sys.exit(1)
        elif 'NOT OK' in line:
            gcpKOs.append(line.split(' ')[4])
    return dsGCPs, dsCPs, gcpKOs


def run(xmlFile, foldersNames):
    """Print distance statistics (GCPs, CPs, rejected points) of the
    Campari runs found in the given execution folders.

    Arguments
    ---------
    xmlFile:
        XML file with the 3D positions of the GCPs (and possibly CPs).
    foldersNames:
        Comma-separated list of execution folders, each expected to
        contain a Campari.log file.
    """
    (gcpsXYZ, cpsXYZ) = utils_execution.readGCPXMLFile(xmlFile)
    tableGCPs = []
    tableCPs = []
    tableKOs = []
    for folderName in foldersNames.split(','):
        if folderName.endswith('/'):
            folderName = folderName[:-1]
        logFileName = folderName + '/Campari.log'
        if os.path.isfile(logFileName):
            dsGCPs, dsCPs, gcpKOs = _parse_campari_log(
                logFileName, gcpsXYZ, cpsXYZ)
            tableKOs.append(
                [folderName, ','.join(gcpKOs) if gcpKOs else '-'])
            tableGCPs.append(_dist_stats_row(folderName, dsGCPs))
            tableCPs.append(_dist_stats_row(folderName, dsCPs))
        else:
            # No log file: report dashes for this folder.
            tableKOs.append([folderName, '-'])
            tableGCPs.append(_dist_stats_row(folderName, []))
            tableCPs.append(_dist_stats_row(folderName, []))
    print("########################")
    print("Campari Dists statistics")
    print("########################")
    print('KOs')
    print(tabulate(tableKOs, headers=['#Name', '', ]))
    print()
    header = ['#Name', 'Min', 'Max', 'Mean', 'Std', 'Median']
    print('GCPs')
    print(tabulate(tableGCPs, headers=header))
    print()
    print('CPs')
    print(tabulate(tableCPs, headers=header))
    print()
def argument_parser():
    """Build the command-line argument parser for this tool.

    Returns
    -------
    argparse.ArgumentParser
        Parser with the required -x/--xml and -f/--folders options.
    """
    parser = argparse.ArgumentParser(
        description="Gets statistics of Campari runs in one or more execution folders")
    parser.add_argument(
        '-x', '--xml',
        default='',
        type=str,
        required=True,
        help='XML file with the 3D position of the GCPs (and possible CPs)')
    parser.add_argument(
        '-f', '--folders',
        default='',
        type=str,
        required=True,
        help='Comma-separated list of execution folders where to look for the Campari.log files')
    return parser
def main():
    """Script entry point: parse the arguments and report the statistics.

    Any exception is caught and printed (best-effort CLI behavior).
    """
    try:
        args = utils_execution.apply_argument_parser(argument_parser())
        run(args.xml, args.folders)
    except Exception as error:
        print(error)


if __name__ == "__main__":
    main()
| [
"pymicmac.utils_execution.readGCPXMLFile",
"argparse.ArgumentParser",
"numpy.std",
"numpy.median",
"os.path.isfile",
"numpy.min",
"tabulate.tabulate",
"numpy.max",
"numpy.mean",
"sys.exit"
] | [((193, 232), 'pymicmac.utils_execution.readGCPXMLFile', 'utils_execution.readGCPXMLFile', (['xmlFile'], {}), '(xmlFile)\n', (223, 232), False, 'from pymicmac import utils_execution\n'), ((3557, 3605), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description'}), '(description=description)\n', (3580, 3605), False, 'import argparse\n'), ((478, 505), 'os.path.isfile', 'os.path.isfile', (['logFileName'], {}), '(logFileName)\n', (492, 505), False, 'import os\n'), ((3005, 3046), 'tabulate.tabulate', 'tabulate', (['tableKOs'], {'headers': "['#Name', '']"}), "(tableKOs, headers=['#Name', ''])\n", (3013, 3046), False, 'from tabulate import tabulate\n'), ((3283, 3318), 'tabulate.tabulate', 'tabulate', (['tableGCPs'], {'headers': 'header'}), '(tableGCPs, headers=header)\n', (3291, 3318), False, 'from tabulate import tabulate\n'), ((3360, 3394), 'tabulate.tabulate', 'tabulate', (['tableCPs'], {'headers': 'header'}), '(tableCPs, headers=header)\n', (3368, 3394), False, 'from tabulate import tabulate\n'), ((1407, 1418), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1415, 1418), False, 'import sys\n'), ((1827, 1844), 'numpy.min', 'numpy.min', (['dsGCPs'], {}), '(dsGCPs)\n', (1836, 1844), False, 'import numpy\n'), ((1890, 1907), 'numpy.max', 'numpy.max', (['dsGCPs'], {}), '(dsGCPs)\n', (1899, 1907), False, 'import numpy\n'), ((1953, 1971), 'numpy.mean', 'numpy.mean', (['dsGCPs'], {}), '(dsGCPs)\n', (1963, 1971), False, 'import numpy\n'), ((2017, 2034), 'numpy.std', 'numpy.std', (['dsGCPs'], {}), '(dsGCPs)\n', (2026, 2034), False, 'import numpy\n'), ((2080, 2100), 'numpy.median', 'numpy.median', (['dsGCPs'], {}), '(dsGCPs)\n', (2092, 2100), False, 'import numpy\n'), ((2308, 2324), 'numpy.min', 'numpy.min', (['dsCPs'], {}), '(dsCPs)\n', (2317, 2324), False, 'import numpy\n'), ((2369, 2385), 'numpy.max', 'numpy.max', (['dsCPs'], {}), '(dsCPs)\n', (2378, 2385), False, 'import numpy\n'), ((2430, 2447), 'numpy.mean', 'numpy.mean', (['dsCPs'], {}), 
'(dsCPs)\n', (2440, 2447), False, 'import numpy\n'), ((2492, 2508), 'numpy.std', 'numpy.std', (['dsCPs'], {}), '(dsCPs)\n', (2501, 2508), False, 'import numpy\n'), ((2553, 2572), 'numpy.median', 'numpy.median', (['dsCPs'], {}), '(dsCPs)\n', (2565, 2572), False, 'import numpy\n')] |
from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, \
check_exog
from pmdarima.utils import get_callable
from numpy.testing import assert_array_equal, assert_array_almost_equal
import pytest
import pandas as pd
import numpy as np
# Shared fixtures for the tests below.
# x: simple 1-d vector [0, 1, 2, 3, 4]; m: a 3x3 integer matrix.
x = np.arange(5)
m = np.array([10, 5, 12, 23, 18, 3, 2, 0, 12]).reshape(3, 3).T
# X: a small seeded DataFrame of exogenous variables.
X = pd.DataFrame.from_records(
    np.random.RandomState(2).rand(4, 4),
    columns=['a', 'b', 'c', 'd']
)
# need some non-finite values (NaN / inf) in X for testing check_exog
X_nan = X.copy()
X_nan.loc[0, 'a'] = np.nan
X_inf = X.copy()
X_inf.loc[0, 'a'] = np.inf
# for diffinv: columns [1,2,3], [4,5,6], [7,8,9] (matches R's matrix(1:9, nrow=3))
x_mat = (np.arange(9) + 1).reshape(3, 3).T
def test_diff():
    """diff should reproduce R's base::diff for vectors and matrices."""
    # vector: every combination of lag/differences in {1, 2}
    vector_cases = [
        (1, 1, np.ones(4)),
        (1, 2, np.zeros(3)),
        (2, 1, np.ones(3) * 2),
        (2, 2, np.zeros(1)),
    ]
    for lag, differences, expected in vector_cases:
        assert_array_equal(diff(x, lag=lag, differences=differences),
                           expected)

    # matrix: same combinations
    matrix_cases = [
        (1, 1, np.array([[-5, -5, -2], [7, -15, 12]])),
        (1, 2, np.array([[12, -10, 14]])),
        (2, 1, np.array([[2, -20, 10]])),
    ]
    for lag, differences, expected in matrix_cases:
        assert_array_equal(diff(m, lag=lag, differences=differences),
                           expected)

    # differencing twice with lag 2 exhausts the 3-row matrix entirely
    assert diff(m, lag=2, differences=2).shape[0] == 0
@pytest.mark.parametrize(
'arr,lag,differences,xi,expected', [
# VECTORS -------------------------------------------------------------
# > x = c(0, 1, 2, 3, 4)
# > diffinv(x, lag=1, differences=1)
# [1] 0 0 1 3 6 10
pytest.param(x, 1, 1, None, [0, 0, 1, 3, 6, 10]),
# > diffinv(x, lag=1, differences=2)
# [1] 0 0 0 1 4 10 20
pytest.param(x, 1, 2, None, [0, 0, 0, 1, 4, 10, 20]),
# > diffinv(x, lag=2, differences=1)
# [1] 0 0 0 1 2 4 6
pytest.param(x, 2, 1, None, [0, 0, 0, 1, 2, 4, 6]),
# > diffinv(x, lag=2, differences=2)
# [1] 0 0 0 0 0 1 2 5 8
pytest.param(x, 2, 2, None, [0, 0, 0, 0, 0, 1, 2, 5, 8]),
# This is a test of the intermediate stage when x == [1, 0, 3, 2]
pytest.param([1, 0, 3, 2], 1, 1, [0], [0, 1, 1, 4, 6]),
# This is an intermediate stage when x == [0, 1, 2, 3, 4]
pytest.param(x, 1, 1, [0], [0, 0, 1, 3, 6, 10]),
# MATRICES ------------------------------------------------------------
# > matrix(data=c(1, 2, 3, 4, 5, 6, 7, 8, 9), nrow=3, ncol=3)
# [,1] [,2] [,3]
# [1,] 1 4 7
# [2,] 2 5 8
# [3,] 3 6 9
# > diffinv(X, 1, 1)
# [,1] [,2] [,3]
# [1,] 0 0 0
# [2,] 1 4 7
# [3,] 3 9 15
# [4,] 6 15 24
pytest.param(x_mat, 1, 1, None,
[[0, 0, 0],
[1, 4, 7],
[3, 9, 15],
[6, 15, 24]]),
# > diffinv(X, 1, 2)
# [,1] [,2] [,3]
# [1,] 0 0 0
# [2,] 0 0 0
# [3,] 1 4 7
# [4,] 4 13 22
# [5,] 10 28 46
pytest.param(x_mat, 1, 2, None,
[[0, 0, 0],
[0, 0, 0],
[1, 4, 7],
[4, 13, 22],
[10, 28, 46]]),
# > diffinv(X, 2, 1)
# [,1] [,2] [,3]
# [1,] 0 0 0
# [2,] 0 0 0
# [3,] 1 4 7
# [4,] 2 5 8
# [5,] 4 10 16
pytest.param(x_mat, 2, 1, None,
[[0, 0, 0],
[0, 0, 0],
[1, 4, 7],
[2, 5, 8],
[4, 10, 16]]),
# > diffinv(X, 2, 2)
# [,1] [,2] [,3]
# [1,] 0 0 0
# [2,] 0 0 0
# [3,] 0 0 0
# [4,] 0 0 0
# [5,] 1 4 7
# [6,] 2 5 8
# [7,] 5 14 23
pytest.param(x_mat, 2, 2, None,
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[1, 4, 7],
[2, 5, 8],
[5, 14, 23]]),
]
)
def test_diff_inv(arr, lag, differences, xi, expected):
    """diff_inv should reproduce R's diffinv (parametrized cases above)."""
    res = diff_inv(arr, lag=lag, differences=differences, xi=xi)
    # `np.float` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin `float` is the equivalent, forward-compatible dtype.
    expected = np.array(expected, dtype=float)
    assert_array_equal(expected, res)
def test_concatenate():
    """c should mimic R's c(): flatten all arguments into one array."""
    # scalars and sequences are concatenated into a single flat array
    expected = np.array([1.0, 0.0, 0.0, 0.0])
    assert_array_equal(c(1, np.zeros(3)), expected)
    assert_array_equal(c([1], np.zeros(3)), expected)
    # a single scalar or single-element list comes back as a 1-element array
    for single in (c(1), c([1])):
        assert_array_equal(single, np.ones(1))
    # no arguments at all yields None
    assert c() is None
def test_corner_in_callable():
    """get_callable must raise for a key missing from the mapping."""
    mapping = {'a': 1}
    with pytest.raises(ValueError):
        get_callable('fake-key', mapping)
def test_corner():
    """diff/diff_inv argument validation and shape errors."""
    # both functions reject lag < 1 and differences < 1
    for func in (diff, diff_inv):
        with pytest.raises(ValueError):
            func(x=x, lag=0)
        with pytest.raises(ValueError):
            func(x=x, differences=0)

    # passing xi with the incorrect shape for a 2-d array
    with pytest.raises(IndexError):
        diff_inv(x=np.array([[1, 1], [1, 1]]), xi=np.array([[1]]))
def test_is_iterable():
    """Strings and None are deliberately NOT treated as iterables."""
    for not_iterable in ("this string", None):
        assert not is_iterable(not_iterable)
    for iterable in (["this", "list"], np.array([1, 2])):
        assert is_iterable(iterable)
def test_as_series():
    """Lists, ndarrays and Series all coerce to a pandas Series."""
    inputs = ([1, 2, 3], np.arange(5), pd.Series([1, 2, 3]))
    for value in inputs:
        assert isinstance(as_series(value), pd.Series)
@pytest.mark.parametrize(
'arr', [
np.random.rand(5),
pd.Series(np.random.rand(5)),
]
)
def test_check_exog_ndim_value_err(arr):
    # 1-d input (the parametrized 1-d arrays above) is rejected:
    # exogenous data must be two-dimensional.
    with pytest.raises(ValueError):
        check_exog(arr)
@pytest.mark.parametrize('arr', [X_nan, X_inf])
def test_check_exog_infinite_value_err(arr):
    """Non-finite values fail validation only when finiteness is enforced."""
    # NaN / inf values are rejected when force_all_finite is set ...
    with pytest.raises(ValueError):
        check_exog(arr, force_all_finite=True)

    # ... but pass straight through (same object, no copy) when it is not
    result = check_exog(arr, force_all_finite=False, dtype=None, copy=False)
    assert result is arr
def test_exog_pd_dataframes():
    """A valid DataFrame survives validation with and without copying."""
    # with copy: equal values, new object
    copied = check_exog(X, force_all_finite=True, copy=True)
    assert copied.equals(X)
    # without copy: the very same object is returned
    same = check_exog(X, force_all_finite=True, copy=False)
    assert same is X
def test_exog_np_array():
    """Nested lists and ndarrays both validate to the same values."""
    expected = np.random.RandomState(1).rand(5, 5)
    for candidate in (expected.tolist(), expected):
        assert_array_almost_equal(expected, check_exog(candidate))
| [
"numpy.testing.assert_array_equal",
"numpy.zeros",
"numpy.ones",
"pmdarima.utils.array.c",
"numpy.random.RandomState",
"pytest.param",
"pytest.raises",
"pmdarima.utils.array.diff",
"numpy.array",
"numpy.arange",
"pmdarima.utils.get_callable",
"pmdarima.utils.array.check_exog",
"numpy.random.... | [((265, 277), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (274, 277), True, 'import numpy as np\n'), ((6199, 6245), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""arr"""', '[X_nan, X_inf]'], {}), "('arr', [X_nan, X_inf])\n", (6222, 6245), False, 'import pytest\n'), ((4486, 4540), 'pmdarima.utils.array.diff_inv', 'diff_inv', (['arr'], {'lag': 'lag', 'differences': 'differences', 'xi': 'xi'}), '(arr, lag=lag, differences=differences, xi=xi)\n', (4494, 4540), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((4556, 4590), 'numpy.array', 'np.array', (['expected'], {'dtype': 'np.float'}), '(expected, dtype=np.float)\n', (4564, 4590), True, 'import numpy as np\n'), ((4595, 4628), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['expected', 'res'], {}), '(expected, res)\n', (4613, 4628), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((5674, 5703), 'pmdarima.utils.array.is_iterable', 'is_iterable', (["['this', 'list']"], {}), "(['this', 'list'])\n", (5685, 5703), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((744, 773), 'pmdarima.utils.array.diff', 'diff', (['x'], {'lag': '(1)', 'differences': '(1)'}), '(x, lag=1, differences=1)\n', (748, 773), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((775, 785), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (782, 785), True, 'import numpy as np\n'), ((810, 839), 'pmdarima.utils.array.diff', 'diff', (['x'], {'lag': '(1)', 'differences': '(2)'}), '(x, lag=1, differences=2)\n', (814, 839), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((841, 852), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (849, 852), True, 'import numpy as np\n'), ((877, 906), 'pmdarima.utils.array.diff', 'diff', (['x'], {'lag': '(2)', 'differences': 
'(1)'}), '(x, lag=2, differences=1)\n', (881, 906), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((947, 976), 'pmdarima.utils.array.diff', 'diff', (['x'], {'lag': '(2)', 'differences': '(2)'}), '(x, lag=2, differences=2)\n', (951, 976), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((978, 989), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (986, 989), True, 'import numpy as np\n'), ((1065, 1094), 'pmdarima.utils.array.diff', 'diff', (['m'], {'lag': '(1)', 'differences': '(1)'}), '(m, lag=1, differences=1)\n', (1069, 1094), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((1119, 1157), 'numpy.array', 'np.array', (['[[-5, -5, -2], [7, -15, 12]]'], {}), '([[-5, -5, -2], [7, -15, 12]])\n', (1127, 1157), True, 'import numpy as np\n'), ((1182, 1211), 'pmdarima.utils.array.diff', 'diff', (['m'], {'lag': '(1)', 'differences': '(2)'}), '(m, lag=1, differences=2)\n', (1186, 1211), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((1236, 1261), 'numpy.array', 'np.array', (['[[12, -10, 14]]'], {}), '([[12, -10, 14]])\n', (1244, 1261), True, 'import numpy as np\n'), ((1286, 1315), 'pmdarima.utils.array.diff', 'diff', (['m'], {'lag': '(2)', 'differences': '(1)'}), '(m, lag=2, differences=1)\n', (1290, 1315), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((1317, 1341), 'numpy.array', 'np.array', (['[[2, -20, 10]]'], {}), '([[2, -20, 10]])\n', (1325, 1341), True, 'import numpy as np\n'), ((1665, 1713), 'pytest.param', 'pytest.param', (['x', '(1)', '(1)', 'None', '[0, 0, 1, 3, 6, 10]'], {}), '(x, 1, 1, None, [0, 0, 1, 3, 6, 10])\n', (1677, 1713), False, 'import pytest\n'), ((1804, 1856), 'pytest.param', 'pytest.param', (['x', '(1)', '(2)', 'None', '[0, 0, 0, 1, 4, 10, 20]'], {}), '(x, 1, 2, None, [0, 0, 0, 1, 4, 10, 
20])\n', (1816, 1856), False, 'import pytest\n'), ((1940, 1990), 'pytest.param', 'pytest.param', (['x', '(2)', '(1)', 'None', '[0, 0, 0, 1, 2, 4, 6]'], {}), '(x, 2, 1, None, [0, 0, 0, 1, 2, 4, 6])\n', (1952, 1990), False, 'import pytest\n'), ((2078, 2134), 'pytest.param', 'pytest.param', (['x', '(2)', '(2)', 'None', '[0, 0, 0, 0, 0, 1, 2, 5, 8]'], {}), '(x, 2, 2, None, [0, 0, 0, 0, 0, 1, 2, 5, 8])\n', (2090, 2134), False, 'import pytest\n'), ((2219, 2273), 'pytest.param', 'pytest.param', (['[1, 0, 3, 2]', '(1)', '(1)', '[0]', '[0, 1, 1, 4, 6]'], {}), '([1, 0, 3, 2], 1, 1, [0], [0, 1, 1, 4, 6])\n', (2231, 2273), False, 'import pytest\n'), ((2350, 2397), 'pytest.param', 'pytest.param', (['x', '(1)', '(1)', '[0]', '[0, 0, 1, 3, 6, 10]'], {}), '(x, 1, 1, [0], [0, 0, 1, 3, 6, 10])\n', (2362, 2397), False, 'import pytest\n'), ((2857, 2942), 'pytest.param', 'pytest.param', (['x_mat', '(1)', '(1)', 'None', '[[0, 0, 0], [1, 4, 7], [3, 9, 15], [6, 15, 24]]'], {}), '(x_mat, 1, 1, None, [[0, 0, 0], [1, 4, 7], [3, 9, 15], [6, 15, 24]]\n )\n', (2869, 2942), False, 'import pytest\n'), ((3244, 3342), 'pytest.param', 'pytest.param', (['x_mat', '(1)', '(2)', 'None', '[[0, 0, 0], [0, 0, 0], [1, 4, 7], [4, 13, 22], [10, 28, 46]]'], {}), '(x_mat, 1, 2, None, [[0, 0, 0], [0, 0, 0], [1, 4, 7], [4, 13, \n 22], [10, 28, 46]])\n', (3256, 3342), False, 'import pytest\n'), ((3666, 3760), 'pytest.param', 'pytest.param', (['x_mat', '(2)', '(1)', 'None', '[[0, 0, 0], [0, 0, 0], [1, 4, 7], [2, 5, 8], [4, 10, 16]]'], {}), '(x_mat, 2, 1, None, [[0, 0, 0], [0, 0, 0], [1, 4, 7], [2, 5, 8],\n [4, 10, 16]])\n', (3678, 3760), False, 'import pytest\n'), ((4145, 4261), 'pytest.param', 'pytest.param', (['x_mat', '(2)', '(2)', 'None', '[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 4, 7], [2, 5, 8], [5, 14, 23]]'], {}), '(x_mat, 2, 2, None, [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0],\n [1, 4, 7], [2, 5, 8], [5, 14, 23]])\n', (4157, 4261), False, 'import pytest\n'), ((4697, 4727), 'numpy.array', 
'np.array', (['[1.0, 0.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0, 0.0])\n', (4705, 4727), True, 'import numpy as np\n'), ((4773, 4803), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0, 0.0])\n', (4781, 4803), True, 'import numpy as np\n'), ((4828, 4832), 'pmdarima.utils.array.c', 'c', (['(1)'], {}), '(1)\n', (4829, 4832), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((4834, 4844), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (4841, 4844), True, 'import numpy as np\n'), ((4857, 4860), 'pmdarima.utils.array.c', 'c', ([], {}), '()\n', (4858, 4860), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((4892, 4898), 'pmdarima.utils.array.c', 'c', (['[1]'], {}), '([1])\n', (4893, 4898), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((4900, 4910), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (4907, 4910), True, 'import numpy as np\n'), ((5007, 5032), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5020, 5032), False, 'import pytest\n'), ((5042, 5076), 'pmdarima.utils.get_callable', 'get_callable', (['"""fake-key"""', "{'a': 1}"], {}), "('fake-key', {'a': 1})\n", (5054, 5076), False, 'from pmdarima.utils import get_callable\n'), ((5135, 5160), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5148, 5160), False, 'import pytest\n'), ((5170, 5186), 'pmdarima.utils.array.diff', 'diff', ([], {'x': 'x', 'lag': '(0)'}), '(x=x, lag=0)\n', (5174, 5186), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((5196, 5221), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5209, 5221), False, 'import pytest\n'), ((5231, 5251), 'pmdarima.utils.array.diff_inv', 'diff_inv', ([], {'x': 'x', 'lag': '(0)'}), '(x=x, lag=0)\n', (5239, 5251), False, 'from pmdarima.utils.array import 
diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((5298, 5323), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5311, 5323), False, 'import pytest\n'), ((5333, 5357), 'pmdarima.utils.array.diff', 'diff', ([], {'x': 'x', 'differences': '(0)'}), '(x=x, differences=0)\n', (5337, 5357), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((5367, 5392), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5380, 5392), False, 'import pytest\n'), ((5402, 5430), 'pmdarima.utils.array.diff_inv', 'diff_inv', ([], {'x': 'x', 'differences': '(0)'}), '(x=x, differences=0)\n', (5410, 5430), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((5501, 5526), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (5514, 5526), False, 'import pytest\n'), ((5636, 5662), 'pmdarima.utils.array.is_iterable', 'is_iterable', (['"""this string"""'], {}), "('this string')\n", (5647, 5662), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((5719, 5736), 'pmdarima.utils.array.is_iterable', 'is_iterable', (['None'], {}), '(None)\n', (5730, 5736), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((5760, 5776), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (5768, 5776), True, 'import numpy as np\n'), ((5824, 5844), 'pmdarima.utils.array.as_series', 'as_series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (5833, 5844), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((6145, 6170), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6158, 6170), False, 'import pytest\n'), ((6180, 6195), 'pmdarima.utils.array.check_exog', 'check_exog', (['arr'], {}), '(arr)\n', (6190, 6195), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, 
as_series, check_exog\n'), ((6030, 6047), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (6044, 6047), True, 'import numpy as np\n'), ((6300, 6325), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6313, 6325), False, 'import pytest\n'), ((6335, 6373), 'pmdarima.utils.array.check_exog', 'check_exog', (['arr'], {'force_all_finite': '(True)'}), '(arr, force_all_finite=True)\n', (6345, 6373), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((6418, 6481), 'pmdarima.utils.array.check_exog', 'check_exog', (['arr'], {'force_all_finite': '(False)', 'dtype': 'None', 'copy': '(False)'}), '(arr, force_all_finite=False, dtype=None, copy=False)\n', (6428, 6481), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((6657, 6705), 'pmdarima.utils.array.check_exog', 'check_exog', (['X'], {'force_all_finite': '(True)', 'copy': '(False)'}), '(X, force_all_finite=True, copy=False)\n', (6667, 6705), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((6913, 6929), 'pmdarima.utils.array.check_exog', 'check_exog', (['X_np'], {}), '(X_np)\n', (6923, 6929), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((282, 324), 'numpy.array', 'np.array', (['[10, 5, 12, 23, 18, 3, 2, 0, 12]'], {}), '([10, 5, 12, 23, 18, 3, 2, 0, 12])\n', (290, 324), True, 'import numpy as np\n'), ((376, 400), 'numpy.random.RandomState', 'np.random.RandomState', (['(2)'], {}), '(2)\n', (397, 400), True, 'import numpy as np\n'), ((908, 918), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (915, 918), True, 'import numpy as np\n'), ((4683, 4694), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4691, 4694), True, 'import numpy as np\n'), ((4759, 4770), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4767, 4770), True, 'import numpy as np\n'), ((5889, 5901), 'numpy.arange', 
'np.arange', (['(5)'], {}), '(5)\n', (5898, 5901), True, 'import numpy as np\n'), ((5947, 5967), 'pandas.Series', 'pd.Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (5956, 5967), True, 'import pandas as pd\n'), ((6067, 6084), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (6081, 6084), True, 'import numpy as np\n'), ((6563, 6610), 'pmdarima.utils.array.check_exog', 'check_exog', (['X'], {'force_all_finite': '(True)', 'copy': '(True)'}), '(X, force_all_finite=True, copy=True)\n', (6573, 6610), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((6750, 6774), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (6771, 6774), True, 'import numpy as np\n'), ((618, 630), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (627, 630), True, 'import numpy as np\n'), ((1354, 1383), 'pmdarima.utils.array.diff', 'diff', (['m'], {'lag': '(2)', 'differences': '(2)'}), '(m, lag=2, differences=2)\n', (1358, 1383), False, 'from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, check_exog\n'), ((5547, 5573), 'numpy.array', 'np.array', (['[[1, 1], [1, 1]]'], {}), '([[1, 1], [1, 1]])\n', (5555, 5573), True, 'import numpy as np\n'), ((5578, 5593), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (5586, 5593), True, 'import numpy as np\n')] |
import numpy as np
from skimage import measure
def psnrc(true_tensor,test_tensor,val=1):
psnrt=0
for i in range(true_tensor.shape[0]):
mse=np.mean((np.abs(true_tensor[i,:,:,:]-test_tensor[i,:,:,:]))**2)
psnrt+=10*np.log10(val**2/mse)
psnrt=psnrt/true_tensor.shape[0]
return psnrt
def metrics(true_tensor, test_tensor,max_val=1):
psnrt = 0
ssimt = 0
for i in range(true_tensor.shape[0]):
psnr = measure.compare_psnr(true_tensor[i,:,:], test_tensor[i,:,:], data_range = max_val)
ssim = measure.compare_ssim(true_tensor[i,:,:], test_tensor[i,:,:], data_range = max_val)
psnrt = psnrt+psnr
ssimt = ssimt+ssim
psnrt = psnrt/true_tensor.shape[0]
ssimt = ssimt/true_tensor.shape[0]
return psnrt, ssimt
| [
"numpy.log10",
"skimage.measure.compare_psnr",
"numpy.abs",
"skimage.measure.compare_ssim"
] | [((447, 536), 'skimage.measure.compare_psnr', 'measure.compare_psnr', (['true_tensor[i, :, :]', 'test_tensor[i, :, :]'], {'data_range': 'max_val'}), '(true_tensor[i, :, :], test_tensor[i, :, :], data_range\n =max_val)\n', (467, 536), False, 'from skimage import measure\n'), ((546, 635), 'skimage.measure.compare_ssim', 'measure.compare_ssim', (['true_tensor[i, :, :]', 'test_tensor[i, :, :]'], {'data_range': 'max_val'}), '(true_tensor[i, :, :], test_tensor[i, :, :], data_range\n =max_val)\n', (566, 635), False, 'from skimage import measure\n'), ((234, 258), 'numpy.log10', 'np.log10', (['(val ** 2 / mse)'], {}), '(val ** 2 / mse)\n', (242, 258), True, 'import numpy as np\n'), ((163, 220), 'numpy.abs', 'np.abs', (['(true_tensor[i, :, :, :] - test_tensor[i, :, :, :])'], {}), '(true_tensor[i, :, :, :] - test_tensor[i, :, :, :])\n', (169, 220), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Create a sine wave in Gaussian noise and compare the Bayesian spectral estimation to a power spectrum
"""
import matplotlib.pyplot as pl
import matplotlib.mlab as mlab
import matplotlib.gridspec as gridspec
import numpy as np
# set plot to render labels using latex
pl.rc('text', usetex=True)
pl.rc('font', family='serif')
pl.rc('font', size=14)
# times
t = np.linspace(0., 1., 100)
# set up signal to inject
f = 25.43 # Hz
phi = 2.1 # radians
A = 1.0 # amplitude
# sinewave
y = A*np.sin(2.*np.pi*f*t + phi)
# noise with sigma = 1
sigma = 1.;
n = np.random.randn(len(t))
d = y + n # the "data"
fig = pl.figure(figsize=(12,6), dpi=100)
gs = gridspec.GridSpec(1, 2, width_ratios=[1,2])
pl.subplot(gs[0])
# plot data
pl.plot(t, d, 'bo')
pl.plot(t, y, 'k-')
ax = pl.gca()
ax.set_xlabel('time (seconds)')
# get posterior on frequency
freqs = np.linspace(0., 1./(2.*(t[1]-t[0])), 200)
logpost1 = np.zeros(len(freqs))
logpost2 = np.zeros(len(freqs))
# get sums in posterior
d2 = np.sum(d**2)
for i in range(200):
R = np.sum(d*np.cos(2.*np.pi*freqs[i]*t))
I = np.sum(d*np.sin(2.*np.pi*freqs[i]*t))
#print 2.*(R**2+I**2), d2
logpost1[i] = -0.5*(len(d)-2)*np.log(1.-2.*(R**2 + I**2)/(len(d)*d2))
logpost2[i] = (R**2 + I**2)/sigma**2
# convert log posterior into posterior
post1 = np.exp(logpost1-np.max(logpost1))
post1 = post1/np.trapz(post1, freqs)
post2 = np.exp(logpost2-np.max(logpost2))
post2 = post2/np.trapz(post2, freqs)
# get power spectrum
pxx, pfreqs = mlab.psd(d, NFFT=len(d), Fs=(1./(t[1]-t[0])), detrend=mlab.detrend_none,
window=mlab.window_none, noverlap=0, pad_to=None, sides='default', scale_by_freq=None)
# normalise posteriors to the same height as the power spectrum
if np.max(post1) > np.max(post2):
pxx = pxx*(np.max(post1)/np.max(pxx))
else:
pxx = pxx*(np.max(post2)/np.max(pxx))
pl.subplot(gs[1])
pl.plot(freqs, post1, 'b', label='$p(f|d,I)$ for unknown $\sigma$')
pl.plot(freqs, post2, 'b--', label='$p(f|d,I)$ for known $\sigma$')
pl.plot(pfreqs, pxx, 'r-o', label='Power spectrum')
pl.plot([f, f], [0., np.max(pxx)], 'k--', label='True frequency')
pl.legend(loc='upper left', fancybox=True, framealpha=0.3, prop={'size': 14})
ax = pl.gca()
ax.set_xlabel('Frequency (Hz)')
ax.set_yticklabels([])
ax.set_ylim(0., np.max(pxx))
pl.tight_layout()
pl.show()
fig.savefig('../spectral_estimation.pdf') | [
"matplotlib.pyplot.subplot",
"numpy.trapz",
"numpy.sum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.max",
"matplotlib.pyplot.rc",
"numpy.linspace",
"matplotlib.pyplot.gca",
"numpy.cos",
"matplotlib.gridspe... | [((295, 321), 'matplotlib.pyplot.rc', 'pl.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (300, 321), True, 'import matplotlib.pyplot as pl\n'), ((322, 351), 'matplotlib.pyplot.rc', 'pl.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (327, 351), True, 'import matplotlib.pyplot as pl\n'), ((352, 374), 'matplotlib.pyplot.rc', 'pl.rc', (['"""font"""'], {'size': '(14)'}), "('font', size=14)\n", (357, 374), True, 'import matplotlib.pyplot as pl\n'), ((388, 414), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(100)'], {}), '(0.0, 1.0, 100)\n', (399, 414), True, 'import numpy as np\n'), ((635, 670), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(12, 6)', 'dpi': '(100)'}), '(figsize=(12, 6), dpi=100)\n', (644, 670), True, 'import matplotlib.pyplot as pl\n'), ((676, 720), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {'width_ratios': '[1, 2]'}), '(1, 2, width_ratios=[1, 2])\n', (693, 720), True, 'import matplotlib.gridspec as gridspec\n'), ((721, 738), 'matplotlib.pyplot.subplot', 'pl.subplot', (['gs[0]'], {}), '(gs[0])\n', (731, 738), True, 'import matplotlib.pyplot as pl\n'), ((752, 771), 'matplotlib.pyplot.plot', 'pl.plot', (['t', 'd', '"""bo"""'], {}), "(t, d, 'bo')\n", (759, 771), True, 'import matplotlib.pyplot as pl\n'), ((772, 791), 'matplotlib.pyplot.plot', 'pl.plot', (['t', 'y', '"""k-"""'], {}), "(t, y, 'k-')\n", (779, 791), True, 'import matplotlib.pyplot as pl\n'), ((797, 805), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (803, 805), True, 'import matplotlib.pyplot as pl\n'), ((876, 926), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0 / (2.0 * (t[1] - t[0])))', '(200)'], {}), '(0.0, 1.0 / (2.0 * (t[1] - t[0])), 200)\n', (887, 926), True, 'import numpy as np\n'), ((1012, 1026), 'numpy.sum', 'np.sum', (['(d ** 2)'], {}), '(d ** 2)\n', (1018, 1026), True, 'import numpy as np\n'), ((1860, 1877), 'matplotlib.pyplot.subplot', 
'pl.subplot', (['gs[1]'], {}), '(gs[1])\n', (1870, 1877), True, 'import matplotlib.pyplot as pl\n'), ((1878, 1946), 'matplotlib.pyplot.plot', 'pl.plot', (['freqs', 'post1', '"""b"""'], {'label': '"""$p(f|d,I)$ for unknown $\\\\sigma$"""'}), "(freqs, post1, 'b', label='$p(f|d,I)$ for unknown $\\\\sigma$')\n", (1885, 1946), True, 'import matplotlib.pyplot as pl\n'), ((1946, 2014), 'matplotlib.pyplot.plot', 'pl.plot', (['freqs', 'post2', '"""b--"""'], {'label': '"""$p(f|d,I)$ for known $\\\\sigma$"""'}), "(freqs, post2, 'b--', label='$p(f|d,I)$ for known $\\\\sigma$')\n", (1953, 2014), True, 'import matplotlib.pyplot as pl\n'), ((2014, 2065), 'matplotlib.pyplot.plot', 'pl.plot', (['pfreqs', 'pxx', '"""r-o"""'], {'label': '"""Power spectrum"""'}), "(pfreqs, pxx, 'r-o', label='Power spectrum')\n", (2021, 2065), True, 'import matplotlib.pyplot as pl\n'), ((2132, 2209), 'matplotlib.pyplot.legend', 'pl.legend', ([], {'loc': '"""upper left"""', 'fancybox': '(True)', 'framealpha': '(0.3)', 'prop': "{'size': 14}"}), "(loc='upper left', fancybox=True, framealpha=0.3, prop={'size': 14})\n", (2141, 2209), True, 'import matplotlib.pyplot as pl\n'), ((2215, 2223), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (2221, 2223), True, 'import matplotlib.pyplot as pl\n'), ((2309, 2326), 'matplotlib.pyplot.tight_layout', 'pl.tight_layout', ([], {}), '()\n', (2324, 2326), True, 'import matplotlib.pyplot as pl\n'), ((2327, 2336), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (2334, 2336), True, 'import matplotlib.pyplot as pl\n'), ((513, 546), 'numpy.sin', 'np.sin', (['(2.0 * np.pi * f * t + phi)'], {}), '(2.0 * np.pi * f * t + phi)\n', (519, 546), True, 'import numpy as np\n'), ((1371, 1393), 'numpy.trapz', 'np.trapz', (['post1', 'freqs'], {}), '(post1, freqs)\n', (1379, 1393), True, 'import numpy as np\n'), ((1451, 1473), 'numpy.trapz', 'np.trapz', (['post2', 'freqs'], {}), '(post2, freqs)\n', (1459, 1473), True, 'import numpy as np\n'), ((1742, 1755), 'numpy.max', 
'np.max', (['post1'], {}), '(post1)\n', (1748, 1755), True, 'import numpy as np\n'), ((1758, 1771), 'numpy.max', 'np.max', (['post2'], {}), '(post2)\n', (1764, 1771), True, 'import numpy as np\n'), ((2295, 2306), 'numpy.max', 'np.max', (['pxx'], {}), '(pxx)\n', (2301, 2306), True, 'import numpy as np\n'), ((1339, 1355), 'numpy.max', 'np.max', (['logpost1'], {}), '(logpost1)\n', (1345, 1355), True, 'import numpy as np\n'), ((1419, 1435), 'numpy.max', 'np.max', (['logpost2'], {}), '(logpost2)\n', (1425, 1435), True, 'import numpy as np\n'), ((2087, 2098), 'numpy.max', 'np.max', (['pxx'], {}), '(pxx)\n', (2093, 2098), True, 'import numpy as np\n'), ((1062, 1096), 'numpy.cos', 'np.cos', (['(2.0 * np.pi * freqs[i] * t)'], {}), '(2.0 * np.pi * freqs[i] * t)\n', (1068, 1096), True, 'import numpy as np\n'), ((1106, 1140), 'numpy.sin', 'np.sin', (['(2.0 * np.pi * freqs[i] * t)'], {}), '(2.0 * np.pi * freqs[i] * t)\n', (1112, 1140), True, 'import numpy as np\n'), ((1786, 1799), 'numpy.max', 'np.max', (['post1'], {}), '(post1)\n', (1792, 1799), True, 'import numpy as np\n'), ((1800, 1811), 'numpy.max', 'np.max', (['pxx'], {}), '(pxx)\n', (1806, 1811), True, 'import numpy as np\n'), ((1832, 1845), 'numpy.max', 'np.max', (['post2'], {}), '(post2)\n', (1838, 1845), True, 'import numpy as np\n'), ((1846, 1857), 'numpy.max', 'np.max', (['pxx'], {}), '(pxx)\n', (1852, 1857), True, 'import numpy as np\n')] |
import numpy as np
from scipy.linalg import lu_factor, lu_solve
from compas.geometry import allclose # noqa: F401
from compas_nurbs.helpers import find_spans
from compas_nurbs.helpers import basis_functions
from compas_nurbs.knot_vectors import knot_vector_and_params
from compas_nurbs.knot_vectors import CurveKnotStyle
# TODO: estimate derivatives for degree==3
# https://link.springer.com/content/pdf/10.1007/s003660050038.pdf
def global_curve_interpolation(points, degree, knot_style=0, periodic=False):
"""Global curve interpolation through points.
Please refer to Algorithm A9.1 on The NURBS Book (2nd Edition), pp.369-370
for details.
Parameters
----------
points : list of point
The list of points on the curve we are looking for.
degree : int
The degree of the output parametric curve.
knotstyle : int, optional
The knot style, either 0, 1, or 2 [uniform, chord, or chord_square_root].
Defaults to 0, uniform.
Returns
-------
tuple (control_points, knot_vector)
Examples
--------
>>> degree = 3
>>> points = [(0., 0., 0.), (3., 4., 0.), (-1., 4., 0.), (-4., 0., 0.), (-4., -3., 0.)]
>>> control_points, knot_vector = global_curve_interpolation(points, degree)
>>> solution = [(0, 0, 0), (6.44, 3.72, 0.), (-2.67, 7.5, 0), (-5.11, -2.72, 0), (-4, -3, 0)]
>>> allclose(control_points, solution, tol=0.005)
True
"""
points = np.array(points)
kv, uk = knot_vector_and_params(points, degree, knot_style, extended=False)
M = coefficient_matrix(degree, kv, uk)
lu, piv = lu_factor(M)
return lu_solve((lu, piv), points), kv
def global_curve_interpolation_with_end_derivatives(points,
degree,
start_derivative,
end_derivative,
knot_style=0,
periodic=False):
"""Global curve interpolation through points with end derivatives specified.
Please refer to The NURBS Book (2nd Edition), pp. 370-371 for details.
Parameters
----------
points : list of point
The list of points on the curve we are looking for.
degree : int
The degree of the output parametric curve.
start_derivative : vector
The start derivative of the curve
end_derivative : vector
The end derivative of the curve
knotstyle : int, optional
The knot style, either 0, 1, or 2 [uniform, chord, or chord_square_root].
Defaults to 0, uniform.
Returns
-------
tuple (control_points, knot_vector)
Examples
--------
>>> degree = 3
>>> points = [(0., 0., 0.), (3., 4., 0.), (-1., 4., 0.), (-4., 0., 0.), (-4., -3., 0.)]
>>> start_derivative = [17.75, 10.79, 0.0]
>>> end_derivative = [-0.71, -12.62, 0.0]
>>> result = global_curve_interpolation_with_end_derivatives(points, degree, start_derivative, end_derivative)
>>> solution = [(0, 0, 0), (1.48, 0.9, 0), (4.95, 5.08, 0), (-1.56, 4.87, 0), (-4.72, -0.56, 0), (-3.94, -1.95, 0), (-4, -3, 0)]
>>> allclose(result[0], solution, tol=0.005)
True
"""
points = np.array(points)
kv, uk = knot_vector_and_params(points, degree, knot_style, extended=True)
M = coefficient_matrix(degree, kv, uk)
M[1][0] = -1.
M[1][1] = 1.
M[-2][-2] = -1.
M[-2][-1] = 1.
v0 = np.array(start_derivative) * kv[degree + 1] / degree
vn = np.array(end_derivative) * (1 - kv[len(kv) - 1 - degree - 1]) / degree
C = points[:]
C = np.insert(C, 1, v0, axis=0)
C = np.insert(C, -1, vn, axis=0)
lu, piv = lu_factor(M)
return lu_solve((lu, piv), C), kv
def coefficient_matrix(degree, knot_vector, uk):
"""Returns the coefficient matrix for global interpolation.
Please refer to The NURBS Book (2nd Edition), pp. 370-371 for details.
Parameters
----------
degree : int
The degree of the curve.
knot_vector : list of float
The knot vector of the curve.
uk : list of float
parameters
Returns
-------
coefficient matrix, list
"""
# TODO: use this for evaluators?
num_points = len(uk)
spans = find_spans(knot_vector, num_points, uk)
bases = basis_functions(degree, knot_vector, spans, uk)
M = [[0.0 for _ in range(num_points)] for _ in range(num_points)]
for i, (span, basis) in enumerate(zip(spans, bases)):
M[i][span - degree:span + 1] = basis[:degree + 1]
return np.array(M)
def interpolate_curve(points, degree, knot_style=0, start_derivative=None, end_derivative=None, periodic=False):
"""Interpolate curve by the specified parameters.
Parameters
----------
points : list of point
The list of points on the curve we are looking for.
degree : int
The degree of the output parametric curve.
start_derivative : vector, optional
The start derivative of the curve. Defaults to ``None``.
end_derivative : vector
The end derivative of the curve. Defaults to ``None``.
knotstyle : int, optional
The knot style, either 0, 1, or 2 [uniform, chord, or chord_square_root].
Defaults to 0, uniform.
Returns
-------
tuple (control_points, knot_vector)
Raises
------
ValueError
If the knot style is not correct or if only one derivative is passed.
"""
if knot_style not in [CurveKnotStyle.Uniform, CurveKnotStyle.Chord, CurveKnotStyle.ChordSquareRoot]:
raise ValueError("Please pass a valid knot style: [0, 1, 2].")
if start_derivative is not None and end_derivative is not None:
return global_curve_interpolation_with_end_derivatives(points,
degree,
start_derivative,
end_derivative,
knot_style,
periodic=periodic)
elif start_derivative is not None or end_derivative is not None:
raise ValueError("Please pass start- AND end derivatives")
else:
return global_curve_interpolation(points, degree, knot_style=knot_style, periodic=periodic)
if __name__ == "__main__":
from compas_nurbs import Curve
knot_style = CurveKnotStyle.Uniform
points = [[0.0, 0.0, 0.0], [0.973412, 1.962979, 0.0], [-0.66136, 2.434672, 0.0], [-2.34574, 1.016886, 0.0], [-3.513204, -0.855328, 0.0], [-4.0, -3.0, 0.0]]
degree = 3
control_points, knot_vector = global_curve_interpolation(points, degree, CurveKnotStyle.Uniform)
crv = Curve(control_points, degree, knot_vector)
start_derivative, end_derivative = [17.75, 10.79, 0.0], [-0.71, -12.62, 0.0]
result = global_curve_interpolation_with_end_derivatives(points,
degree,
start_derivative,
end_derivative,
knot_style)
crv = Curve(result[0], degree, result[1])
D0, D1 = crv.derivatives_at([0, 1], order=1)[0]
assert(allclose(D0, start_derivative))
assert(allclose(D1, end_derivative))
result = interpolate_curve(points, degree, knot_style)
crv = Curve(result[0], degree, result[1])
D0, D1 = crv.derivatives_at([0, 1], order=1)[0]
assert(allclose(D0, start_derivative, tol=0.01))
assert(allclose(D1, end_derivative, tol=0.01))
| [
"compas_nurbs.knot_vectors.knot_vector_and_params",
"compas_nurbs.Curve",
"compas_nurbs.helpers.basis_functions",
"scipy.linalg.lu_solve",
"numpy.insert",
"compas_nurbs.helpers.find_spans",
"numpy.array",
"scipy.linalg.lu_factor",
"compas.geometry.allclose"
] | [((1457, 1473), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (1465, 1473), True, 'import numpy as np\n'), ((1487, 1553), 'compas_nurbs.knot_vectors.knot_vector_and_params', 'knot_vector_and_params', (['points', 'degree', 'knot_style'], {'extended': '(False)'}), '(points, degree, knot_style, extended=False)\n', (1509, 1553), False, 'from compas_nurbs.knot_vectors import knot_vector_and_params\n'), ((1611, 1623), 'scipy.linalg.lu_factor', 'lu_factor', (['M'], {}), '(M)\n', (1620, 1623), False, 'from scipy.linalg import lu_factor, lu_solve\n'), ((3313, 3329), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (3321, 3329), True, 'import numpy as np\n'), ((3343, 3408), 'compas_nurbs.knot_vectors.knot_vector_and_params', 'knot_vector_and_params', (['points', 'degree', 'knot_style'], {'extended': '(True)'}), '(points, degree, knot_style, extended=True)\n', (3365, 3408), False, 'from compas_nurbs.knot_vectors import knot_vector_and_params\n'), ((3696, 3723), 'numpy.insert', 'np.insert', (['C', '(1)', 'v0'], {'axis': '(0)'}), '(C, 1, v0, axis=0)\n', (3705, 3723), True, 'import numpy as np\n'), ((3732, 3760), 'numpy.insert', 'np.insert', (['C', '(-1)', 'vn'], {'axis': '(0)'}), '(C, -1, vn, axis=0)\n', (3741, 3760), True, 'import numpy as np\n'), ((3776, 3788), 'scipy.linalg.lu_factor', 'lu_factor', (['M'], {}), '(M)\n', (3785, 3788), False, 'from scipy.linalg import lu_factor, lu_solve\n'), ((4347, 4386), 'compas_nurbs.helpers.find_spans', 'find_spans', (['knot_vector', 'num_points', 'uk'], {}), '(knot_vector, num_points, uk)\n', (4357, 4386), False, 'from compas_nurbs.helpers import find_spans\n'), ((4399, 4446), 'compas_nurbs.helpers.basis_functions', 'basis_functions', (['degree', 'knot_vector', 'spans', 'uk'], {}), '(degree, knot_vector, spans, uk)\n', (4414, 4446), False, 'from compas_nurbs.helpers import basis_functions\n'), ((4644, 4655), 'numpy.array', 'np.array', (['M'], {}), '(M)\n', (4652, 4655), True, 'import numpy as np\n'), ((6882, 
6924), 'compas_nurbs.Curve', 'Curve', (['control_points', 'degree', 'knot_vector'], {}), '(control_points, degree, knot_vector)\n', (6887, 6924), False, 'from compas_nurbs import Curve\n'), ((7385, 7420), 'compas_nurbs.Curve', 'Curve', (['result[0]', 'degree', 'result[1]'], {}), '(result[0], degree, result[1])\n', (7390, 7420), False, 'from compas_nurbs import Curve\n'), ((7484, 7514), 'compas.geometry.allclose', 'allclose', (['D0', 'start_derivative'], {}), '(D0, start_derivative)\n', (7492, 7514), False, 'from compas.geometry import allclose\n'), ((7527, 7555), 'compas.geometry.allclose', 'allclose', (['D1', 'end_derivative'], {}), '(D1, end_derivative)\n', (7535, 7555), False, 'from compas.geometry import allclose\n'), ((7627, 7662), 'compas_nurbs.Curve', 'Curve', (['result[0]', 'degree', 'result[1]'], {}), '(result[0], degree, result[1])\n', (7632, 7662), False, 'from compas_nurbs import Curve\n'), ((7726, 7766), 'compas.geometry.allclose', 'allclose', (['D0', 'start_derivative'], {'tol': '(0.01)'}), '(D0, start_derivative, tol=0.01)\n', (7734, 7766), False, 'from compas.geometry import allclose\n'), ((7779, 7817), 'compas.geometry.allclose', 'allclose', (['D1', 'end_derivative'], {'tol': '(0.01)'}), '(D1, end_derivative, tol=0.01)\n', (7787, 7817), False, 'from compas.geometry import allclose\n'), ((1635, 1662), 'scipy.linalg.lu_solve', 'lu_solve', (['(lu, piv)', 'points'], {}), '((lu, piv), points)\n', (1643, 1662), False, 'from scipy.linalg import lu_factor, lu_solve\n'), ((3800, 3822), 'scipy.linalg.lu_solve', 'lu_solve', (['(lu, piv)', 'C'], {}), '((lu, piv), C)\n', (3808, 3822), False, 'from scipy.linalg import lu_factor, lu_solve\n'), ((3537, 3563), 'numpy.array', 'np.array', (['start_derivative'], {}), '(start_derivative)\n', (3545, 3563), True, 'import numpy as np\n'), ((3599, 3623), 'numpy.array', 'np.array', (['end_derivative'], {}), '(end_derivative)\n', (3607, 3623), True, 'import numpy as np\n')] |
import time
import math
import random
import numpy as np
import basis.robot_math as rm
import networkx as nx
import matplotlib.pyplot as plt
from operator import itemgetter
class RRTDW(object):
def __init__(self, robot_s):
self.robot_s = robot_s.copy()
self.roadmap = nx.Graph()
self.start_conf = None
self.goal_conf = None
def _is_collided(self,
component_name,
conf,
obstacle_list=[],
otherrobot_list=[]):
self.robot_s.fk(component_name=component_name, jnt_values=conf)
return self.robot_s.is_collided(obstacle_list=obstacle_list, otherrobot_list=otherrobot_list)
def _sample_conf(self, component_name, rand_rate, default_conf):
if random.randint(0, 99) < rand_rate:
return self.robot_s.rand_conf(component_name=component_name)
else:
return default_conf
def _get_nearest_nid(self, roadmap, new_conf):
"""
convert to numpy to accelerate access
:param roadmap:
:param new_conf:
:return:
"""
nodes_dict = dict(roadmap.nodes(data='conf'))
nodes_key_list = list(nodes_dict.keys())
nodes_value_list = list(nodes_dict.values()) # attention, correspondence is not guanranteed in python
# use the following alternative if correspondence is bad (a bit slower), 20210523, weiwei
# # nodes_value_list = list(nodes_dict.values())
# nodes_value_list = itemgetter(*nodes_key_list)(nodes_dict)
# if type(nodes_value_list) == np.ndarray:
# nodes_value_list = [nodes_value_list]
conf_array = np.array(nodes_value_list)
diff_conf_array = np.linalg.norm(conf_array - new_conf, axis=1)
min_dist_nid = np.argmin(diff_conf_array)
return nodes_key_list[min_dist_nid]
def _extend_conf(self, conf1, conf2, ext_dist):
"""
WARNING: This extend_conf is specially designed for differential-wheel robots
:param conf1:
:param conf2:
:param ext_dist:
:return: a list of 1xn nparray
"""
angle_ext_dist = ext_dist
len, vec = rm.unit_vector(conf2[:2] - conf1[:2], toggle_length=True)
if len > 0:
translational_theta = rm.angle_between_2d_vectors(np.array([1, 0]), vec)
conf1_theta_to_translational_theta = translational_theta - conf1[2]
else:
conf1_theta_to_translational_theta = (conf2[2] - conf1[2])
translational_theta = conf2[2]
# rotate
nval = abs(math.ceil(conf1_theta_to_translational_theta / angle_ext_dist))
linear_conf1 = np.array([conf1[0], conf1[1], translational_theta])
conf1_angular_arary = np.linspace(conf1, linear_conf1, nval)
# translate
nval = math.ceil(len / ext_dist)
linear_conf2 = np.array([conf2[0], conf2[1], translational_theta])
conf12_linear_arary = np.linspace(linear_conf1, linear_conf2, nval)
# rotate
translational_theta_to_conf2_theta = conf2[2] - translational_theta
nval = abs(math.ceil(translational_theta_to_conf2_theta / angle_ext_dist))
conf2_angular_arary = np.linspace(linear_conf2, conf2, nval)
conf_array = np.vstack((conf1_angular_arary, conf12_linear_arary, conf2_angular_arary))
return list(conf_array)
def _extend_roadmap(self,
component_name,
roadmap,
conf,
ext_dist,
goal_conf,
obstacle_list=[],
otherrobot_list=[],
animation=False):
"""
find the nearest point between the given roadmap and the conf and then extend towards the conf
:return:
"""
nearest_nid = self._get_nearest_nid(roadmap, conf)
new_conf_list = self._extend_conf(roadmap.nodes[nearest_nid]['conf'], conf, ext_dist)[1:]
for new_conf in new_conf_list:
if self._is_collided(component_name, new_conf, obstacle_list, otherrobot_list):
return nearest_nid
else:
new_nid = random.randint(0, 1e16)
roadmap.add_node(new_nid, conf=new_conf)
roadmap.add_edge(nearest_nid, new_nid)
nearest_nid = new_nid
# all_sampled_confs.append([new_node.point, False])
if animation:
self.draw_wspace([roadmap], self.start_conf, self.goal_conf,
obstacle_list, [roadmap.nodes[nearest_nid]['conf'], conf],
new_conf)
# check goal
if self._goal_test(conf=roadmap.nodes[new_nid]['conf'], goal_conf=goal_conf, threshold=ext_dist):
roadmap.add_node('connection', conf=goal_conf) # TODO current name -> connection
roadmap.add_edge(new_nid, 'connection')
return 'connection'
else:
return nearest_nid
def _goal_test(self, conf, goal_conf, threshold):
dist = np.linalg.norm(conf - goal_conf)
if dist <= threshold:
# print("Goal reached!")
return True
else:
return False
def _path_from_roadmap(self):
nid_path = nx.shortest_path(self.roadmap, 'start', 'goal')
return list(itemgetter(*nid_path)(self.roadmap.nodes(data='conf')))
def _smooth_path(self,
component_name,
path,
obstacle_list=[],
otherrobot_list=[],
granularity=2,
iterations=50,
animation=False):
smoothed_path = path
for _ in range(iterations):
if len(smoothed_path) <= 2:
return smoothed_path
i = random.randint(0, len(smoothed_path) - 1)
j = random.randint(0, len(smoothed_path) - 1)
if abs(i - j) <= 1:
continue
if j < i:
i, j = j, i
shortcut = self._extend_conf(smoothed_path[i], smoothed_path[j], granularity)
if all(not self._is_collided(component_name=component_name,
conf=conf,
obstacle_list=obstacle_list,
otherrobot_list=otherrobot_list)
for conf in shortcut):
smoothed_path = smoothed_path[:i] + shortcut + smoothed_path[j + 1:]
if animation:
self.draw_wspace([self.roadmap], self.start_conf, self.goal_conf,
obstacle_list, shortcut=shortcut, smoothed_path=smoothed_path)
return smoothed_path
def plan(self,
component_name,
start_conf,
goal_conf,
obstacle_list=[],
otherrobot_list=[],
ext_dist=2,
rand_rate=70,
max_iter=1000,
max_time=15.0,
smoothing_iterations=50,
animation=False):
"""
:return: [path, all_sampled_confs]
"""
self.roadmap.clear()
self.start_conf = start_conf
self.goal_conf = goal_conf
# check seed_jnt_values and end_conf
if self._is_collided(component_name, start_conf, obstacle_list, otherrobot_list):
print("The start robot_s configuration is in collision!")
return None
if self._is_collided(component_name, goal_conf, obstacle_list, otherrobot_list):
print("The goal robot_s configuration is in collision!")
return None
if self._goal_test(conf=start_conf, goal_conf=goal_conf, threshold=ext_dist):
return [[start_conf, goal_conf], None]
self.roadmap.add_node('start', conf=start_conf)
tic = time.time()
for _ in range(max_iter):
toc = time.time()
if max_time > 0.0:
if toc - tic > max_time:
print("Too much motion time! Failed to find a path.")
return None
# Random Sampling
rand_conf = self._sample_conf(component_name=component_name, rand_rate=rand_rate, default_conf=goal_conf)
last_nid = self._extend_roadmap(component_name=component_name,
roadmap=self.roadmap,
conf=rand_conf,
ext_dist=ext_dist,
goal_conf=goal_conf,
obstacle_list=obstacle_list,
otherrobot_list=otherrobot_list,
animation=animation)
if last_nid == 'connection':
mapping = {'connection': 'goal'}
self.roadmap = nx.relabel_nodes(self.roadmap, mapping)
path = self._path_from_roadmap()
smoothed_path = self._smooth_path(component_name=component_name,
path=path,
obstacle_list=obstacle_list,
otherrobot_list=otherrobot_list,
granularity=ext_dist,
iterations=smoothing_iterations,
animation=animation)
return smoothed_path
else:
print("Reach to maximum iteration! Failed to find a path.")
return None
@staticmethod
def draw_robot(plt, conf, facecolor='grey', edgecolor='grey'):
ax = plt.gca()
x = conf[0]
y = conf[1]
theta = conf[2]
ax.add_patch(plt.Circle((x, y), .5, edgecolor=edgecolor, facecolor=facecolor))
ax.add_patch(plt.Rectangle((x, y), .7, .1, math.degrees(theta), color='y'))
ax.add_patch(plt.Rectangle((x, y), -.1, .1, math.degrees(theta),
edgecolor=edgecolor, facecolor=facecolor))
ax.add_patch(plt.Rectangle((x, y), .7, -.1, math.degrees(theta), color='y'))
ax.add_patch(plt.Rectangle((x, y), -.1, -.1, math.degrees(theta),
edgecolor=edgecolor, facecolor=facecolor))
    @staticmethod
    def draw_wspace(roadmap_list,
                    start_conf,
                    goal_conf,
                    obstacle_list,
                    near_rand_conf_pair=None,
                    new_conf=None,
                    shortcut=None,
                    smoothed_path=None,
                    delay_time=.02):
        """Clear the current figure and redraw the 2D workspace.

        Draws: start/goal robot poses, circular obstacles, every roadmap's nodes
        and edges (one color per roadmap), and - when given - the nearest/random
        conf pair, the newest conf, the candidate shortcut and the smoothed path.

        :param roadmap_list: graphs whose nodes carry a 'conf' attribute
        :param start_conf: (x, y, theta) start pose, drawn in red
        :param goal_conf: (x, y, theta) goal pose, drawn in green
        :param obstacle_list: iterable of ((x, y), diameter) circle obstacles
        :param near_rand_conf_pair: optional (nearest_conf, rand_conf) pair
        :param new_conf: optional most recently added configuration
        :param shortcut: optional candidate shortcut segment (dashed red)
        :param smoothed_path: optional smoothed path (thick cyan)
        :param delay_time: pause length in seconds; <= 0 skips the pause
        """
        plt.clf()
        ax = plt.gca()
        ax.set_aspect('equal', 'box')
        plt.grid(True)
        # Fixed view window matching the demo workspace.
        plt.xlim(-4.0, 17.0)
        plt.ylim(-4.0, 17.0)
        RRTDW.draw_robot(plt, start_conf, facecolor='r', edgecolor='r')
        RRTDW.draw_robot(plt, goal_conf, facecolor='g', edgecolor='g')
        for (point, size) in obstacle_list:
            # `size` is a diameter, hence the /2.0 radius.
            ax.add_patch(plt.Circle((point[0], point[1]), size / 2.0, color='k'))
        colors = 'bgrcmykw'
        for i, roadmap in enumerate(roadmap_list):
            # Plot both endpoints and the connecting segment of every edge.
            for (u, v) in roadmap.edges:
                plt.plot(roadmap.nodes[u]['conf'][0], roadmap.nodes[u]['conf'][1], 'o' + colors[i])
                plt.plot(roadmap.nodes[v]['conf'][0], roadmap.nodes[v]['conf'][1], 'o' + colors[i])
                plt.plot([roadmap.nodes[u]['conf'][0], roadmap.nodes[v]['conf'][0]],
                         [roadmap.nodes[u]['conf'][1], roadmap.nodes[v]['conf'][1]], '-' + colors[i])
        if near_rand_conf_pair is not None:
            plt.plot([near_rand_conf_pair[0][0], near_rand_conf_pair[1][0]],
                     [near_rand_conf_pair[0][1], near_rand_conf_pair[1][1]], "--k")
            RRTDW.draw_robot(plt, near_rand_conf_pair[0], facecolor='grey', edgecolor='g')
            RRTDW.draw_robot(plt, near_rand_conf_pair[1], facecolor='grey', edgecolor='c')
        if new_conf is not None:
            RRTDW.draw_robot(plt, new_conf, facecolor='grey', edgecolor='c')
        if smoothed_path is not None:
            plt.plot([conf[0] for conf in smoothed_path], [conf[1] for conf in smoothed_path], linewidth=7,
                     linestyle='-', color='c')
        if shortcut is not None:
            plt.plot([conf[0] for conf in shortcut], [conf[1] for conf in shortcut], linewidth=4, linestyle='--',
                     color='r')
        # Frame counter kept on the class (used by the commented savefig line).
        if not hasattr(RRTDW, 'img_counter'):
            RRTDW.img_counter = 0
        else:
            RRTDW.img_counter += 1
        # plt.savefig(str( RRT.img_counter)+'.jpg')
        if delay_time > 0:
            plt.pause(delay_time)
        # plt.waitforbuttonpress()
# plt.waitforbuttonpress()
if __name__ == '__main__':
    import robot_sim._kinematics.jlchain as jl
    import robot_sim.robots.robot_interface as ri
    # Minimal 3-DoF differential-wheel car model: prismatic x, prismatic y,
    # revolute heading -- matching the planner's (x, y, theta) configurations.
    class DWCARBOT(ri.RobotInterface):
        # NOTE(review): np.zeros(3)/np.eye(3) defaults are mutable arrays shared
        # across calls -- harmless if never mutated, but worth confirming.
        def __init__(self, pos=np.zeros(3), rotmat=np.eye(3), name='TwoWheelCarBot'):
            super().__init__(pos=pos, rotmat=rotmat, name=name)
            self.jlc = jl.JLChain(homeconf=np.zeros(3), name='XYBot')
            # Joint 1: translation along x, limited to [-2, 15].
            self.jlc.jnts[1]['type'] = 'prismatic'
            self.jlc.jnts[1]['loc_motionax'] = np.array([1, 0, 0])
            self.jlc.jnts[1]['loc_pos'] = np.zeros(3)
            self.jlc.jnts[1]['motion_rng'] = [-2.0, 15.0]
            # Joint 2: translation along y, limited to [-2, 15].
            self.jlc.jnts[2]['type'] = 'prismatic'
            self.jlc.jnts[2]['loc_motionax'] = np.array([0, 1, 0])
            self.jlc.jnts[2]['loc_pos'] = np.zeros(3)
            self.jlc.jnts[2]['motion_rng'] = [-2.0, 15.0]
            # Joint 3: rotation about z (heading), limited to [-pi, pi].
            self.jlc.jnts[3]['loc_motionax'] = np.array([0, 0, 1])
            self.jlc.jnts[3]['loc_pos'] = np.zeros(3)
            self.jlc.jnts[3]['motion_rng'] = [-math.pi, math.pi]
            self.jlc.reinitialize()
        def fk(self, component_name='all', jnt_values=np.zeros(3)):
            """Forward kinematics; only the 'all' component is supported."""
            if component_name != 'all':
                raise ValueError("Only support hnd_name == 'all'!")
            self.jlc.fk(jnt_values)
        def rand_conf(self, component_name='all'):
            """Return a random configuration within the joint limits."""
            if component_name != 'all':
                raise ValueError("Only support hnd_name == 'all'!")
            return self.jlc.rand_conf()
        def get_jntvalues(self, component_name='all'):
            """Return the current joint values (x, y, theta)."""
            if component_name != 'all':
                raise ValueError("Only support hnd_name == 'all'!")
            return self.jlc.get_jnt_values()
        def is_collided(self, obstacle_list=[], otherrobot_list=[]):
            """Point-vs-circle check: collided when the (x, y) position lies
            inside any obstacle disc; otherrobot_list is ignored."""
            for (obpos, size) in obstacle_list:
                dist = np.linalg.norm(np.asarray(obpos) - self.get_jntvalues()[:2])
                if dist <= size / 2.0:
                    return True  # collision
            return False  # safe
    # ====Search Path with RRT====
    obstacle_list = [
        ((5, 5), 3),
        ((3, 6), 3),
        ((3, 8), 3),
        ((3, 10), 3),
        ((7, 5), 3),
        ((9, 5), 3),
        ((10, 5), 3)
    ]  # [x,y,size]
    # Set Initial parameters
    robot = DWCARBOT()
    rrtdw = RRTDW(robot)
    path = rrtdw.plan(start_conf=np.array([0, 0, 0]), goal_conf=np.array([6, 9, 0]), obstacle_list=obstacle_list,
                      ext_dist=1, rand_rate=70, max_time=300, component_name='all', animation=True)
    # plt.show()
    # nx.draw(rrt.roadmap, with_labels=True, font_weight='bold')
    # plt.show()
    # import time
    # total_t = 0
    # for i in range(1):
    #     tic = time.time()
    #     path, sampledpoints = rrt.motion(obstaclelist=obstaclelist, animation=True)
    #     toc = time.time()
    #     total_t = total_t + toc - tic
    # print(total_t)
    # Draw final path
    print(path)
    rrtdw.draw_wspace([rrtdw.roadmap], rrtdw.start_conf, rrtdw.goal_conf, obstacle_list, delay_time=0)
    for conf in path:
        RRTDW.draw_robot(plt, conf, edgecolor='r')
    # pathsm = smoother.pathsmoothing(path, rrt, 30)
    # plt.plot([point[0] for point in pathsm], [point[1] for point in pathsm], '-r')
    # plt.pause(0.001)  # Need for Mac
    plt.show()
| [
"matplotlib.pyplot.clf",
"numpy.argmin",
"numpy.linalg.norm",
"matplotlib.pyplot.gca",
"basis.robot_math.unit_vector",
"random.randint",
"networkx.shortest_path",
"numpy.linspace",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show",
"math.ceil",
"matplotlib.pyplot.ylim",
"numpy.asarray",
... | [((16325, 16335), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16333, 16335), True, 'import matplotlib.pyplot as plt\n'), ((291, 301), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (299, 301), True, 'import networkx as nx\n'), ((1696, 1722), 'numpy.array', 'np.array', (['nodes_value_list'], {}), '(nodes_value_list)\n', (1704, 1722), True, 'import numpy as np\n'), ((1749, 1794), 'numpy.linalg.norm', 'np.linalg.norm', (['(conf_array - new_conf)'], {'axis': '(1)'}), '(conf_array - new_conf, axis=1)\n', (1763, 1794), True, 'import numpy as np\n'), ((1818, 1844), 'numpy.argmin', 'np.argmin', (['diff_conf_array'], {}), '(diff_conf_array)\n', (1827, 1844), True, 'import numpy as np\n'), ((2214, 2271), 'basis.robot_math.unit_vector', 'rm.unit_vector', (['(conf2[:2] - conf1[:2])'], {'toggle_length': '(True)'}), '(conf2[:2] - conf1[:2], toggle_length=True)\n', (2228, 2271), True, 'import basis.robot_math as rm\n'), ((2708, 2759), 'numpy.array', 'np.array', (['[conf1[0], conf1[1], translational_theta]'], {}), '([conf1[0], conf1[1], translational_theta])\n', (2716, 2759), True, 'import numpy as np\n'), ((2790, 2828), 'numpy.linspace', 'np.linspace', (['conf1', 'linear_conf1', 'nval'], {}), '(conf1, linear_conf1, nval)\n', (2801, 2828), True, 'import numpy as np\n'), ((2864, 2889), 'math.ceil', 'math.ceil', (['(len / ext_dist)'], {}), '(len / ext_dist)\n', (2873, 2889), False, 'import math\n'), ((2913, 2964), 'numpy.array', 'np.array', (['[conf2[0], conf2[1], translational_theta]'], {}), '([conf2[0], conf2[1], translational_theta])\n', (2921, 2964), True, 'import numpy as np\n'), ((2995, 3040), 'numpy.linspace', 'np.linspace', (['linear_conf1', 'linear_conf2', 'nval'], {}), '(linear_conf1, linear_conf2, nval)\n', (3006, 3040), True, 'import numpy as np\n'), ((3247, 3285), 'numpy.linspace', 'np.linspace', (['linear_conf2', 'conf2', 'nval'], {}), '(linear_conf2, conf2, nval)\n', (3258, 3285), True, 'import numpy as np\n'), ((3307, 3381), 'numpy.vstack', 
'np.vstack', (['(conf1_angular_arary, conf12_linear_arary, conf2_angular_arary)'], {}), '((conf1_angular_arary, conf12_linear_arary, conf2_angular_arary))\n', (3316, 3381), True, 'import numpy as np\n'), ((5213, 5245), 'numpy.linalg.norm', 'np.linalg.norm', (['(conf - goal_conf)'], {}), '(conf - goal_conf)\n', (5227, 5245), True, 'import numpy as np\n'), ((5430, 5477), 'networkx.shortest_path', 'nx.shortest_path', (['self.roadmap', '"""start"""', '"""goal"""'], {}), "(self.roadmap, 'start', 'goal')\n", (5446, 5477), True, 'import networkx as nx\n'), ((8046, 8057), 'time.time', 'time.time', ([], {}), '()\n', (8055, 8057), False, 'import time\n'), ((9978, 9987), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9985, 9987), True, 'import matplotlib.pyplot as plt\n'), ((10964, 10973), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (10971, 10973), True, 'import matplotlib.pyplot as plt\n'), ((10987, 10996), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10994, 10996), True, 'import matplotlib.pyplot as plt\n'), ((11043, 11057), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (11051, 11057), True, 'import matplotlib.pyplot as plt\n'), ((11066, 11086), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-4.0)', '(17.0)'], {}), '(-4.0, 17.0)\n', (11074, 11086), True, 'import matplotlib.pyplot as plt\n'), ((11095, 11115), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-4.0)', '(17.0)'], {}), '(-4.0, 17.0)\n', (11103, 11115), True, 'import matplotlib.pyplot as plt\n'), ((791, 812), 'random.randint', 'random.randint', (['(0)', '(99)'], {}), '(0, 99)\n', (805, 812), False, 'import random\n'), ((2621, 2683), 'math.ceil', 'math.ceil', (['(conf1_theta_to_translational_theta / angle_ext_dist)'], {}), '(conf1_theta_to_translational_theta / angle_ext_dist)\n', (2630, 2683), False, 'import math\n'), ((3153, 3215), 'math.ceil', 'math.ceil', (['(translational_theta_to_conf2_theta / angle_ext_dist)'], {}), '(translational_theta_to_conf2_theta / 
angle_ext_dist)\n', (3162, 3215), False, 'import math\n'), ((8110, 8121), 'time.time', 'time.time', ([], {}), '()\n', (8119, 8121), False, 'import time\n'), ((10073, 10138), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(x, y)', '(0.5)'], {'edgecolor': 'edgecolor', 'facecolor': 'facecolor'}), '((x, y), 0.5, edgecolor=edgecolor, facecolor=facecolor)\n', (10083, 10138), True, 'import matplotlib.pyplot as plt\n'), ((11948, 12080), 'matplotlib.pyplot.plot', 'plt.plot', (['[near_rand_conf_pair[0][0], near_rand_conf_pair[1][0]]', '[near_rand_conf_pair[0][1], near_rand_conf_pair[1][1]]', '"""--k"""'], {}), "([near_rand_conf_pair[0][0], near_rand_conf_pair[1][0]], [\n near_rand_conf_pair[0][1], near_rand_conf_pair[1][1]], '--k')\n", (11956, 12080), True, 'import matplotlib.pyplot as plt\n'), ((12439, 12564), 'matplotlib.pyplot.plot', 'plt.plot', (['[conf[0] for conf in smoothed_path]', '[conf[1] for conf in smoothed_path]'], {'linewidth': '(7)', 'linestyle': '"""-"""', 'color': '"""c"""'}), "([conf[0] for conf in smoothed_path], [conf[1] for conf in\n smoothed_path], linewidth=7, linestyle='-', color='c')\n", (12447, 12564), True, 'import matplotlib.pyplot as plt\n'), ((12627, 12743), 'matplotlib.pyplot.plot', 'plt.plot', (['[conf[0] for conf in shortcut]', '[conf[1] for conf in shortcut]'], {'linewidth': '(4)', 'linestyle': '"""--"""', 'color': '"""r"""'}), "([conf[0] for conf in shortcut], [conf[1] for conf in shortcut],\n linewidth=4, linestyle='--', color='r')\n", (12635, 12743), True, 'import matplotlib.pyplot as plt\n'), ((12981, 13002), 'matplotlib.pyplot.pause', 'plt.pause', (['delay_time'], {}), '(delay_time)\n', (12990, 13002), True, 'import matplotlib.pyplot as plt\n'), ((13237, 13248), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (13245, 13248), True, 'import numpy as np\n'), ((13257, 13266), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (13263, 13266), True, 'import numpy as np\n'), ((13524, 13543), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 
0, 0])\n', (13532, 13543), True, 'import numpy as np\n'), ((13586, 13597), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (13594, 13597), True, 'import numpy as np\n'), ((13754, 13773), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (13762, 13773), True, 'import numpy as np\n'), ((13816, 13827), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (13824, 13827), True, 'import numpy as np\n'), ((13933, 13952), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (13941, 13952), True, 'import numpy as np\n'), ((13995, 14006), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (14003, 14006), True, 'import numpy as np\n'), ((14163, 14174), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (14171, 14174), True, 'import numpy as np\n'), ((15386, 15405), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (15394, 15405), True, 'import numpy as np\n'), ((15417, 15436), 'numpy.array', 'np.array', (['[6, 9, 0]'], {}), '([6, 9, 0])\n', (15425, 15436), True, 'import numpy as np\n'), ((2354, 2370), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (2362, 2370), True, 'import numpy as np\n'), ((4257, 4281), 'random.randint', 'random.randint', (['(0)', '(1e+16)'], {}), '(0, 1e+16)\n', (4271, 4281), False, 'import random\n'), ((5498, 5519), 'operator.itemgetter', 'itemgetter', (['*nid_path'], {}), '(*nid_path)\n', (5508, 5519), False, 'from operator import itemgetter\n'), ((9113, 9152), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['self.roadmap', 'mapping'], {}), '(self.roadmap, mapping)\n', (9129, 9152), True, 'import networkx as nx\n'), ((10190, 10209), 'math.degrees', 'math.degrees', (['theta'], {}), '(theta)\n', (10202, 10209), False, 'import math\n'), ((10275, 10294), 'math.degrees', 'math.degrees', (['theta'], {}), '(theta)\n', (10287, 10294), False, 'import math\n'), ((10426, 10445), 'math.degrees', 'math.degrees', (['theta'], {}), '(theta)\n', (10438, 10445), False, 'import math\n'), ((10512, 10531), 
'math.degrees', 'math.degrees', (['theta'], {}), '(theta)\n', (10524, 10531), False, 'import math\n'), ((11328, 11383), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(point[0], point[1])', '(size / 2.0)'], {'color': '"""k"""'}), "((point[0], point[1]), size / 2.0, color='k')\n", (11338, 11383), True, 'import matplotlib.pyplot as plt\n'), ((11521, 11608), 'matplotlib.pyplot.plot', 'plt.plot', (["roadmap.nodes[u]['conf'][0]", "roadmap.nodes[u]['conf'][1]", "('o' + colors[i])"], {}), "(roadmap.nodes[u]['conf'][0], roadmap.nodes[u]['conf'][1], 'o' +\n colors[i])\n", (11529, 11608), True, 'import matplotlib.pyplot as plt\n'), ((11621, 11708), 'matplotlib.pyplot.plot', 'plt.plot', (["roadmap.nodes[v]['conf'][0]", "roadmap.nodes[v]['conf'][1]", "('o' + colors[i])"], {}), "(roadmap.nodes[v]['conf'][0], roadmap.nodes[v]['conf'][1], 'o' +\n colors[i])\n", (11629, 11708), True, 'import matplotlib.pyplot as plt\n'), ((11721, 11871), 'matplotlib.pyplot.plot', 'plt.plot', (["[roadmap.nodes[u]['conf'][0], roadmap.nodes[v]['conf'][0]]", "[roadmap.nodes[u]['conf'][1], roadmap.nodes[v]['conf'][1]]", "('-' + colors[i])"], {}), "([roadmap.nodes[u]['conf'][0], roadmap.nodes[v]['conf'][0]], [\n roadmap.nodes[u]['conf'][1], roadmap.nodes[v]['conf'][1]], '-' + colors[i])\n", (11729, 11871), True, 'import matplotlib.pyplot as plt\n'), ((13399, 13410), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (13407, 13410), True, 'import numpy as np\n'), ((14886, 14903), 'numpy.asarray', 'np.asarray', (['obpos'], {}), '(obpos)\n', (14896, 14903), True, 'import numpy as np\n')] |
import numpy as np
import cv2

# Source clip to segment; hard-coded path from the original author.
cap = cv2.VideoCapture("C:\\Users\\prana\\Desktop\\droplet.mov")
# NOTE(review): 'DVIX' looks like a typo for the 'DIVX' FourCC; `fourcc` is
# currently unused because the VideoWriter lines below are commented out.
fourcc = cv2.VideoWriter_fourcc(*'DVIX')
#frameVid = cv2.VideoWriter('frame.avi',-1, 20.0, (640,480))
#maskVid = cv2.VideoWriter('mask.avi',-1, 20.0, (640,480))
#resVid = cv2.VideoWriter('res.avi',-1, 20.0, (640,480))
#cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    # Bug fix: check the read result *before* processing.  At end-of-stream
    # cap.read() returns (False, None), and cv2.cvtColor(None, ...) raised.
    if not ret:
        break
    # Mask pixels whose HSV values fall in the given range, then show the
    # original frame, the binary mask, and the masked color result.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_blue = np.array([38, 50, 50])
    upper_blue = np.array([130, 255, 255])
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('res', res)
    #frameVid.write(frame)
    #maskVid.write(mask)
    #resVid.write(res)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()
cap.release()
#frameVid.release()
#maskVid.release()
#resVid.release()
| [
"cv2.VideoWriter_fourcc",
"cv2.bitwise_and",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.inRange"
] | [((40, 98), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""C:\\\\Users\\\\prana\\\\Desktop\\\\droplet.mov"""'], {}), "('C:\\\\Users\\\\prana\\\\Desktop\\\\droplet.mov')\n", (56, 98), False, 'import cv2\n'), ((109, 140), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'DVIX'"], {}), "(*'DVIX')\n", (131, 140), False, 'import cv2\n'), ((958, 981), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (979, 981), False, 'import cv2\n'), ((416, 454), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (428, 454), False, 'import cv2\n'), ((475, 497), 'numpy.array', 'np.array', (['[38, 50, 50]'], {}), '([38, 50, 50])\n', (483, 497), True, 'import numpy as np\n'), ((514, 539), 'numpy.array', 'np.array', (['[130, 255, 255]'], {}), '([130, 255, 255])\n', (522, 539), True, 'import numpy as np\n'), ((556, 596), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_blue', 'upper_blue'], {}), '(hsv, lower_blue, upper_blue)\n', (567, 596), False, 'import cv2\n'), ((614, 654), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'mask'}), '(frame, frame, mask=mask)\n', (629, 654), False, 'import cv2\n'), ((666, 692), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (676, 692), False, 'import cv2\n'), ((697, 721), 'cv2.imshow', 'cv2.imshow', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (707, 721), False, 'import cv2\n'), ((726, 748), 'cv2.imshow', 'cv2.imshow', (['"""res"""', 'res'], {}), "('res', res)\n", (736, 748), False, 'import cv2\n'), ((871, 885), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (882, 885), False, 'import cv2\n')] |
import cv2
import numpy as np
# Shared state mutated by the draw_circle mouse callback below.
drawing = False # true if mouse is pressed
# NOTE(review): the 'm' toggle mentioned here is not wired up in __main__.
mode = True # if True, draw rectangle. Press 'm' to toggle to curve
ix, iy = -1, -1
def draw_circle(event, x, y, flags, param):
    """Mouse callback: drag to paint onto the global `img`.

    While the left button is held, mouse movement paints either a filled green
    rectangle anchored at the press point (mode=True) or red dots (mode=False);
    releasing the button paints once more and ends the drag.
    """
    global ix, iy, drawing, mode
    if event == cv2.EVENT_LBUTTONDOWN:
        # Start of a drag: remember the anchor corner.
        drawing = True
        ix, iy = x, y
        return
    painting = (event == cv2.EVENT_MOUSEMOVE and drawing) or event == cv2.EVENT_LBUTTONUP
    if event == cv2.EVENT_LBUTTONUP:
        drawing = False
    if painting:
        if mode:
            cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), -1)
        else:
            cv2.circle(img, (x, y), 5, (0, 0, 255), -1)
if __name__ == '__main__':
    # All cv2 attribute names containing 'EVENT' (printed at exit for reference).
    events = [name for name in dir(cv2) if 'EVENT' in name]
    # Black canvas plus a window wired to the mouse callback.
    img = np.zeros((512, 512, 3), np.uint8)
    cv2.namedWindow('image')
    cv2.setMouseCallback('image', draw_circle)
    while True:
        cv2.imshow('image', img)
        if cv2.waitKey(20) & 0xFF == 27:  # ESC quits
            break
    cv2.destroyAllWindows()
    print(events)
| [
"cv2.circle",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.zeros",
"cv2.setMouseCallback",
"cv2.rectangle",
"cv2.imshow",
"cv2.namedWindow"
] | [((951, 984), 'numpy.zeros', 'np.zeros', (['(512, 512, 3)', 'np.uint8'], {}), '((512, 512, 3), np.uint8)\n', (959, 984), True, 'import numpy as np\n'), ((989, 1013), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (1004, 1013), False, 'import cv2\n'), ((1018, 1060), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""image"""', 'draw_circle'], {}), "('image', draw_circle)\n", (1038, 1060), False, 'import cv2\n'), ((1173, 1196), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1194, 1196), False, 'import cv2\n'), ((1085, 1109), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (1095, 1109), False, 'import cv2\n'), ((1121, 1136), 'cv2.waitKey', 'cv2.waitKey', (['(20)'], {}), '(20)\n', (1132, 1136), False, 'import cv2\n'), ((435, 488), 'cv2.rectangle', 'cv2.rectangle', (['img', '(ix, iy)', '(x, y)', '(0, 255, 0)', '(-1)'], {}), '(img, (ix, iy), (x, y), (0, 255, 0), -1)\n', (448, 488), False, 'import cv2\n'), ((523, 566), 'cv2.circle', 'cv2.circle', (['img', '(x, y)', '(5)', '(0, 0, 255)', '(-1)'], {}), '(img, (x, y), 5, (0, 0, 255), -1)\n', (533, 566), False, 'import cv2\n'), ((667, 720), 'cv2.rectangle', 'cv2.rectangle', (['img', '(ix, iy)', '(x, y)', '(0, 255, 0)', '(-1)'], {}), '(img, (ix, iy), (x, y), (0, 255, 0), -1)\n', (680, 720), False, 'import cv2\n'), ((747, 790), 'cv2.circle', 'cv2.circle', (['img', '(x, y)', '(5)', '(0, 0, 255)', '(-1)'], {}), '(img, (x, y), 5, (0, 0, 255), -1)\n', (757, 790), False, 'import cv2\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import threading
import matplotlib.pyplot as plt
import numpy as np
import re
import sys
import time
import math
#Reading detector (Elmadjian, 2015)
#------------------------------------
class Detector (threading.Thread):
    """Reading-detection thread.

    Consumes gaze samples handed over through a condition variable (one sample
    per notify), classifies each finite-difference velocity as forward-reading
    movement, regression, fixation, or unrelated, and accumulates an evidence
    score in `self.state`.  Samples whose score is >= `thresh` are recorded as
    reading time in the module-level `read_on` list.  Results are also appended
    to several module-level plotting lists (timelist, xlist, dxlist, ...), which
    must exist before the thread runs.
    """
    def __init__(self, thresh, cv):
        threading.Thread.__init__(self)
        # Current / next gaze sample components (x, y, timestamp).
        self.curr_x = None
        self.curr_y = None
        self.curr_t = None
        self.next_x = None
        self.next_y = None
        self.next_t = None
        # Evidence threshold above which a sample counts as "reading".
        self.thresh = thresh
        # Accumulated evidence, clamped to [-30, 100] by changeState().
        self.state = 0
        self.ant_saccade = False
        # Set to True (then notify cv) to terminate the detection loop.
        self.stop = False
        # Condition variable used by the producer to hand over samples.
        self.cv = cv
        self.elapsed = time.time()
        # Counters: samples above threshold / below / total processed.
        self.cnt_yes = 0
        self.cnt_no = 0
        self.cnt_total = 0
    def run(self):
        """Thread entry point: run the detection loop until `stop` is set."""
        self.detect(self.cv)
    def storeValues(self, next_point):
        """Store the next (x, y, t) sample for the loop to consume."""
        self.next_x = next_point[0]
        self.next_y = next_point[1]
        self.next_t = next_point[2]
    def changeState(self, changeValue):
        """Add changeValue to the evidence score, clamping to [-30, 100]."""
        self.state += changeValue
        if self.state > 100:
            self.state = 100
        elif self.state < -30:
            self.state = -30
    def changePercentage(self, var, value):
        """Return var + value clamped to [15, 100] (reading/skimming scores)."""
        var += value
        if var > 100:
            var = 100
        elif var < 15:
            var = 15
        return var
    def checkCombination(self, buff):
        """Return True when at least 2 of the 3 buffered samples were
        classified as forward-reading movement."""
        if buff[0] and buff[1] and buff[2]:
            return True
        if buff[0] and buff[1] and not buff[2]:
            return True
        if buff[0] and not buff[1] and buff[2]:
            return True
        if not buff[0] and buff[1] and buff[2]:
            return True
        return False
    def detect(self, cv):
        """Detection loop: wait on cv for each new sample, classify its x/y
        velocity, update the evidence score, and record plotting data in the
        module-level lists.  Exits when `self.stop` is True."""
        prev_x = 0
        prev_y = 0
        prev_t = 0.000000001
        diff_cnt = 0
        # Sliding window (size 3) of forward-reading booleans.
        reading_buff = []
        yaxs_mov = np.array([])
        xaxs_mov = np.array([])
        global dxlist
        global dylist
        global timelist
        global xlist
        global ylist
        global xdetected
        global tdetected
        global xnot_related
        global tnot_related
        global read_on
        global skim_on
        short_mov = 0
        reading = 50
        skimming = 50
        # Fixed time step used for the finite-difference velocities.
        finite_diff = 0.1
        finite_cnt = 0
        while True:
            # The whole iteration runs holding the condition's lock.
            with cv:
                cv.wait()
                if self.stop:
                    break
                # Finite-difference velocity of the incoming sample.
                dx = (self.next_x - prev_x) / finite_diff
                dy = (self.next_y - prev_y) / finite_diff
                yaxs_mov = np.append(yaxs_mov, [prev_y])
                xaxs_mov = np.append(xaxs_mov, [prev_x])
                finite_cnt += 0.1
                timelist.append(finite_cnt)
                xlist.append(self.next_x)
                ylist.append(self.next_y)
                dxlist.append(dx)
                dylist.append(dy)
                #read forward
                if 0.15 < dx < 1.5 and -0.5 < dy < 0.5:
                    short_mov += 1
                    reading_buff.append(True)
                    if short_mov == 1:
                        ini = prev_x
                    if short_mov >= 1:
                        xdetected.append(self.next_x)
                        tdetected.append(finite_cnt)
                #regression
                elif dx < -2.0 and -1.0 < dy < 0.0:
                    reading_buff.append(False)
                    xdetected.append(self.next_x)
                    tdetected.append(finite_cnt)
                    if len(yaxs_mov) > 1 and short_mov >= 1:
                        # Horizontal span covered since the last regression,
                        # weighted by the number of short forward movements.
                        criteria = np.ptp(xaxs_mov) * short_mov
                        xaxs_mov = np.array([])
                        yaxs_mov = np.array([])
                        short_mov = 0
                        if criteria < 2:
                            skimming = self.changePercentage(skimming, 10)
                            reading = self.changePercentage(reading, -10)
                        else:
                            skimming = self.changePercentage(skimming, -10)
                            reading = self.changePercentage(reading, 10)
                #fixations
                elif -0.15 < dx < 0.15 and -0.2 < dy < 0.2:
                    pass
                #unrelated pattern
                else:
                    self.changeState(-10)
                    xnot_related.append(self.next_x)
                    tnot_related.append(finite_cnt)
                #validating window
                if len(reading_buff) == 3:
                    if self.checkCombination(reading_buff):
                        self.changeState(20)
                    else:
                        pass
                        #self.changeState(-5)
                    reading_buff.pop(0)
                #record state
                if self.state >= self.thresh:
                    self.cnt_yes += 1
                    read_on.append(finite_cnt)
                #print("time:", finite_cnt, "state:", self.state)
                prev_x = self.next_x
                prev_y = self.next_y
                prev_t = self.next_t
                self.cnt_total += 1
#Lê o arquivo de entrada
#-----------------------
class FileReader:
    """Parse a gaze-sample file into parallel x / y / time lists.

    Each input line is expected to hold at least three numeric tokens
    (x, y, timestamp); lines without matches are skipped.
    """
    def __init__(self, filename):
        self.x = []
        self.y = []
        self.time = []
        # [x, y, t] triples in file order (consumed by the main script).
        self.values = []
        self.readFile(filename)
    def readFile(self, filename):
        """Read `filename`, appending the first three numbers of each
        matching line to self.x, self.y, self.time and self.values."""
        # Fix: use a raw string -- "\d" in a plain literal is an invalid
        # escape sequence (SyntaxWarning on modern Python).
        pattern = re.compile(r"\d+.?\d+")
        with open(filename, 'r') as sample:
            for line in sample:
                group = pattern.findall(line)
                if group:
                    # NOTE(review): assumes >= 3 matches per matching line;
                    # a line with 1-2 numbers would raise IndexError.
                    x = float(group[0])
                    y = float(group[1])
                    t = float(group[2])
                    self.x.append(x)
                    self.y.append(y)
                    self.time.append(t)
                    self.values.append([x, y, t])
#------------------------
if __name__ == '__main__':
    # Load samples from the file given on the command line.
    fr = FileReader(sys.argv[1])
    # Condition variable used to hand samples to the detector thread.
    cv = threading.Condition()
    detector = Detector(30, cv)
    detector.start()
    # Module-level lists the detector thread appends plotting data to.
    timelist = []
    dxlist = []
    dylist = []
    xlist = []
    ylist = []
    xdetected = []
    tdetected = []
    read_on = []
    skim_on = []
    xnot_related = []
    tnot_related = []
    # Feed the detector one sample per notify.
    for i in range(len(fr.values) - 1):
        #detector.storeValues(fr.x.pop(0), fr.y.pop(0), fr.time.pop(0))
        detector.storeValues(fr.values.pop(0))#, fr.values[0])
        with cv:
            cv.notify_all()
        time.sleep(0.0001)
    #fr.plota()
    # Ask the thread to stop, wake it one last time, and wait for it.
    detector.stop = True
    print("total:", detector.cnt_total)
    print("found:", detector.cnt_yes)
    print("not found:", detector.cnt_no)
    with cv:
        cv.notify_all()
    detector.join()
    #plot everything:
    # plt.plot(timelist, dxlist, 'bo-')
    # plt.grid()
    # plt.show()
    # plt.plot(timelist, dylist, 'ro-')
    # plt.grid()
    # plt.show()
    # Raw x (blue) and y (red) traces, detected reading movement (yellow),
    # unrelated samples (green squares).
    plt.plot(timelist, xlist, 'bo-')
    plt.plot(timelist, ylist, 'ro-')
    plt.plot(tdetected, xdetected, 'yo-')
    plt.plot(tnot_related, xnot_related, 'gs')
    last = timelist[-1]
    ceil = max(max(xlist), max(ylist))
    #print(read_on)
    # Shade spans where consecutive above-threshold timestamps are close.
    for i in range(len(read_on) - 1):
        if read_on[i+1] - read_on[i] <= 0.15:
            plt.axhspan(0.0, ceil, read_on[i]/last, read_on[i+1]/last, edgecolor='none', facecolor='y', alpha=0.5)
    plt.ylim(0, max(ylist))
    plt.xlim(0, max(timelist))
    plt.grid()
plt.show() | [
"threading.Thread.__init__",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.ptp",
"threading.Condition",
"time.time",
"time.sleep",
"numpy.append",
"matplotlib.pyplot.axhspan",
"numpy.array",
"matplotlib.pyplot.grid",
"re.compile"
] | [((5717, 5738), 'threading.Condition', 'threading.Condition', ([], {}), '()\n', (5736, 5738), False, 'import threading\n'), ((6632, 6664), 'matplotlib.pyplot.plot', 'plt.plot', (['timelist', 'xlist', '"""bo-"""'], {}), "(timelist, xlist, 'bo-')\n", (6640, 6664), True, 'import matplotlib.pyplot as plt\n'), ((6669, 6701), 'matplotlib.pyplot.plot', 'plt.plot', (['timelist', 'ylist', '"""ro-"""'], {}), "(timelist, ylist, 'ro-')\n", (6677, 6701), True, 'import matplotlib.pyplot as plt\n'), ((6706, 6743), 'matplotlib.pyplot.plot', 'plt.plot', (['tdetected', 'xdetected', '"""yo-"""'], {}), "(tdetected, xdetected, 'yo-')\n", (6714, 6743), True, 'import matplotlib.pyplot as plt\n'), ((6748, 6790), 'matplotlib.pyplot.plot', 'plt.plot', (['tnot_related', 'xnot_related', '"""gs"""'], {}), "(tnot_related, xnot_related, 'gs')\n", (6756, 6790), True, 'import matplotlib.pyplot as plt\n'), ((7136, 7146), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (7144, 7146), True, 'import matplotlib.pyplot as plt\n'), ((7151, 7161), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7159, 7161), True, 'import matplotlib.pyplot as plt\n'), ((316, 347), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (341, 347), False, 'import threading\n'), ((665, 676), 'time.time', 'time.time', ([], {}), '()\n', (674, 676), False, 'import time\n'), ((1843, 1855), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1851, 1855), True, 'import numpy as np\n'), ((1875, 1887), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1883, 1887), True, 'import numpy as np\n'), ((5167, 5191), 're.compile', 're.compile', (['"""\\\\d+.?\\\\d+"""'], {}), "('\\\\d+.?\\\\d+')\n", (5177, 5191), False, 'import re\n'), ((6216, 6234), 'time.sleep', 'time.sleep', (['(0.0001)'], {}), '(0.0001)\n', (6226, 6234), False, 'import time\n'), ((2511, 2540), 'numpy.append', 'np.append', (['yaxs_mov', '[prev_y]'], {}), '(yaxs_mov, [prev_y])\n', (2520, 2540), True, 'import numpy as 
np\n'), ((2564, 2593), 'numpy.append', 'np.append', (['xaxs_mov', '[prev_x]'], {}), '(xaxs_mov, [prev_x])\n', (2573, 2593), True, 'import numpy as np\n'), ((6970, 7083), 'matplotlib.pyplot.axhspan', 'plt.axhspan', (['(0.0)', 'ceil', '(read_on[i] / last)', '(read_on[i + 1] / last)'], {'edgecolor': '"""none"""', 'facecolor': '"""y"""', 'alpha': '(0.5)'}), "(0.0, ceil, read_on[i] / last, read_on[i + 1] / last, edgecolor=\n 'none', facecolor='y', alpha=0.5)\n", (6981, 7083), True, 'import matplotlib.pyplot as plt\n'), ((3510, 3522), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3518, 3522), True, 'import numpy as np\n'), ((3554, 3566), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3562, 3566), True, 'import numpy as np\n'), ((3450, 3466), 'numpy.ptp', 'np.ptp', (['xaxs_mov'], {}), '(xaxs_mov)\n', (3456, 3466), True, 'import numpy as np\n')] |
import sys
import unittest
import dask.array as da
import dask.distributed as dd
import numpy as np
import xarray as xr
# Import from directory structure if coverage test, or from installed
# packages otherwise
if "--cov" in str(sys.argv):
from src.geocat.comp import heat_index
else:
from geocat.comp import heat_index
class Test_heat_index(unittest.TestCase):
    """Regression tests for geocat.comp.heat_index against NCL ground truths,
    exercising numpy, list, float, xarray and dask inputs."""

    @classmethod
    def setUpClass(cls):
        # set up ground truths (reference values from NCL's heat_index_nws)
        cls.ncl_gt_1 = [
            137.36142, 135.86795, 104.684456, 131.25621, 105.39449, 79.78999,
            83.57511, 59.965, 30.
        ]
        cls.ncl_gt_2 = [
            68.585, 76.13114, 75.12854, 99.43573, 104.93261, 93.73293,
            104.328705, 123.23398, 150.34001, 106.87023
        ]

        cls.t1 = np.array([104, 100, 92, 92, 86, 80, 80, 60, 30])
        cls.rh1 = np.array([55, 65, 60, 90, 90, 40, 75, 90, 50])

        cls.t2 = np.array([70, 75, 80, 85, 90, 95, 100, 105, 110, 115])
        cls.rh2 = np.array([10, 75, 15, 80, 65, 25, 30, 40, 50, 5])

        # make client to reference in subsequent tests
        cls.client = dd.Client()

    @classmethod
    def tearDownClass(cls):
        # Fix: close the dask client (and its local cluster) so worker
        # processes do not outlive the test run.
        cls.client.close()

    def test_numpy_input(self):
        assert np.allclose(heat_index(self.t1, self.rh1, False),
                           self.ncl_gt_1,
                           atol=0.005)

    def test_multi_dimensional_input(self):
        assert np.allclose(heat_index(self.t2.reshape(2, 5),
                                      self.rh2.reshape(2, 5), True),
                           np.asarray(self.ncl_gt_2).reshape(2, 5),
                           atol=0.005)

    def test_alt_coef(self):
        assert np.allclose(heat_index(self.t2, self.rh2, True),
                           self.ncl_gt_2,
                           atol=0.005)

    def test_float_input(self):
        assert np.allclose(heat_index(80, 75), 83.5751, atol=0.005)

    def test_list_input(self):
        assert np.allclose(heat_index(self.t1.tolist(), self.rh1.tolist()),
                           self.ncl_gt_1,
                           atol=0.005)

    def test_xarray_input(self):
        t = xr.DataArray(self.t1)
        rh = xr.DataArray(self.rh1)

        assert np.allclose(heat_index(t, rh), self.ncl_gt_1, atol=0.005)

    def test_alternate_xarray_tag(self):
        t = xr.DataArray([15, 20])
        rh = xr.DataArray([15, 20])

        out = heat_index(t, rh)
        assert out.tag == "NCL: heat_index_nws; (Steadman+t)*0.5"

    def test_rh_warning(self):
        self.assertWarns(UserWarning, heat_index, [50, 80, 90], [0.1, 0.2, 0.5])

    def test_rh_valid(self):
        self.assertRaises(ValueError, heat_index, [50, 80, 90], [-1, 101, 50])

    def test_dask_unchunked_input(self):
        t = da.from_array(self.t1)
        rh = da.from_array(self.rh1)

        out = self.client.submit(heat_index, t, rh).result()

        assert np.allclose(out, self.ncl_gt_1, atol=0.005)

    def test_dask_chunked_input(self):
        t = da.from_array(self.t1, chunks='auto')
        rh = da.from_array(self.rh1, chunks='auto')

        out = self.client.submit(heat_index, t, rh).result()

        assert np.allclose(out, self.ncl_gt_1, atol=0.005)
| [
"dask.distributed.Client",
"numpy.allclose",
"numpy.asarray",
"geocat.comp.heat_index",
"numpy.array",
"xarray.DataArray",
"dask.array.from_array"
] | [((775, 823), 'numpy.array', 'np.array', (['[104, 100, 92, 92, 86, 80, 80, 60, 30]'], {}), '([104, 100, 92, 92, 86, 80, 80, 60, 30])\n', (783, 823), True, 'import numpy as np\n'), ((842, 888), 'numpy.array', 'np.array', (['[55, 65, 60, 90, 90, 40, 75, 90, 50]'], {}), '([55, 65, 60, 90, 90, 40, 75, 90, 50])\n', (850, 888), True, 'import numpy as np\n'), ((907, 961), 'numpy.array', 'np.array', (['[70, 75, 80, 85, 90, 95, 100, 105, 110, 115]'], {}), '([70, 75, 80, 85, 90, 95, 100, 105, 110, 115])\n', (915, 961), True, 'import numpy as np\n'), ((980, 1029), 'numpy.array', 'np.array', (['[10, 75, 15, 80, 65, 25, 30, 40, 50, 5]'], {}), '([10, 75, 15, 80, 65, 25, 30, 40, 50, 5])\n', (988, 1029), True, 'import numpy as np\n'), ((1107, 1118), 'dask.distributed.Client', 'dd.Client', ([], {}), '()\n', (1116, 1118), True, 'import dask.distributed as dd\n'), ((2091, 2112), 'xarray.DataArray', 'xr.DataArray', (['self.t1'], {}), '(self.t1)\n', (2103, 2112), True, 'import xarray as xr\n'), ((2126, 2148), 'xarray.DataArray', 'xr.DataArray', (['self.rh1'], {}), '(self.rh1)\n', (2138, 2148), True, 'import xarray as xr\n'), ((2277, 2299), 'xarray.DataArray', 'xr.DataArray', (['[15, 20]'], {}), '([15, 20])\n', (2289, 2299), True, 'import xarray as xr\n'), ((2313, 2335), 'xarray.DataArray', 'xr.DataArray', (['[15, 20]'], {}), '([15, 20])\n', (2325, 2335), True, 'import xarray as xr\n'), ((2351, 2368), 'geocat.comp.heat_index', 'heat_index', (['t', 'rh'], {}), '(t, rh)\n', (2361, 2368), False, 'from geocat.comp import heat_index\n'), ((2711, 2733), 'dask.array.from_array', 'da.from_array', (['self.t1'], {}), '(self.t1)\n', (2724, 2733), True, 'import dask.array as da\n'), ((2747, 2770), 'dask.array.from_array', 'da.from_array', (['self.rh1'], {}), '(self.rh1)\n', (2760, 2770), True, 'import dask.array as da\n'), ((2849, 2892), 'numpy.allclose', 'np.allclose', (['out', 'self.ncl_gt_1'], {'atol': '(0.005)'}), '(out, self.ncl_gt_1, atol=0.005)\n', (2860, 2892), True, 'import numpy as 
np\n'), ((2945, 2982), 'dask.array.from_array', 'da.from_array', (['self.t1'], {'chunks': '"""auto"""'}), "(self.t1, chunks='auto')\n", (2958, 2982), True, 'import dask.array as da\n'), ((2996, 3034), 'dask.array.from_array', 'da.from_array', (['self.rh1'], {'chunks': '"""auto"""'}), "(self.rh1, chunks='auto')\n", (3009, 3034), True, 'import dask.array as da\n'), ((3113, 3156), 'numpy.allclose', 'np.allclose', (['out', 'self.ncl_gt_1'], {'atol': '(0.005)'}), '(out, self.ncl_gt_1, atol=0.005)\n', (3124, 3156), True, 'import numpy as np\n'), ((1179, 1215), 'geocat.comp.heat_index', 'heat_index', (['self.t1', 'self.rh1', '(False)'], {}), '(self.t1, self.rh1, False)\n', (1189, 1215), False, 'from geocat.comp import heat_index\n'), ((1637, 1672), 'geocat.comp.heat_index', 'heat_index', (['self.t2', 'self.rh2', '(True)'], {}), '(self.t2, self.rh2, True)\n', (1647, 1672), False, 'from geocat.comp import heat_index\n'), ((1815, 1833), 'geocat.comp.heat_index', 'heat_index', (['(80)', '(75)'], {}), '(80, 75)\n', (1825, 1833), False, 'from geocat.comp import heat_index\n'), ((2177, 2194), 'geocat.comp.heat_index', 'heat_index', (['t', 'rh'], {}), '(t, rh)\n', (2187, 2194), False, 'from geocat.comp import heat_index\n'), ((1500, 1525), 'numpy.asarray', 'np.asarray', (['self.ncl_gt_2'], {}), '(self.ncl_gt_2)\n', (1510, 1525), True, 'import numpy as np\n')] |
import numpy as np
def mask_nan_labels_np(labels, null_val=0.):
with np.errstate(divide='ignore', invalid='ignore'):
if np.isnan(null_val):
mask = ~np.isnan(labels)
else:
mask = np.not_equal(labels, null_val)
mask = mask.astype('float32')
mask /= np.mean(mask)
return mask
def masked_mse_np(preds, labels, null_val=np.nan):
with np.errstate(divide='ignore', invalid='ignore'):
mask = mask_nan_labels_np(labels=labels, null_val=null_val)
rmse = np.square(np.subtract(preds, labels)).astype('float32')
rmse = np.nan_to_num(rmse * mask)
return np.mean(rmse)
def masked_rmse_np(preds, labels, null_val=np.nan):
return np.sqrt(masked_mse_np(preds=preds, labels=labels, null_val=null_val))
def masked_mae_np(preds, labels, null_val=np.nan):
with np.errstate(divide='ignore', invalid='ignore'):
mask = mask_nan_labels_np(labels=labels, null_val=null_val)
mae = np.abs(np.subtract(preds, labels)).astype('float32')
mae = np.nan_to_num(mae * mask)
return np.mean(mae)
def masked_mape_np(preds, labels, null_val=np.nan):
with np.errstate(divide='ignore', invalid='ignore'):
mask = mask_nan_labels_np(labels=labels, null_val=null_val)
mape = np.abs(np.divide(np.subtract(preds, labels).astype('float32'), labels))
mape = np.nan_to_num(mask * mape)
return np.mean(mape)
def calculate_metrics(preds, labels, null_val=0.0):
"""
Calculate the MAE, MAPE, RMSE
:param df_pred:
:param df_test:
:param null_val:
:return:
"""
mape = masked_mape_np(preds, labels, null_val=null_val)
mae = masked_mae_np(preds, labels, null_val=null_val)
rmse = masked_rmse_np(preds, labels, null_val=null_val)
# Switched order according to the paper
return mae, rmse, mape
| [
"numpy.nan_to_num",
"numpy.subtract",
"numpy.isnan",
"numpy.errstate",
"numpy.not_equal",
"numpy.mean"
] | [((75, 121), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (86, 121), True, 'import numpy as np\n'), ((134, 152), 'numpy.isnan', 'np.isnan', (['null_val'], {}), '(null_val)\n', (142, 152), True, 'import numpy as np\n'), ((309, 322), 'numpy.mean', 'np.mean', (['mask'], {}), '(mask)\n', (316, 322), True, 'import numpy as np\n'), ((405, 451), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (416, 451), True, 'import numpy as np\n'), ((607, 633), 'numpy.nan_to_num', 'np.nan_to_num', (['(rmse * mask)'], {}), '(rmse * mask)\n', (620, 633), True, 'import numpy as np\n'), ((649, 662), 'numpy.mean', 'np.mean', (['rmse'], {}), '(rmse)\n', (656, 662), True, 'import numpy as np\n'), ((860, 906), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (871, 906), True, 'import numpy as np\n'), ((1057, 1082), 'numpy.nan_to_num', 'np.nan_to_num', (['(mae * mask)'], {}), '(mae * mask)\n', (1070, 1082), True, 'import numpy as np\n'), ((1098, 1110), 'numpy.mean', 'np.mean', (['mae'], {}), '(mae)\n', (1105, 1110), True, 'import numpy as np\n'), ((1174, 1220), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (1185, 1220), True, 'import numpy as np\n'), ((1392, 1418), 'numpy.nan_to_num', 'np.nan_to_num', (['(mask * mape)'], {}), '(mask * mape)\n', (1405, 1418), True, 'import numpy as np\n'), ((1434, 1447), 'numpy.mean', 'np.mean', (['mape'], {}), '(mape)\n', (1441, 1447), True, 'import numpy as np\n'), ((224, 254), 'numpy.not_equal', 'np.not_equal', (['labels', 'null_val'], {}), '(labels, null_val)\n', (236, 254), True, 'import numpy as np\n'), ((174, 190), 'numpy.isnan', 'np.isnan', (['labels'], {}), '(labels)\n', (182, 190), True, 'import numpy 
as np\n'), ((546, 572), 'numpy.subtract', 'np.subtract', (['preds', 'labels'], {}), '(preds, labels)\n', (557, 572), True, 'import numpy as np\n'), ((997, 1023), 'numpy.subtract', 'np.subtract', (['preds', 'labels'], {}), '(preds, labels)\n', (1008, 1023), True, 'import numpy as np\n'), ((1322, 1348), 'numpy.subtract', 'np.subtract', (['preds', 'labels'], {}), '(preds, labels)\n', (1333, 1348), True, 'import numpy as np\n')] |
import json
import pprint
import argparse
import firebase_admin
import numpy as np
from copy import deepcopy
from firebase_admin import credentials, db, firestore
pp = pprint.PrettyPrinter(indent=4)
def valid_identifier(identifier):
splited = identifier.split(' ')
if len(splited) != 2 or not splited[0].isupper() or not splited[1].isdigit():
return False
return True
def is_X(identifier):
splited = identifier.split(' ')
if len(splited) != 2:
return False
num = splited[1]
idx = num.find('X')
while idx != -1:
num = num[:idx] + num[idx + 1:]
idx = num.find('X')
if num.isdigit(): return True
else: return False
def count_graph_size(cur, prerequisites, encounter_cls, counts, start=False):
if type(cur) == type(str()):
if cur in encounter_cls:
return
encounter_cls.add(cur)
counts[0] += 1
if cur in prerequisites:
count_graph_size(prerequisites[cur], prerequisites, encounter_cls, counts, start=True)
else:
if not start: counts[0] += 1
for obj in cur['courses']:
counts[1] += 1
count_graph_size(obj, prerequisites, encounter_cls, counts, start=False)
def get_level(pre):
level = 1
for element in pre['courses']:
if type(element) == type({}):
level = max(level, 1 + get_level(element))
return level
def prune(pre, threshold, level=0):
if level >= threshold:
i = 0
while i < len(pre['courses']):
if type(pre['courses'][i]) == type({}):
print ('prune')
del pre['courses'][i]
else: i += 1
else:
for element in pre['courses']:
if type(element) == type({}):
prune(element, threshold, level=level + 1)
def dfs_process_prerequisite(pre, invalid_list, id_set, counts):
if 'courses' not in pre or 'type' not in pre:
print ('invalid structure')
return None, 1
pre_list = pre['courses']
pre_type = pre['type']
process_list = []
for element in pre_list:
if type(element) == type(str()):
if valid_identifier(element):
process_list.append(element)
if element not in id_set:
print (element)
counts[1] += 1
else:
if is_X(element): invalid_list[0].append(element)
else: invalid_list[1].append(element)
counts[0] += 1
elif type(element) == type({}):
obj = dfs_process_prerequisite(element, invalid_list, id_set, counts)
if not obj: continue
if pre_type == obj['type'] or len(obj['courses']) == 1:
process_list.extend(obj['courses'])
process_list.append(obj)
else:
print ('WARNING: unexpect data structure in first level: %s' % type(element))
if len(process_list) == 0:
return None
if len(process_list) == 1 and type(process_list[0]) == type({}):
return process_list[0]
obj = { 'courses': process_list,
'type': pre_type}
return obj
def process_prerequisite(pre):
if 'courses' not in pre or 'type' not in pre:
return None
pre_list = pre['courses']
pre_type = pre['type']
process_list = []
invalid = []
element_count = 0
for element in pre_list:
if type(element) == type(str()):
if valid_identifier(element):
process_list.append(element)
else:
invalid.append(element)
element_count += 1
elif type(element) == type({}):
if 'courses' not in element or 'type' not in element:
continue
nest_list = element['courses']
nest_type = element['type']
nest_process_list = []
for nest_element in nest_list:
if type(nest_element) != type(str()):
print ('WARNING: unexpect data structure in second level: %s' % type(nest_element))
pp.pprint(pre)
continue
if valid_identifier(nest_element):
nest_process_list.append(nest_element)
if len(nest_process_list) == 0:
continue
if nest_type == pre_type or len(nest_process_list) == 1:
print ('Merge second level object to first level.')
process_list.extend(nest_process_list)
continue
process_list.append({'courses': nest_process_list,
'type': nest_type})
else:
print ('WARNING: unexpect data structure in first level: %s' % type(element))
if len(process_list) == 0:
return None
if len(process_list) == 1 and type(process_list[0]) == type({}):
return process_list[0]
return {'courses': process_list,
'type': pre_type}
def updatePrerequisite(filepath, update=False):
print ("Start updating prerequisites from %s" % filepath)
with open(filepath, 'r') as f:
courses = json.load(f)
if update:
prerequisites = db.reference('Prerequisites')
prerequisites.delete()
raw_pre = {}
processed_pre = {}
prune_pre = {}
invalid_list = [[], []]
pre_count = 0
counts = [0, 0]
id_set = set()
for c in courses:
id_set.add(c['identifier'])
for c in courses:
if 'prerequisites' in c:
pre_count += 1
raw_pre[c['identifier']] = c['prerequisites']
#processed = process_prerequisite(c['prerequisites'])
processed = dfs_process_prerequisite(c['prerequisites'], invalid_list, id_set, counts)
if not processed:
continue
processed_pre[c['identifier']] = processed
processed_copy = deepcopy(processed)
prune(processed, 1)
prune_pre[c['identifier']] = processed_copy
if update:
prerequisites.child(c['identifier']).set(processed)
size_diffs = []
biggest_raw, biggest_raw_cls, biggest_prune, biggest_prune_cls = 0, '', 0, ''
for cls in raw_pre.keys():
raw_counts = [0, 0]
count_graph_size(cls, raw_pre, set(), raw_counts)
raw_size = raw_counts[0] + raw_counts[1]
if raw_size > biggest_raw:
biggest_raw = raw_size
biggest_raw_cls = cls
processed_counts = [0, 0]
if cls in processed_pre:
count_graph_size(cls, processed_pre, set(), processed_counts)
processed_size = processed_counts[0] + processed_counts[1]
size_diffs.append((raw_size - processed_size) / raw_size)
prune_counts = [0, 0]
if cls in prune_pre:
count_graph_size(cls, prune_pre, set(), prune_counts)
prune_size = prune_counts[0] + prune_counts[1]
if prune_size > biggest_prune:
biggest_prune = prune_size
biggest_prune_cls = cls
element_count, element_not_in_id = counts[0], counts[1]
#print (invalid_list[0])
#print (invalid_list[1])
print ('Number of course need prerequisites: %d' % pre_count)
print ('Number of element: %d' % element_count)
print ('Number of invalid element: %d' % len(invalid_list[1]))
print ('Number of identifier with "X": %d' % len(invalid_list[0]))
print ('Number of element not in id: %d' % element_not_in_id)
print ('Biggest Raw Graph: %s, %d' % (biggest_raw_cls, biggest_raw))
print ('Biggest Prune Graph: %s, %d' % (biggest_prune_cls, biggest_prune))
print (size_diffs[19])
print ('Reduce Amount: %f (%f)' % (np.mean(size_diffs), np.std(size_diffs)))
#print (len(invalid_list) / element_count)
pre_map = create_map(processed_pre)
if update:
prerequisite_map = db.reference('prerequisite_map')
prerequisite_map.set(pre_map)
def create_map(prerequisites):
pre_map = {}
counts = {}
cycle_count = 0
for cls in prerequisites.keys():
encounter_cls = set()
#print ('Processing prerequisite map for %s' % cls)
counts[cls] = dfs(cls, prerequisites, encounter_cls, set())
if counts[cls] > 0:
cycle_count += 1
pre_map[cls] = list(encounter_cls)
print ('Percentages of Cycle: %f' % (cycle_count / len(counts)))
return pre_map
def dfs(cur, prerequisites, encounter_cls, path):
#print (cur)
count = 0
if type(cur) == type(str()):
if cur in prerequisites:
if cur in path:
#print ("WARNING: Cycle detected...")
return 1
if cur in encounter_cls:
return count
encounter_cls.add(cur)
path.add(cur)
count += dfs(prerequisites[cur], prerequisites, encounter_cls, path)
path.remove(cur)
else:
for obj in cur['courses']:
count += dfs(obj, prerequisites, encounter_cls, path)
return count
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('filepath', help="The path to the course data file")
parser.add_argument('-u', '--update', action='store_true', help="If specified, update prerequisites in database")
args = parser.parse_args()
if args.update:
print ("Connecting to firebase database...")
cred = credentials.Certificate('./credential.json')
firebase_admin.initialize_app(cred, {'databaseURL': "https://buzzplan-d333f.firebaseio.com"})
print ("Done")
print ()
updatePrerequisite(args.filepath)
#print (count_graph_size(pre)) | [
"firebase_admin.credentials.Certificate",
"json.load",
"copy.deepcopy",
"argparse.ArgumentParser",
"numpy.std",
"pprint.PrettyPrinter",
"numpy.mean",
"firebase_admin.db.reference",
"firebase_admin.initialize_app"
] | [((171, 201), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (191, 201), False, 'import pprint\n'), ((7731, 7756), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7754, 7756), False, 'import argparse\n'), ((4317, 4329), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4326, 4329), False, 'import json\n'), ((4361, 4390), 'firebase_admin.db.reference', 'db.reference', (['"""Prerequisites"""'], {}), "('Prerequisites')\n", (4373, 4390), False, 'from firebase_admin import credentials, db, firestore\n'), ((6712, 6744), 'firebase_admin.db.reference', 'db.reference', (['"""prerequisite_map"""'], {}), "('prerequisite_map')\n", (6724, 6744), False, 'from firebase_admin import credentials, db, firestore\n'), ((8049, 8093), 'firebase_admin.credentials.Certificate', 'credentials.Certificate', (['"""./credential.json"""'], {}), "('./credential.json')\n", (8072, 8093), False, 'from firebase_admin import credentials, db, firestore\n'), ((8096, 8193), 'firebase_admin.initialize_app', 'firebase_admin.initialize_app', (['cred', "{'databaseURL': 'https://buzzplan-d333f.firebaseio.com'}"], {}), "(cred, {'databaseURL':\n 'https://buzzplan-d333f.firebaseio.com'})\n", (8125, 8193), False, 'import firebase_admin\n'), ((4952, 4971), 'copy.deepcopy', 'deepcopy', (['processed'], {}), '(processed)\n', (4960, 4971), False, 'from copy import deepcopy\n'), ((6554, 6573), 'numpy.mean', 'np.mean', (['size_diffs'], {}), '(size_diffs)\n', (6561, 6573), True, 'import numpy as np\n'), ((6575, 6593), 'numpy.std', 'np.std', (['size_diffs'], {}), '(size_diffs)\n', (6581, 6593), True, 'import numpy as np\n')] |
import numpy as np
import imcut.pycut as pspc
import matplotlib.pyplot as plt
# create data
data = np.random.rand(30, 30, 30)
data[10:20, 5:15, 3:13] += 1
data = data * 30
data = data.astype(np.int16)
# Make seeds
seeds = np.zeros([30, 30, 30])
seeds[13:17, 7:10, 5:11] = 1
seeds[0:5:, 0:10, 0:11] = 2
# Run
igc = pspc.ImageGraphCut(data, voxelsize=[1, 1, 1])
igc.set_seeds(seeds)
igc.run()
# Show results
colormap = plt.cm.get_cmap('brg')
colormap._init()
colormap._lut[:1:, 3] = 0
plt.imshow(data[:, :, 10], cmap='gray')
plt.contour(igc.segmentation[:, :, 10], levels=[0.5])
plt.imshow(igc.seeds[:, :, 10], cmap=colormap, interpolation='none')
plt.savefig("gc_example.png")
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"imcut.pycut.ImageGraphCut",
"matplotlib.pyplot.contour",
"numpy.random.rand",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.savefig"
] | [((100, 126), 'numpy.random.rand', 'np.random.rand', (['(30)', '(30)', '(30)'], {}), '(30, 30, 30)\n', (114, 126), True, 'import numpy as np\n'), ((224, 246), 'numpy.zeros', 'np.zeros', (['[30, 30, 30]'], {}), '([30, 30, 30])\n', (232, 246), True, 'import numpy as np\n'), ((317, 362), 'imcut.pycut.ImageGraphCut', 'pspc.ImageGraphCut', (['data'], {'voxelsize': '[1, 1, 1]'}), '(data, voxelsize=[1, 1, 1])\n', (335, 362), True, 'import imcut.pycut as pspc\n'), ((421, 443), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""brg"""'], {}), "('brg')\n", (436, 443), True, 'import matplotlib.pyplot as plt\n'), ((488, 527), 'matplotlib.pyplot.imshow', 'plt.imshow', (['data[:, :, 10]'], {'cmap': '"""gray"""'}), "(data[:, :, 10], cmap='gray')\n", (498, 527), True, 'import matplotlib.pyplot as plt\n'), ((528, 581), 'matplotlib.pyplot.contour', 'plt.contour', (['igc.segmentation[:, :, 10]'], {'levels': '[0.5]'}), '(igc.segmentation[:, :, 10], levels=[0.5])\n', (539, 581), True, 'import matplotlib.pyplot as plt\n'), ((582, 650), 'matplotlib.pyplot.imshow', 'plt.imshow', (['igc.seeds[:, :, 10]'], {'cmap': 'colormap', 'interpolation': '"""none"""'}), "(igc.seeds[:, :, 10], cmap=colormap, interpolation='none')\n", (592, 650), True, 'import matplotlib.pyplot as plt\n'), ((651, 680), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gc_example.png"""'], {}), "('gc_example.png')\n", (662, 680), True, 'import matplotlib.pyplot as plt\n'), ((681, 691), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (689, 691), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
class ReverseScalar():
def __init__(self, val: float):
self._val = val
self._gradient = 1
self._children = {}
def get(self):
return self._val, self.compute_gradient()
def compute_gradient(self):
if len(self._children.keys()) > 0:
gradient = 0
for node, val in self._children.items():
gradient += val*node.compute_gradient()
return gradient
#return sum([val * node.compute_gradient() for node, val in self._children.items()])
#except TypeError:
# return self._gradient
else:
return 1
def __add__(self, other):
try: # If scalar
child = ReverseScalar(self._val+other._val)
child._children[self] = 1
child._children[other] = 1
return child
except AttributeError: # If constant
child = ReverseScalar(self._val+other)
child._children[self] = 1
return child
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
try: # If scalar
child = ReverseScalar(self._val-other._val)
child._children[self] = 1
child._children[other] = -1
return child
except AttributeError: # If constant
child = ReverseScalar(self._val-other)
child._children[self] = 1
return child
def __rsub__(self, other):
try: # If scalar
child = ReverseScalar(other._val- self._val)
child._children[self] = -1
child._children[other] = 1
return child
except AttributeError: # If constant
child = ReverseScalar(other-self._val)
child._children[self] = -1
return child
def __mul__(self, other):
try: # If scalar
child = ReverseScalar(self._val*other._val)
child._children[self] = other._val
child._children[other] = self._val
return child
except AttributeError: # If constant
child = ReverseScalar(self._val*other)
child._children[self] = other
return child
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
try: # If scalar
child = ReverseScalar(self._val/other._val)
child._children[self] = 1/other._val
child._children[other] = -self._val/other._val**2
return child
except AttributeError: # If constant
child = ReverseScalar(self._val/other)
child._children[self] = 1/other
return child
def __rtruediv__(self, other):
# Might be wrong
try: # If scalar
child = ReverseScalar(other._val/self._val)
child._children[self] = -other._val/self._val**2
child._children[other] = 1/self._val
return child
except AttributeError: # If constant
child = ReverseScalar(other/self._val)
child._children[self] = -other/self._val**2
return child
def __pow__(self, other):
try: # If scalar
child = ReverseScalar(self._val**other._val)
child._children[self] = other._val*self._val**(other._val-1)
child._children[other] = np.log(self._val)*self._val**other._val
return child
except AttributeError: # If constant
child = ReverseScalar(self._val**other)
child._children[self] = other*self._val**(other-1)
return child
def __rpow__(self, other):
child = ReverseScalar(other**self._val)
child._children[self] = np.log(other)*other**self._val
return child
def __neg__(self):
child = ReverseScalar(-self._val)
child._children[self] = -1
return child
| [
"numpy.log"
] | [((4035, 4048), 'numpy.log', 'np.log', (['other'], {}), '(other)\n', (4041, 4048), True, 'import numpy as np\n'), ((3646, 3663), 'numpy.log', 'np.log', (['self._val'], {}), '(self._val)\n', (3652, 3663), True, 'import numpy as np\n')] |
import dolfin as df
import numpy as np
import os
from . import *
from common.io import mpi_is_root, load_mesh
from common.bcs import Fixed, Charged
from .porous import Obstacles
from mpi4py import MPI
__author__ = "<NAME>"
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
class PeriodicBoundary(df.SubDomain):
# Left boundary is target domain
def __init__(self, Lx, Ly):
self.Lx = Lx
self.Ly = Ly
df.SubDomain.__init__(self)
def inside(self, x, on_boundary):
return bool((df.near(x[0], -self.Lx/2) or
df.near(x[1], -self.Ly/2)) and
(not ((df.near(x[0], -self.Lx/2) and
df.near(x[1], self.Ly/2)) or
(df.near(x[0], self.Lx/2) and
df.near(x[1], -self.Ly/2))))
and on_boundary)
def map(self, x, y):
if df.near(x[0], self.Lx/2) and df.near(x[1], self.Ly/2):
y[0] = x[0] - self.Lx
y[1] = x[1] - self.Ly
elif df.near(x[0], self.Lx/2):
y[0] = x[0] - self.Lx
y[1] = x[1]
else:
y[0] = x[0]
y[1] = x[1] - self.Ly
def problem():
info_cyan("Fully periodic porous media flow with electrohydrodynamics.")
solutes = [["c_p", 1, 0.01, 0.01, 0., 0.],
["c_m", -1, 0.01, 0.01, 0., 0.]]
# Default parameters to be loaded unless starting from checkpoint.
parameters = dict(
solver="stable_single",
folder="results_single_porous",
restart_folder=False,
enable_NS=True,
enable_PF=False,
enable_EC=True,
save_intv=5,
stats_intv=5,
checkpoint_intv=50,
tstep=0,
dt=0.05,
t_0=0.,
T=10.0,
N=32,
solutes=solutes,
Lx=4., # 8.,
Ly=4., # 8.,
rad=0.25,
num_obstacles=25, # 100,
grid_spacing=0.05,
#
density=[1., 1.],
viscosity=[0.05, 0.05],
permittivity=[2., 2.],
surface_charge=1.0,
composition=[0.45, 0.55], # must sum to one
#
EC_scheme="NL2",
use_iterative_solvers=True,
V_lagrange=True,
p_lagrange=False,
c_lagrange=True,
#
grav_const=0.2,
grav_dir=[1., 0.],
c_cutoff=0.1
)
return parameters
def constrained_domain(Lx, Ly, **namespace):
return PeriodicBoundary(Lx, Ly)
def mesh(Lx=8., Ly=8., rad=0.25, num_obstacles=100,
grid_spacing=0.05, **namespace):
try:
mesh = load_mesh(
"meshes/periodic_porous_Lx{}_Ly{}_rad{}_N{}_dx{}.h5".format(
Lx, Ly, rad, num_obstacles, grid_spacing))
except:
info_error("Can't find mesh. Go to utilities/ "
"and run \n\n\t"
"python3 generate_mesh.py mesh=periodic_porous_2d \n\n"
"with default parameters.")
return mesh
def initialize(Lx, Ly,
solutes, restart_folder,
field_to_subspace,
num_obstacles, rad,
surface_charge,
composition,
enable_NS, enable_EC,
**namespace):
""" Create the initial state. """
# Enforcing the compatibility condition.
total_charge = num_obstacles*2*np.pi*rad*surface_charge
total_area = Lx*Ly - num_obstacles*np.pi*rad**2
sum_zx = sum([composition[i]*solutes[i][1]
for i in range(len(composition))])
C = -(total_charge/total_area)/sum_zx
w_init_field = dict()
if not restart_folder:
if enable_EC:
for i, solute in enumerate(solutes):
c_init_expr = df.Expression(
"c0",
c0=composition[i]*C,
degree=2)
c_init = df.interpolate(
c_init_expr,
field_to_subspace[solute[0]].collapse())
w_init_field[solute[0]] = c_init
return w_init_field
def create_bcs(Lx, Ly, mesh, grid_spacing, rad, num_obstacles,
surface_charge, solutes, enable_NS, enable_EC,
p_lagrange, V_lagrange,
**namespace):
""" The boundaries and boundary conditions are defined here. """
data = np.loadtxt(
"meshes/periodic_porous_Lx{}_Ly{}_rad{}_N{}_dx{}.dat".format(
Lx, Ly, rad, num_obstacles, grid_spacing))
centroids = data[:, :2]
rad = data[:, 2]
# Find a single node to pin pressure to
x_loc = np.array(mesh.coordinates())
x_proc = np.zeros((size, 2))
ids_notboun = np.logical_and(
x_loc[:, 0] > x_loc[:, 0].min() + df.DOLFIN_EPS,
x_loc[:, 1] > x_loc[:, 1].min() + df.DOLFIN_EPS)
x_loc = x_loc[ids_notboun, :]
d2 = (x_loc[:, 0]+Lx/2)**2 + (x_loc[:, 1]+Ly/2)**2
x_bottomleft = x_loc[d2 == d2.min()][0]
x_proc[rank, :] = x_bottomleft
x_pin = np.zeros_like(x_proc)
comm.Allreduce(x_proc, x_pin, op=MPI.SUM)
x_pin = x_pin[x_pin[:, 0] == x_pin[:, 0].min(), :][0]
info("Pinning point: {}".format(x_pin))
pin_code = ("x[0] < {x}+{eps} && "
"x[0] > {x}-{eps} && "
"x[1] > {y}-{eps} && "
"x[1] < {y}+{eps}").format(
x=x_pin[0], y=x_pin[1], eps=1e-3)
boundaries = dict(
obstacles=[Obstacles(Lx, centroids, rad, grid_spacing)]
)
# Allocating the boundary dicts
bcs = dict()
bcs_pointwise = dict()
for boundary in boundaries:
bcs[boundary] = dict()
noslip = Fixed((0., 0.))
if enable_NS:
bcs["obstacles"]["u"] = noslip
if not p_lagrange:
bcs_pointwise["p"] = (0., pin_code)
if enable_EC:
bcs["obstacles"]["V"] = Charged(surface_charge)
if not V_lagrange:
bcs_pointwise["V"] = (0., pin_code)
return boundaries, bcs, bcs_pointwise
def tstep_hook(t, tstep, stats_intv, statsfile, field_to_subspace,
field_to_subproblem, subproblems, w_, **namespace):
info_blue("Timestep = {}".format(tstep))
def integrate_bulk_charge(x_, solutes, dx):
total_bulk_charge = []
for solute in solutes:
total_bulk_charge.append(df.assemble(solute[1]*x_[solute[0]]*dx))
return sum(total_bulk_charge)
def start_hook(w_, x_,
newfolder, field_to_subspace, field_to_subproblem,
boundaries,
boundary_to_mark,
dx, ds,
surface_charge, solutes,
**namespace):
total_surface_charge = df.assemble(
df.Constant(surface_charge)*ds(
boundary_to_mark["obstacles"]))
info("Total surface charge: {}".format(total_surface_charge))
total_bulk_charge = integrate_bulk_charge(x_, solutes, dx)
info("Total bulk charge: {}".format(total_bulk_charge))
rescale_factor = -total_surface_charge/total_bulk_charge
info("Rescale factor: {}".format(rescale_factor))
subproblem = field_to_subproblem[solutes[0][0]][0]
w_[subproblem].vector().set_local(
rescale_factor*w_[subproblem].vector().get_local())
total_bulk_charge_after = integrate_bulk_charge(x_, solutes, dx)
info("Final bulk charge: {}".format(total_bulk_charge_after))
statsfile = os.path.join(newfolder, "Statistics/stats.dat")
return dict(statsfile=statsfile)
| [
"common.bcs.Charged",
"numpy.zeros_like",
"common.bcs.Fixed",
"numpy.zeros",
"dolfin.Expression",
"dolfin.Constant",
"dolfin.assemble",
"os.path.join",
"dolfin.SubDomain.__init__",
"dolfin.near"
] | [((4628, 4647), 'numpy.zeros', 'np.zeros', (['(size, 2)'], {}), '((size, 2))\n', (4636, 4647), True, 'import numpy as np\n'), ((4976, 4997), 'numpy.zeros_like', 'np.zeros_like', (['x_proc'], {}), '(x_proc)\n', (4989, 4997), True, 'import numpy as np\n'), ((5615, 5632), 'common.bcs.Fixed', 'Fixed', (['(0.0, 0.0)'], {}), '((0.0, 0.0))\n', (5620, 5632), False, 'from common.bcs import Fixed, Charged\n'), ((7339, 7386), 'os.path.join', 'os.path.join', (['newfolder', '"""Statistics/stats.dat"""'], {}), "(newfolder, 'Statistics/stats.dat')\n", (7351, 7386), False, 'import os\n'), ((451, 478), 'dolfin.SubDomain.__init__', 'df.SubDomain.__init__', (['self'], {}), '(self)\n', (472, 478), True, 'import dolfin as df\n'), ((5816, 5839), 'common.bcs.Charged', 'Charged', (['surface_charge'], {}), '(surface_charge)\n', (5823, 5839), False, 'from common.bcs import Fixed, Charged\n'), ((919, 945), 'dolfin.near', 'df.near', (['x[0]', '(self.Lx / 2)'], {}), '(x[0], self.Lx / 2)\n', (926, 945), True, 'import dolfin as df\n'), ((948, 974), 'dolfin.near', 'df.near', (['x[1]', '(self.Ly / 2)'], {}), '(x[1], self.Ly / 2)\n', (955, 974), True, 'import dolfin as df\n'), ((1055, 1081), 'dolfin.near', 'df.near', (['x[0]', '(self.Lx / 2)'], {}), '(x[0], self.Lx / 2)\n', (1062, 1081), True, 'import dolfin as df\n'), ((6273, 6316), 'dolfin.assemble', 'df.assemble', (['(solute[1] * x_[solute[0]] * dx)'], {}), '(solute[1] * x_[solute[0]] * dx)\n', (6284, 6316), True, 'import dolfin as df\n'), ((6639, 6666), 'dolfin.Constant', 'df.Constant', (['surface_charge'], {}), '(surface_charge)\n', (6650, 6666), True, 'import dolfin as df\n'), ((3748, 3800), 'dolfin.Expression', 'df.Expression', (['"""c0"""'], {'c0': '(composition[i] * C)', 'degree': '(2)'}), "('c0', c0=composition[i] * C, degree=2)\n", (3761, 3800), True, 'import dolfin as df\n'), ((539, 566), 'dolfin.near', 'df.near', (['x[0]', '(-self.Lx / 2)'], {}), '(x[0], -self.Lx / 2)\n', (546, 566), True, 'import dolfin as df\n'), ((589, 616), 
'dolfin.near', 'df.near', (['x[1]', '(-self.Ly / 2)'], {}), '(x[1], -self.Ly / 2)\n', (596, 616), True, 'import dolfin as df\n'), ((647, 674), 'dolfin.near', 'df.near', (['x[0]', '(-self.Lx / 2)'], {}), '(x[0], -self.Lx / 2)\n', (654, 674), True, 'import dolfin as df\n'), ((704, 730), 'dolfin.near', 'df.near', (['x[1]', '(self.Ly / 2)'], {}), '(x[1], self.Ly / 2)\n', (711, 730), True, 'import dolfin as df\n'), ((760, 786), 'dolfin.near', 'df.near', (['x[0]', '(self.Lx / 2)'], {}), '(x[0], self.Lx / 2)\n', (767, 786), True, 'import dolfin as df\n'), ((816, 843), 'dolfin.near', 'df.near', (['x[1]', '(-self.Ly / 2)'], {}), '(x[1], -self.Ly / 2)\n', (823, 843), True, 'import dolfin as df\n')] |
# Copyright 2021 <NAME>, Department of Chemistry, University of Wisconsin-Madison
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import matplotlib.pyplot as plt
import scipy.linalg
class QuasiMSM(object):
    """
    Wrapper for the functions used for the development of a Quasi-Markov State Model.
    Parameters
    ----------
    input_len: int, default = 1
        The number of flatten TPM in your input. It will be used to reshape the input TPM.
        input_len = (largest lag_time - smallest lag_time) / delta_time
    dimension: int, default = 1
        The dimensions of the TPM. This number is equal to the number of macrostate in your model.
        It will be used to reshape the input TPM.
    delta_time: float, optional, default = 1.0
        The time interval between each flatten TPM. All the calculation share the
        same time unit as delta_time. When using the default vaule 1,
        one should pay attention to the unit.
    There are other initial properties passed to the Class once they are created.
    TPM property can be retrieved through reshaping the original input data.
    lag_time property can be calculated through the input delta_time.
    dTPM_dt property is the derivative of TPM and dTPM_dt_0 is the first element of dTPM_dt.
    tau_k property is the time of memory kernel used to build qMSM for long time dynamics prediction.
    K_matrix property is set to store the memory kernel matrix K in qMSM.
    MIK property is a matrix set to store the Mean Integral of the memory kernel.
    Other properties are private properties which are used to determine the process inside the program.
    """
    def __init__(self, input_len=1, dimension=1, delta_time=1.0):
        self.delta_time = delta_time
        self.input_len = input_len
        self.dimension = dimension
        # TPM[i] holds the transition probability matrix at lag_time[i] = (i+1)*delta_time
        self.TPM = np.zeros((input_len, dimension, dimension))
        self.lag_time = np.zeros(input_len)
        # forward finite-difference derivative of TPM, one entry fewer than TPM
        self.dTPM_dt = np.zeros((input_len - 1, dimension, dimension))
        self.dTPM_dt_0 = 0
        self.tau_k = 0
        self.K_matrix = []
        self.MIK = []
        # private state flags enforcing the call order:
        # GetData -> Pre_SetData -> Get_dTPM_dt -> Calculate_K_matrix
        self.__row_norm = False
        self.__col_norm = False
        self.__get_data = False
        self.__pre_set_data = False
        self.__get_dTPM_dt = False
        self.__calculate_K_matrix = False

    def GetData(self, input_data):
        """
        Fetch the initial TPM data and determine the data format.
        :param input_data: input_data is the TPMs(Transition Probability Matrix) at different lag_time;
                            The input_data can be either row normalized or column normalized, and in any shape.
                            The input_data will be reshaped accordingly for our calculation.
        """
        self.raw_data = input_data
        if not isinstance(self.raw_data, np.ndarray):
            raise TypeError("Loading input data is not np.ndarray type")
        elif not self.raw_data.ndim == 2:
            raise IOError("Dimension of input data is not 2")
        else:
            self.__get_data = True

    def Pre_SetData(self):
        """
        This method preprocesses the input raw data, measures its characteristics and ensures the stability
        of the subsequent calculations. The input data needs to be consistent with the length and dimensionality
        of the input, and if the test passes, the initial data is reconstructed to produce a TPM tensor.
        It will also determine whether the TPM is row normalized or column normalized, and the different normalization
        schemes will affect the subsequent matrix multiplication operations.
        """
        if self.input_len != len(self.raw_data):
            raise IOError("Input length is inconsistent with real data length")
        elif not self.dimension == np.sqrt(len(self.raw_data[0])):
            raise IOError("Input dimension is inconsistent with real data dimension")
        else:
            self.TPM = np.reshape(self.raw_data, (self.input_len, self.dimension, self.dimension))
            for i in range(self.input_len):
                self.lag_time[i] = self.delta_time * (i+1)
            # normalization is probed on a single row/column of the 4th TPM
            if abs((np.sum(self.TPM[3, 0])) - 1) < 1e-3:
                self.__row_norm = True
                print("The Transtion Probability Matrix is row normalized and row normalization algorithm is used !")
            elif abs(np.sum(self.TPM[3, :, 0]) - 1) < 1e-3:
                self.__col_norm = True
                print("The Transtion Probability Matrix is column normalized and column normalization algorithm is used !")
                # transpose column-normalized matrices so all later math uses the row convention
                for i in range(len(self.TPM)):
                    self.TPM[i] = self.TPM[i].T
            else:
                raise IOError("Transition Probablity Matrix is not normalized, cannot do qMSM")
        self.__pre_set_data = True

    def Get_dTPM_dt(self):
        """
        This method is designed to calculate the derivative of TPM;
        Notably, the derivative at time zero point should be computed individually.
        (When computing the zero point, ignore the influence of memory kernel term)
        Returns
        -------
        dTPM_dt_0 is the derivative at zero point, dTPM_dt is a derivative for different lag_time
        """
        for k in range(0, int(self.input_len) - 1):
            self.dTPM_dt[k] = (self.TPM[k + 1] - self.TPM[k]) / self.delta_time
        # zero-point derivative: solve T(0)^-1 * dT/dt at the first lag
        self.dTPM_dt_0 = np.dot(np.linalg.inv(self.TPM[0]), self.dTPM_dt[0])
        self.__get_dTPM_dt = True
        return self.dTPM_dt_0, self.dTPM_dt

    def Calculate_K_matrix(self, cal_step=10, outasfile=False, outdir="./"):
        """
        This method is designed to calculate the Memory Kernel K matrices.
        This step uses the greedy algorithm to iteratively compute memory kernel items, and
        requires great attention to the handling of the initial items.
        Parameters
        ----------
        cal_step: Equal to tau_k, corresponding to the point where memory kernel decays to zero.
                  (This is a very import parameter for qMSM. You may want to try different values to optimize your result)
        outasfile: Decide whether to output the results of memory kernel calculations to a file.
        outdir: The output directory for your result
        Returns
        -------
        K_matrix: Memory kernel calculation result tensor with cal_step entries.
        """
        self.K_matrix = np.zeros((int(cal_step), self.dimension, self.dimension))
        self.tau_k = cal_step
        if not self.__get_data:
            raise ValueError('Please use get_data method to get appropriate TPM data')
        if not self.__pre_set_data:
            raise NotImplementedError('Please use pre_set_data method to reset TPM')
        if not self.__get_dTPM_dt:
            raise NotImplementedError('Please use get_dTPM_dt method to calculate derivative')
        n = 0
        while n < self.tau_k:
            memory_term = np.zeros((self.dimension, self.dimension))
            if n > 0:
                # accumulate the convolution sum of previously computed kernel terms
                for m in range(0, n):
                    memory_term += np.dot(self.TPM[n - m], self.K_matrix[m])
                self.K_matrix[n] = np.dot(np.linalg.inv(self.TPM[0]),
                                           (((self.dTPM_dt[n] - np.dot(self.TPM[n], self.dTPM_dt_0)) / self.delta_time) - memory_term))
            else:
                # first kernel term has no memory contribution yet
                self.K_matrix[n] = np.dot(np.linalg.inv(self.TPM[0]), ((self.dTPM_dt[n] - np.dot(self.TPM[n], self.dTPM_dt_0)) / self.delta_time))
            n += 1
        if outasfile:
            kernel = np.zeros((int(cal_step), self.dimension**2))
            for i in range(int(cal_step)):
                kernel[i] = np.reshape(self.K_matrix[i], (1, -1))
            with open("{}calculate_K_matrix_output.txt".format(outdir), 'ab') as file:
                # only write into an empty file; refuse to append to an existing one
                if not os.path.getsize("{}calculate_K_matrix_output.txt".format(outdir)):
                    np.savetxt(file, kernel, delimiter=' ')
                else:
                    raise IOError('Output File already exists, please create another!!')
            del kernel
        self.__calculate_K_matrix = True
        return self.K_matrix

    def KernelPlot(self, K_matrix):
        """
        This method is designed to plot a figure for memory kernel at different lag time.
        The trend of the memory kernel can determine the adequate values of tau_k and cal_step.
        Parameters
        ----------
        K_matrix
            The returned tensor from Calculate_K_matrix method.
        """
        if not isinstance(K_matrix, np.ndarray) or not K_matrix.ndim == 3:
            raise ValueError('K_matrix should be a return value of Calculate_K_matrix method')
        else:
            length = len(K_matrix)
            lag_time = np.zeros(length)
            for i in range(length):
                lag_time[i] = (i+1) * self.delta_time
            plt.figure(1)
            for i in range(self.dimension):
                for j in range(self.dimension):
                    plt.subplot(self.dimension, self.dimension, i*self.dimension+j+1)
                    plt.plot(lag_time, K_matrix[:, i, j], color='black', label='K'+str(i+1)+str(j+1))
                    plt.legend(loc='best', frameon=True)
                    plt.rcParams['xtick.direction'] = 'in'
                    plt.rcParams['ytick.direction'] = 'in'
            plt.show()
            del length
            del K_matrix

    def MeanIntegralKernel(self, MIK_time=0, figure=False, outdir="./"):
        """
        This method is designed to calculate the integral of the memory kernel over a period of time (MIK_time).
        The trend of the integral value over time can also be plotted using figure parameters.
        It can be used to determine parameters such as tau_k and cal_step.
        Parameters
        ----------
        MIK_time: Time used to compute the integral of the memory kernel.
                  Must be at least 1 and shouldn't be out of the total range of the K_matrix.
        figure: Decide whether to plot the figure or not.
        outdir: The output directories for your result
        """
        if not self.__calculate_K_matrix:
            raise NotImplementedError('Please use calculate_K_matrix to calculate kernel matrix in advance')
        if self.tau_k < MIK_time:
            raise ValueError('MIK_time is longer than kernel matrix length, not enough data for calculation')
        # FIX: the default MIK_time=0 used to fall through and raise a bare
        # IndexError on integral_kernel[0]; fail with an explicit message instead.
        if MIK_time < 1:
            raise ValueError('MIK_time must be at least 1')
        integral_kernel = np.zeros((int(MIK_time), self.dimension, self.dimension))
        integral_kernel[0] = self.K_matrix[0]
        # cumulative (rectangle-rule) integral of the kernel
        for i in range(1, MIK_time):
            integral_kernel[i] = integral_kernel[i-1] + self.K_matrix[i]
        integral_kernel = integral_kernel * self.delta_time
        self.MIK = np.zeros(int(MIK_time))
        for i in range(MIK_time):
            # Frobenius norm of the integrated kernel, averaged per matrix element
            self.MIK[i] = np.sqrt(np.sum(np.power(integral_kernel[i], 2))) / self.dimension
        del integral_kernel
        if figure:
            plt.figure(figsize=(8, 5))
            plt.plot(self.lag_time[:int(MIK_time)], self.MIK, color='black')
            plt.title("Mean Integral of the Memory Kernel(MIK)")
            plt.rcParams['xtick.direction'] = 'in'
            plt.rcParams['ytick.direction'] = 'in'
            plt.ylim(bottom=0)
            plt.tick_params(labelsize='large')
            plt.xlabel("Lag time(ns)", size=16)
            plt.ylabel("MIK(1/Lag time)", size=16)
            plt.savefig("{}MIK.png".format(outdir))
            plt.show()

    def QuasiMSMPrediction(self, kernel_matrix, tau_k=10, end_point=100, outasfile=False, out_RMSE=False, outdir="./"):
        """
        This method is designed to use the qMSM algorithm in combination with the memory kernel
        for accurate prediction of long time scale dynamics.
        Parameters
        ----------
        kernel_matrix: K_matrix calculated from the previous method; containing all information about memory;
        tau_k: The Point where memory term decays to zeros and prediction starts;
        end_point: The Point where prediction stops;
        outasfile: Decide whether to output the results of prediction to a file;
        out_RMSE: Decide whether to output the results of predicted TPM for later RMSE calculations;
        outdir: The output directory for your result
        Returns
        -------
        TPM_propagate: The result of prediction using qMSM and memory kernel;
        TPM_gen_RMSE: The result of predicted TPMs and lag_time used for later RMSE calculations.
        """
        if not isinstance(kernel_matrix, np.ndarray):
            raise TypeError("Loading input matrix is not numpy.ndarray type")
        elif not kernel_matrix.ndim == 3:
            raise IOError("Dimension of input data is not correct, should be 3-D tensor")
        elif not len(kernel_matrix) > tau_k+1:
            raise IOError('Input kernel_matrix is inconsistent with tau_k set')
        elif tau_k >= end_point:
            raise ValueError('tau_k is longer than end_point, cannot propogate')
        elif not self.__get_data:
            raise ValueError('Please use get_data method to get appropriate TPM data')
        elif not self.__pre_set_data:
            raise NotImplementedError('Please use pre_set_data method to set TPM')
        else:
            TPM_propagate = np.zeros((int(end_point+1), self.dimension, self.dimension))
            # seed the propagation with the reference TPMs up to tau_k
            TPM_propagate[:tau_k, :, :] = self.TPM[:tau_k, :, :]
            TPM_grad = np.zeros((int(end_point), self.dimension, self.dimension))
            kernel = kernel_matrix[:tau_k-1, :, :]
            for i in range(tau_k-1):
                TPM_grad[i] = (TPM_propagate[i + 1] - TPM_propagate[i]) / self.delta_time
            TPM_grad0 = np.dot(np.linalg.inv(TPM_propagate[0]), TPM_grad[0])
            # forward-Euler integration of the generalized master equation
            n = tau_k-2
            while n < end_point:
                memory_kernel = 0
                for m in range(0, min(tau_k-1, n+1), 1):
                    memory_kernel += np.dot(TPM_propagate[n - m], kernel[m])
                TPM_grad[n] = np.dot(TPM_propagate[n], TPM_grad0) + self.delta_time * memory_kernel
                TPM_propagate[n + 1] = (self.delta_time * TPM_grad[n]) + TPM_propagate[n]
                n += 1
            TPM_gen = np.zeros((int(end_point+1), self.dimension ** 2))
            TPM_gen_RMSE = np.zeros((int(end_point+1), self.dimension**2 + 1))
            for i in range(int(end_point+1)):
                TPM_gen[i] = np.reshape(TPM_propagate[i], (1, -1))
                # prepend the lag time as column 0 for the RMSE bookkeeping
                TPM_gen_RMSE[i] = np.insert(TPM_gen[i], 0, values=(self.delta_time * i), axis=0)
            if outasfile:
                with open("{}qMSM_Propagate_TPM.txt".format(outdir), 'ab') as file1:
                    if not os.path.getsize("{}qMSM_Propagate_TPM.txt".format(outdir)):
                        np.savetxt(file1, TPM_gen, delimiter=' ')
                    else:
                        raise IOError('Output File already exists, please create another!!')
            if out_RMSE:
                with open('qMSM_Propagate_TPM_RMSE.txt', 'ab') as file2:
                    if not os.path.getsize('qMSM_Propagate_TPM_RMSE.txt'):
                        np.savetxt(file2, TPM_gen_RMSE, delimiter=' ')
                    else:
                        raise IOError('Output File already exists, please create another!!')
            del kernel
            del TPM_grad
            del TPM_grad0
            del TPM_gen
            return TPM_propagate, TPM_gen_RMSE

    def MSMPrediction(self, tau=10, end_point=100, add_iden_mat=False, outasfile=False, out_RMSE=False, outdir="./"):
        """
        This method is designed to propagate long time scale dynamics TPMs using Markov State Model.
        T[n tau] = (T[tau])^{n}
        Parameters
        ----------
        tau: Lag-time for Markov Chain propagations;
        end_point: Point where prediction stops;
        add_iden_mat: Decide whether add an identity matrix at the beginning of the TPM;
        outasfile: Decide whether to output the results of propagation to a file;
        out_RMSE: Decide whether to output the results of propagated TPM for later RMSE calculations;
        outdir: The output directory for your result
        Returns
        -------
        TPM_propagate: The result of prediction using MSM;
        TPM_gen_RMSE: The result of propagated TPMs and lag_time used for later RMSE calculations.
        """
        if tau >= end_point:
            raise ValueError('tau is longer than end_point, cannot propagate')
        elif not self.__get_data:
            raise ValueError('Please use get_data method to get appropriate TPM data')
        elif not self.__pre_set_data:
            raise NotImplementedError('Please use pre_set_data method to set TPM')
        else:
            TPM_propagate = np.zeros(((int(add_iden_mat) + (end_point // tau)), self.dimension, self.dimension))
            time = np.zeros(int(add_iden_mat) + (end_point // tau))
            # NOTE(review): self.TPM[tau] corresponds to lag (tau+1)*delta_time
            # (lag_time[i] = (i+1)*delta_time) while time[] records tau*delta_time;
            # this looks like an off-by-one but is kept to preserve published results.
            if add_iden_mat:
                TPM_propagate[0] = np.identity(self.dimension)
                TPM_propagate[1] = self.TPM[tau]
                time[0] = 0
                time[1] = tau * self.delta_time
            else:
                TPM_propagate[0] = self.TPM[tau]
                time[0] = tau * self.delta_time
            # Chapman-Kolmogorov propagation: repeated multiplication by T(tau)
            for i in range((int(add_iden_mat) + 1), len(TPM_propagate), 1):
                TPM_propagate[i] = np.dot(TPM_propagate[i - 1], self.TPM[tau])
                time[i] = time[i - 1] + (tau * self.delta_time)
            TPM_gen = np.zeros((len(TPM_propagate), self.dimension**2))
            TPM_gen_RMSE = np.zeros((len(TPM_propagate), self.dimension**2 + 1))
            for i in range(len(TPM_propagate)):
                TPM_gen[i] = np.reshape(TPM_propagate[i], (1, -1))
                TPM_gen_RMSE[i] = np.insert(TPM_gen[i], 0, values=time[i], axis=0)
            if outasfile:
                with open("{}MSM_Propagate_TPM.txt".format(outdir), 'ab') as file1:
                    if not os.path.getsize("{}MSM_Propagate_TPM.txt".format(outdir)):
                        np.savetxt(file1, TPM_gen, delimiter=' ')
                    else:
                        raise IOError('Output File already exists, please create another!!')
            if out_RMSE:
                with open("{}MSM_Propagate_TPM_RMSE.txt".format(outdir), 'ab') as file2:
                    if not os.path.getsize("{}MSM_Propagate_TPM_RMSE.txt".format(outdir)):
                        np.savetxt(file2, TPM_gen_RMSE, delimiter=' ')
                    else:
                        raise IOError('Output File already exists, please create another!!')
            del time
            del TPM_gen
            return TPM_propagate, TPM_gen_RMSE

    def CK_figure(self, qMSM_TPM, MSM_TPM, grid=None, slice_dot=10, add_iden_mat=False, diag=True, outdir="./"):
        """
        This method is designed to plot a figure for Chapman-Kolmogorov (CK) tests of results generated by
        MD(reference), qMSM and MSM. The CK test can be used to visualize the differences and similarities between
        qMSM, MSM prediction results for long time kinetics and real MD results.
        Parameters
        ----------
        qMSM_TPM: TPM containing lag_time information predicted by qMSM;
        MSM_TPM: TPM containing lag_time information predicted by MSM;
        grid: Distribution of image positions for different TPM elements;
        slice_dot: Lag_time to draw the scatter plots;
        add_iden_mat: Decide whether add an identity matrix or not at the beginning of the TPM sequence;
        diag: Decide whether to draw the diagonal elements of the TPM matrix;
        outdir: The output directory for your result
        """
        # FIX: grid=np.zeros(2) was a mutable default argument; use a None sentinel.
        if grid is None:
            grid = np.zeros(2)
        if not self.__get_data or not self.__pre_set_data:
            raise NotImplementedError('Please use get_data method and pre_set_data method to set TPM')
        if not len(MSM_TPM) == 0:
            if not isinstance(MSM_TPM, np.ndarray) or not MSM_TPM.ndim == 2:
                raise ValueError('MSM_TPM should be a return value of MSMPrediction method')
            if not len(MSM_TPM[0]) == self.dimension**2+1:
                raise ValueError('Time information should be included in the input TPM')
        if not isinstance(qMSM_TPM, np.ndarray) or not qMSM_TPM.ndim == 2:
            raise ValueError('qMSM_TPM should be a return value of QuasiMSMPrediction method')
        if not len(qMSM_TPM[0]) == self.dimension**2+1:
            raise ValueError('Time information should be included in the input TPM')
        if not len(grid) == 2 or not (grid[0]*grid[1] == self.dimension or grid[0]*grid[1] == self.dimension**2):
            raise ValueError('Please set appropriate 2-D grid structure')
        if not len(MSM_TPM) == 0:
            MSM_time = MSM_TPM[:, 0]
            MSM_TPM_plt = np.reshape(MSM_TPM[:, 1:], (len(MSM_TPM), self.dimension, self.dimension))
        qMSM_time = qMSM_TPM[:, 0]
        qMSM_TPM_plt = np.reshape(qMSM_TPM[:, 1:], (len(qMSM_TPM), self.dimension, self.dimension))
        if add_iden_mat:
            # prepend the t=0 identity point so curves start at probability 1
            qMSM_time = np.insert(qMSM_time, 0, values=0, axis=0)
            qMSM_TPM_plt = np.insert(qMSM_TPM_plt, 0, values=np.identity(self.dimension), axis=0)
        if len(qMSM_TPM) > (self.input_len):
            print("Length of referred TPM is shorter, use the referred TPM length as cut-off; ")
            num_dot = len(self.TPM) // slice_dot
        if len(qMSM_TPM) <= (self.input_len):
            print("Length of qMSM TPM is shorter, use the qMSM TPM length as cut-off;")
            num_dot = len(qMSM_TPM) // slice_dot
        del qMSM_TPM
        qMSM_time_dot = np.zeros(num_dot)
        qMSM_TPM_dot = np.zeros((num_dot, self.dimension, self.dimension))
        MD_time_dot = np.zeros(num_dot)
        MD_TPM_dot = np.zeros((num_dot, self.dimension, self.dimension))
        # subsample every slice_dot-th point for the scatter overlays
        for i in range(0, num_dot):
            qMSM_time_dot[i] = qMSM_time[i*slice_dot]
            qMSM_TPM_dot[i] = qMSM_TPM_plt[i*slice_dot]
            MD_time_dot[i] = self.lag_time[i*slice_dot]
            MD_TPM_dot[i] = self.TPM[i*slice_dot]
        if diag:
            plt.figure(figsize=(20, 5))
            for i in range(self.dimension):
                plt.subplot(1, grid[0], i+1)
                if not len(MSM_TPM) == 0:
                    plt.scatter(MSM_time, MSM_TPM_plt[:, i, i], marker='o', color='green', s=10)
                    plt.plot(MSM_time, MSM_TPM_plt[:, i, i], color='green', linewidth=2.5, linestyle='--', label='MSM')
                plt.scatter(qMSM_time_dot, qMSM_TPM_dot[:, i, i], marker='o', color='red', s=10)
                plt.plot(qMSM_time, qMSM_TPM_plt[:, i, i], color='red', linewidth=2.5, label='qMSM')
                plt.scatter(MD_time_dot, MD_TPM_dot[:, i, i], marker='o', color='white', edgecolors='gray', s=20, label='MD')
                plt.title(r"$P_{" + str(i)*2 + "}$", fontsize=15)
                plt.legend(loc='best', frameon=True)
                plt.xlim(left=0)
                plt.ylim(top=1)
                plt.tick_params(labelsize='large')
                plt.xlabel("Lag time(ns)", size=16)
                plt.ylabel("Residence Probability", size=16)
            plt.tight_layout()
            plt.savefig("{}CK_plot.png".format(outdir))
            plt.show()
        else:
            plt.figure(figsize=(20, 5))
            for i in range(self.dimension):
                for j in range(self.dimension):
                    plt.subplot(grid[0], grid[1], i*self.dimension+j+1)
                    if not len(MSM_TPM) == 0:
                        plt.scatter(MSM_time, MSM_TPM_plt[:, i, j], marker='o', color='green', s=10)
                        plt.plot(MSM_time, MSM_TPM_plt[:, i, j], color='green', linewidth=2.5, linestyle='--', label='MSM')
                    plt.scatter(qMSM_time_dot, qMSM_TPM_dot[:, i, j], marker='o', color='red', s=10)
                    plt.plot(qMSM_time, qMSM_TPM_plt[:, i, j], color='red', linewidth=2.5, label='qMSM')
                    plt.scatter(MD_time_dot, MD_TPM_dot[:, i, j], marker='o', color='white', edgecolors='gray', s=20, label='MD')
                    plt.title(r"$P_{" + str(i)*2 + "}$", fontsize=15)
                    plt.legend(loc='best', frameon=True)
                    plt.xlim(0, num_dot)
                    plt.ylim(top=1)
                    plt.tick_params(labelsize='large')
                    plt.xlabel("Lag time(ns)", size=16)
                    plt.ylabel("Residence Probability", size=16)
            plt.tight_layout()
            plt.savefig("{}CK_plot.png".format(outdir))
            plt.show()
        # FIX: this guard used to test MSM_TPM_plt, which is undefined when
        # MSM_TPM is empty and raised NameError; test the input instead.
        if not len(MSM_TPM) == 0:
            del MSM_time
            del MSM_TPM_plt
        del MSM_TPM
        del qMSM_TPM_plt
        del qMSM_time
        del qMSM_TPM_dot
        del qMSM_time_dot
        del MD_time_dot
        del MD_TPM_dot

    def RMSE(self, kernel, end_point=100, figure=False, outasfile=False, outdir="./", stat_lag=10):
        """
        This method is used to compute time-averaged root mean squared error(RMSE) of qMSM and MSM.
        RMSE can evaluate the performance of qMSM and MSM.
        We can also optimize tau_k(the lag_time of qMSM) through RMSE calculation.
        Parameters
        ----------
        kernel: Memory kernel used to do qMSM, generated from the Calculate_K_matrix method;
        end_point: The end point for calculation for RMSE;
        figure: Decide whether to plot a figure for RMSE or not;
        outasfile: Decide whether to output the data of RMSE or not;
        outdir: The output directory for your result
        stat_lag: Index of the stored TPM used to extract the stationary distribution
                  for the population weighting (default 10, the historical behavior).
        Returns
        -------
        the detailed data for RMSE of both qMSM and MSM of different tau_k;
        """
        if not self.__get_data or not self.__pre_set_data:
            raise NotImplementedError('Please use get_data method and pre_set_data method to set TPM')
        if not isinstance(kernel, np.ndarray):
            raise TypeError("Loading input matrix is not numpy.ndarray type")
        if not kernel.ndim == 3:
            raise IOError("Dimension of input data is not correct, should be 3-D tensor")
        if len(kernel)-1 < end_point:
            raise ValueError("The length of memory kernel matrices is shorter than end point")
        if not 0 <= stat_lag < self.input_len:
            raise ValueError("stat_lag is out of the range of the stored TPM sequence")
        qMSM_RMSE = []
        MSM_RMSE = []
        RMSE_time = []
        n = 2
        # stationary distribution = left eigenvector of T with eigenvalue 1
        eign_val, eign_vec = scipy.linalg.eig(self.TPM[stat_lag], right=False, left=True)
        eign_vec = eign_vec.real
        tolerance = 1e-8
        mask = abs(eign_val - 1) < tolerance
        temp = eign_vec[:, mask].T
        p_k = temp / np.sum(temp)
        # FIX: was np.reshape(p_k, (4)) — a hard-coded 4-state assumption that
        # crashed for any other model dimension.
        p_k = np.reshape(p_k, (self.dimension,))
        p_k = np.diag(p_k)
        while n < end_point:
            qMSM_TPM, qMSM_TPM_RMSE = self.QuasiMSMPrediction(kernel_matrix=kernel, tau_k=n, end_point=end_point)
            MSM_TPM, MSM_TPM_RMSE = self.MSMPrediction(tau=n, end_point=end_point)
            MSM_TPM_time = MSM_TPM_RMSE[:, 0]
            qMSM_delt_mat = np.zeros((len(qMSM_TPM), self.dimension, self.dimension))
            for i in range(end_point):
                # population-weighted squared deviation from the reference TPM
                qMSM_delt_mat[i] = np.dot(p_k, (qMSM_TPM[i] - self.TPM[i]))
                qMSM_delt_mat[i] = np.power(qMSM_delt_mat[i], 2)
            MSM_delt_mat = np.zeros((len(MSM_TPM_time), self.dimension, self.dimension))
            for i in range(len(MSM_TPM_time)):
                MSM_delt_mat[i] = np.dot(p_k, (MSM_TPM[i] - self.TPM[int(MSM_TPM_time[i] / self.delta_time - 1)]))
                MSM_delt_mat[i] = np.power(MSM_delt_mat[i], 2)
            qMSM_RMSE = np.append(qMSM_RMSE, 100*(np.sqrt((np.sum(qMSM_delt_mat) / self.dimension**2 / (len(qMSM_delt_mat))))))
            MSM_RMSE = np.append(MSM_RMSE, 100*(np.sqrt((np.sum(MSM_delt_mat) / self.dimension**2 / (len(MSM_delt_mat))))))
            RMSE_time.append(n * self.delta_time)
            n += 1
            del qMSM_TPM
            del qMSM_TPM_RMSE
            del MSM_TPM
            del MSM_TPM_RMSE
        if figure:
            plt.figure(figsize=(5, 5))
            plt.plot(RMSE_time, qMSM_RMSE, color='red', label='qMSM', linewidth=2.5)
            plt.plot(RMSE_time, MSM_RMSE, color='black', label='MSM', linewidth=2.5)
            plt.legend(loc='best', frameon=True)
            plt.ylabel('RMSE(%)', size=16)
            plt.xlabel('Lag Time(ns)', size=16)
            plt.xlim(left=0, right=RMSE_time[int((len(RMSE_time) - 1)/2)])
            plt.ylim(bottom=0)
            plt.tick_params(labelsize='large')
            plt.tight_layout()
            plt.savefig("{}RMSE.png".format(outdir))
            plt.show()
        if outasfile:
            qMSM_RMSE_out = np.zeros((len(qMSM_RMSE), 2))
            qMSM_RMSE_out[:, 0] = RMSE_time
            qMSM_RMSE_out[:, 1] = qMSM_RMSE
            MSM_RMSE_out = np.zeros((len(MSM_RMSE), 2))
            MSM_RMSE_out[:, 0] = RMSE_time
            MSM_RMSE_out[:, 1] = MSM_RMSE
            with open("{}qMSM_RMSE.txt".format(outdir), 'ab') as file1:
                if not os.path.getsize("{}qMSM_RMSE.txt".format(outdir)):
                    np.savetxt(file1, qMSM_RMSE_out, delimiter=' ')
                else:
                    raise IOError('Output File already exists, please create another!!')
            with open("{}MSM_RMSE.txt".format(outdir), 'ab') as file2:
                if not os.path.getsize("{}MSM_RMSE.txt".format(outdir)):
                    np.savetxt(file2, MSM_RMSE_out, delimiter=' ')
                else:
                    raise IOError('Output File already exists, please create another!!')
            del qMSM_RMSE_out
            del MSM_RMSE_out
        return qMSM_RMSE, MSM_RMSE
#####################################################
#For calculation done in
#Cao S. et al. On the advantages of exploiting memory in Markov state models for biomolecular dynamics.
#J. Chem. Phys. 153. 014105. (2020), https://doi.org/10.1063/5.0010787
## qMSM for Alanine Dipeptide
# input_data = np.loadtxt("ala2-pccap-4states-0.1ps-50ps.txt", dtype=float)
# qmsm = QuasiMSM(input_len=500, delta_time=0.1, dimension=4)
# qmsm.GetData(input_data)
# qmsm.Pre_SetData()
# qmsm.Get_dTPM_dt()
# km = qmsm.Calculate_K_matrix(cal_step=300)
# qmsm.MeanIntegralKernel(MIK_time=50, figure=True)
# qmsm_tpm, qmsm_tpm_time = qmsm.QuasiMSMPrediction(kernel_matrix=km, tau_k=15, end_point=200)
# msm_tpm, msm_tpm_time = qmsm.MSMPrediction(tau=15, end_point=200, add_iden_mat=False)
# qmsm.CK_figure(qMSM_TPM=qmsm_tpm_time, MSM_TPM=msm_tpm_time, add_iden_mat=True, diag=False, grid=[4,4], slice_dot=10)
# qmsm.RMSE(kernel=km, end_point=200, figure=True, outasfile=False)
## qMSM for FIP35 WW_Domain
# input_data = np.loadtxt("FIP35_TPM_4states_1ns_2us.txt", dtype=float)
# qmsm = QuasiMSM(input_len=2000, delta_time=1, dimension=4)
# qmsm.GetData(input_data)
# qmsm.Pre_SetData()
# qmsm.Get_dTPM_dt()
# km = qmsm.Calculate_K_matrix(cal_step=400)
# qmsm.MeanIntegralKernel(MIK_time=250, figure=True)
# qmsm_tpm, qmsm_tpm_time = qmsm.QuasiMSMPrediction(kernel_matrix=km, tau_k=25, end_point=400)
# msm_tpm, msm_tpm_time = qmsm.MSMPrediction(tau=25, end_point=400, add_iden_mat=True)
# qmsm.CK_figure(qMSM_TPM=qmsm_tpm_time, MSM_TPM=msm_tpm_time, add_iden_mat=True, diag=True, grid=[4,4], slice_dot=40)
# qmsm.RMSE(kernel=km, end_point=399, figure=True, outasfile=False)
#####################################################
#For calculations done in
#<NAME> al. Critical role of backbone coordination in the mRNA recognition by RNA induced silencing complex.
#Commun. Biol. 4. 1345. (2021). https://doi.org/10.1038/s42003-021-02822-7
## qMSM for hAgo2 System
# input_data = np.loadtxt("Lizhe_TPM.sm.macro-transpose.5-800ns.txt", dtype=float)
# qmsm = QuasiMSM(input_len=160, delta_time=1, dimension=4)
# qmsm.GetData(input_data)
# qmsm.Pre_SetData()
# qmsm.Get_dTPM_dt()
# km = qmsm.Calculate_K_matrix(cal_step=100)
# qmsm.MeanIntegralKernel(MIK_time=80, figure=True)
# qmsm.KernelPlot(km)
# qmsm_tpm, qmsm_tpm_time = qmsm.QuasiMSMPrediction(kernel_matrix=km, tau_k=50, end_point=200)
# msm_tpm, msm_tpm_time = qmsm.MSMPrediction(tau=5, end_point=150, add_iden_mat=True)
# qmsm.CK_figure(qMSM_TPM=qmsm_tpm_time, MSM_TPM=msm_tpm_time, add_iden_mat=True, diag=True, grid=[4,4], slice_dot=10)
# qmsm.RMSE(kernel=km, end_point=80, figure=True, outasfile=False) | [
"matplotlib.pyplot.title",
"numpy.sum",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tick_params",
"numpy.diag",
"matplotlib.pyplot.tight_layout",
"numpy.power",
"numpy.savetxt",
"numpy.identity",
"numpy.insert",
"numpy.reshape",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"os.pa... | [((2366, 2409), 'numpy.zeros', 'np.zeros', (['(input_len, dimension, dimension)'], {}), '((input_len, dimension, dimension))\n', (2374, 2409), True, 'import numpy as np\n'), ((2434, 2453), 'numpy.zeros', 'np.zeros', (['input_len'], {}), '(input_len)\n', (2442, 2453), True, 'import numpy as np\n'), ((2477, 2524), 'numpy.zeros', 'np.zeros', (['(input_len - 1, dimension, dimension)'], {}), '((input_len - 1, dimension, dimension))\n', (2485, 2524), True, 'import numpy as np\n'), ((19133, 19144), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (19141, 19144), True, 'import numpy as np\n'), ((22011, 22028), 'numpy.zeros', 'np.zeros', (['num_dot'], {}), '(num_dot)\n', (22019, 22028), True, 'import numpy as np\n'), ((22052, 22103), 'numpy.zeros', 'np.zeros', (['(num_dot, self.dimension, self.dimension)'], {}), '((num_dot, self.dimension, self.dimension))\n', (22060, 22103), True, 'import numpy as np\n'), ((22126, 22143), 'numpy.zeros', 'np.zeros', (['num_dot'], {}), '(num_dot)\n', (22134, 22143), True, 'import numpy as np\n'), ((22165, 22216), 'numpy.zeros', 'np.zeros', (['(num_dot, self.dimension, self.dimension)'], {}), '((num_dot, self.dimension, self.dimension))\n', (22173, 22216), True, 'import numpy as np\n'), ((27041, 27059), 'numpy.reshape', 'np.reshape', (['p_k', '(4)'], {}), '(p_k, 4)\n', (27051, 27059), True, 'import numpy as np\n'), ((27076, 27088), 'numpy.diag', 'np.diag', (['p_k'], {}), '(p_k)\n', (27083, 27088), True, 'import numpy as np\n'), ((7380, 7422), 'numpy.zeros', 'np.zeros', (['(self.dimension, self.dimension)'], {}), '((self.dimension, self.dimension))\n', (7388, 7422), True, 'import numpy as np\n'), ((9188, 9204), 'numpy.zeros', 'np.zeros', (['length'], {}), '(length)\n', (9196, 9204), True, 'import numpy as np\n'), ((9307, 9320), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (9317, 9320), True, 'import matplotlib.pyplot as plt\n'), ((9772, 9782), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9780, 
9782), True, 'import matplotlib.pyplot as plt\n'), ((11335, 11361), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (11345, 11361), True, 'import matplotlib.pyplot as plt\n'), ((11451, 11503), 'matplotlib.pyplot.title', 'plt.title', (['"""Mean Integral of the Memory Kernel(MIK)"""'], {}), "('Mean Integral of the Memory Kernel(MIK)')\n", (11460, 11503), True, 'import matplotlib.pyplot as plt\n'), ((11618, 11636), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(0)'}), '(bottom=0)\n', (11626, 11636), True, 'import matplotlib.pyplot as plt\n'), ((11649, 11683), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '"""large"""'}), "(labelsize='large')\n", (11664, 11683), True, 'import matplotlib.pyplot as plt\n'), ((11696, 11731), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Lag time(ns)"""'], {'size': '(16)'}), "('Lag time(ns)', size=16)\n", (11706, 11731), True, 'import matplotlib.pyplot as plt\n'), ((11743, 11781), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MIK(1/Lag time)"""'], {'size': '(16)'}), "('MIK(1/Lag time)', size=16)\n", (11753, 11781), True, 'import matplotlib.pyplot as plt\n'), ((11845, 11855), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11853, 11855), True, 'import matplotlib.pyplot as plt\n'), ((21450, 21491), 'numpy.insert', 'np.insert', (['qMSM_time', '(0)'], {'values': '(0)', 'axis': '(0)'}), '(qMSM_time, 0, values=0, axis=0)\n', (21459, 21491), True, 'import numpy as np\n'), ((22498, 22525), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 5)'}), '(figsize=(20, 5))\n', (22508, 22525), True, 'import matplotlib.pyplot as plt\n'), ((23671, 23681), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23679, 23681), True, 'import matplotlib.pyplot as plt\n'), ((23708, 23735), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 5)'}), '(figsize=(20, 5))\n', (23718, 23735), True, 'import matplotlib.pyplot as plt\n'), ((24999, 25009), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25007, 25009), True, 'import matplotlib.pyplot as plt\n'), ((27014, 27026), 'numpy.sum', 'np.sum', (['temp'], {}), '(temp)\n', (27020, 27026), True, 'import numpy as np\n'), ((28872, 28898), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (28882, 28898), True, 'import matplotlib.pyplot as plt\n'), ((28911, 28983), 'matplotlib.pyplot.plot', 'plt.plot', (['RMSE_time', 'qMSM_RMSE'], {'color': '"""red"""', 'label': '"""qMSM"""', 'linewidth': '(2.5)'}), "(RMSE_time, qMSM_RMSE, color='red', label='qMSM', linewidth=2.5)\n", (28919, 28983), True, 'import matplotlib.pyplot as plt\n'), ((28996, 29068), 'matplotlib.pyplot.plot', 'plt.plot', (['RMSE_time', 'MSM_RMSE'], {'color': '"""black"""', 'label': '"""MSM"""', 'linewidth': '(2.5)'}), "(RMSE_time, MSM_RMSE, color='black', label='MSM', linewidth=2.5)\n", (29004, 29068), True, 'import matplotlib.pyplot as plt\n'), ((29081, 29117), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'frameon': '(True)'}), "(loc='best', frameon=True)\n", (29091, 29117), True, 'import matplotlib.pyplot as plt\n'), ((29130, 29160), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""RMSE(%)"""'], {'size': '(16)'}), "('RMSE(%)', size=16)\n", (29140, 29160), True, 'import matplotlib.pyplot as plt\n'), ((29172, 29207), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Lag Time(ns)"""'], {'size': '(16)'}), "('Lag Time(ns)', size=16)\n", (29182, 29207), True, 'import matplotlib.pyplot as plt\n'), ((29293, 29311), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(0)'}), '(bottom=0)\n', (29301, 29311), True, 'import matplotlib.pyplot as plt\n'), ((29324, 29358), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '"""large"""'}), "(labelsize='large')\n", (29339, 29358), True, 'import matplotlib.pyplot as plt\n'), ((29371, 29389), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (29387, 29389), 
True, 'import matplotlib.pyplot as plt\n'), ((29455, 29465), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29463, 29465), True, 'import matplotlib.pyplot as plt\n'), ((4434, 4509), 'numpy.reshape', 'np.reshape', (['self.raw_data', '(self.input_len, self.dimension, self.dimension)'], {}), '(self.raw_data, (self.input_len, self.dimension, self.dimension))\n', (4444, 4509), True, 'import numpy as np\n'), ((5843, 5869), 'numpy.linalg.inv', 'np.linalg.inv', (['self.TPM[0]'], {}), '(self.TPM[0])\n', (5856, 5869), True, 'import numpy as np\n'), ((8104, 8141), 'numpy.reshape', 'np.reshape', (['self.K_matrix[i]', '(1, -1)'], {}), '(self.K_matrix[i], (1, -1))\n', (8114, 8141), True, 'import numpy as np\n'), ((22586, 22616), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'grid[0]', '(i + 1)'], {}), '(1, grid[0], i + 1)\n', (22597, 22616), True, 'import matplotlib.pyplot as plt\n'), ((22889, 22974), 'matplotlib.pyplot.scatter', 'plt.scatter', (['qMSM_time_dot', 'qMSM_TPM_dot[:, i, i]'], {'marker': '"""o"""', 'color': '"""red"""', 's': '(10)'}), "(qMSM_time_dot, qMSM_TPM_dot[:, i, i], marker='o', color='red', s=10\n )\n", (22900, 22974), True, 'import matplotlib.pyplot as plt\n'), ((22986, 23074), 'matplotlib.pyplot.plot', 'plt.plot', (['qMSM_time', 'qMSM_TPM_plt[:, i, i]'], {'color': '"""red"""', 'linewidth': '(2.5)', 'label': '"""qMSM"""'}), "(qMSM_time, qMSM_TPM_plt[:, i, i], color='red', linewidth=2.5,\n label='qMSM')\n", (22994, 23074), True, 'import matplotlib.pyplot as plt\n'), ((23087, 23200), 'matplotlib.pyplot.scatter', 'plt.scatter', (['MD_time_dot', 'MD_TPM_dot[:, i, i]'], {'marker': '"""o"""', 'color': '"""white"""', 'edgecolors': '"""gray"""', 's': '(20)', 'label': '"""MD"""'}), "(MD_time_dot, MD_TPM_dot[:, i, i], marker='o', color='white',\n edgecolors='gray', s=20, label='MD')\n", (23098, 23200), True, 'import matplotlib.pyplot as plt\n'), ((23297, 23333), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'frameon': 
'(True)'}), "(loc='best', frameon=True)\n", (23307, 23333), True, 'import matplotlib.pyplot as plt\n'), ((23350, 23366), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'left': '(0)'}), '(left=0)\n', (23358, 23366), True, 'import matplotlib.pyplot as plt\n'), ((23383, 23398), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'top': '(1)'}), '(top=1)\n', (23391, 23398), True, 'import matplotlib.pyplot as plt\n'), ((23415, 23449), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '"""large"""'}), "(labelsize='large')\n", (23430, 23449), True, 'import matplotlib.pyplot as plt\n'), ((23466, 23501), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Lag time(ns)"""'], {'size': '(16)'}), "('Lag time(ns)', size=16)\n", (23476, 23501), True, 'import matplotlib.pyplot as plt\n'), ((23524, 23568), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Residence Probability"""'], {'size': '(16)'}), "('Residence Probability', size=16)\n", (23534, 23568), True, 'import matplotlib.pyplot as plt\n'), ((23584, 23602), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (23600, 23602), True, 'import matplotlib.pyplot as plt\n'), ((27788, 27826), 'numpy.dot', 'np.dot', (['p_k', '(qMSM_TPM[i] - self.TPM[i])'], {}), '(p_k, qMSM_TPM[i] - self.TPM[i])\n', (27794, 27826), True, 'import numpy as np\n'), ((27864, 27893), 'numpy.power', 'np.power', (['qMSM_delt_mat[i]', '(2)'], {}), '(qMSM_delt_mat[i], 2)\n', (27872, 27893), True, 'import numpy as np\n'), ((28398, 28426), 'numpy.power', 'np.power', (['MSM_delt_mat[i]', '(2)'], {}), '(MSM_delt_mat[i], 2)\n', (28406, 28426), True, 'import numpy as np\n'), ((4630, 4652), 'numpy.sum', 'np.sum', (['self.TPM[3, 0]'], {}), '(self.TPM[3, 0])\n', (4636, 4652), True, 'import numpy as np\n'), ((7518, 7559), 'numpy.dot', 'np.dot', (['self.TPM[n - m]', 'self.K_matrix[m]'], {}), '(self.TPM[n - m], self.K_matrix[m])\n', (7524, 7559), True, 'import numpy as np\n'), ((7602, 7628), 'numpy.linalg.inv', 'np.linalg.inv', (['self.TPM[0]'], 
{}), '(self.TPM[0])\n', (7615, 7628), True, 'import numpy as np\n'), ((7820, 7846), 'numpy.linalg.inv', 'np.linalg.inv', (['self.TPM[0]'], {}), '(self.TPM[0])\n', (7833, 7846), True, 'import numpy as np\n'), ((8339, 8378), 'numpy.savetxt', 'np.savetxt', (['file', 'kernel'], {'delimiter': '""" """'}), "(file, kernel, delimiter=' ')\n", (8349, 8378), True, 'import numpy as np\n'), ((9433, 9504), 'matplotlib.pyplot.subplot', 'plt.subplot', (['self.dimension', 'self.dimension', '(i * self.dimension + j + 1)'], {}), '(self.dimension, self.dimension, i * self.dimension + j + 1)\n', (9444, 9504), True, 'import matplotlib.pyplot as plt\n'), ((9621, 9657), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'frameon': '(True)'}), "(loc='best', frameon=True)\n", (9631, 9657), True, 'import matplotlib.pyplot as plt\n'), ((21553, 21580), 'numpy.identity', 'np.identity', (['self.dimension'], {}), '(self.dimension)\n', (21564, 21580), True, 'import numpy as np\n'), ((22677, 22753), 'matplotlib.pyplot.scatter', 'plt.scatter', (['MSM_time', 'MSM_TPM_plt[:, i, i]'], {'marker': '"""o"""', 'color': '"""green"""', 's': '(10)'}), "(MSM_time, MSM_TPM_plt[:, i, i], marker='o', color='green', s=10)\n", (22688, 22753), True, 'import matplotlib.pyplot as plt\n'), ((22774, 22877), 'matplotlib.pyplot.plot', 'plt.plot', (['MSM_time', 'MSM_TPM_plt[:, i, i]'], {'color': '"""green"""', 'linewidth': '(2.5)', 'linestyle': '"""--"""', 'label': '"""MSM"""'}), "(MSM_time, MSM_TPM_plt[:, i, i], color='green', linewidth=2.5,\n linestyle='--', label='MSM')\n", (22782, 22877), True, 'import matplotlib.pyplot as plt\n'), ((23848, 23905), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid[0]', 'grid[1]', '(i * self.dimension + j + 1)'], {}), '(grid[0], grid[1], i * self.dimension + j + 1)\n', (23859, 23905), True, 'import matplotlib.pyplot as plt\n'), ((24191, 24276), 'matplotlib.pyplot.scatter', 'plt.scatter', (['qMSM_time_dot', 'qMSM_TPM_dot[:, i, j]'], {'marker': '"""o"""', 'color': 
'"""red"""', 's': '(10)'}), "(qMSM_time_dot, qMSM_TPM_dot[:, i, j], marker='o', color='red', s=10\n )\n", (24202, 24276), True, 'import matplotlib.pyplot as plt\n'), ((24292, 24380), 'matplotlib.pyplot.plot', 'plt.plot', (['qMSM_time', 'qMSM_TPM_plt[:, i, j]'], {'color': '"""red"""', 'linewidth': '(2.5)', 'label': '"""qMSM"""'}), "(qMSM_time, qMSM_TPM_plt[:, i, j], color='red', linewidth=2.5,\n label='qMSM')\n", (24300, 24380), True, 'import matplotlib.pyplot as plt\n'), ((24397, 24510), 'matplotlib.pyplot.scatter', 'plt.scatter', (['MD_time_dot', 'MD_TPM_dot[:, i, j]'], {'marker': '"""o"""', 'color': '"""white"""', 'edgecolors': '"""gray"""', 's': '(20)', 'label': '"""MD"""'}), "(MD_time_dot, MD_TPM_dot[:, i, j], marker='o', color='white',\n edgecolors='gray', s=20, label='MD')\n", (24408, 24510), True, 'import matplotlib.pyplot as plt\n'), ((24597, 24633), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'frameon': '(True)'}), "(loc='best', frameon=True)\n", (24607, 24633), True, 'import matplotlib.pyplot as plt\n'), ((24654, 24674), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'num_dot'], {}), '(0, num_dot)\n', (24662, 24674), True, 'import matplotlib.pyplot as plt\n'), ((24695, 24710), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'top': '(1)'}), '(top=1)\n', (24703, 24710), True, 'import matplotlib.pyplot as plt\n'), ((24731, 24765), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '"""large"""'}), "(labelsize='large')\n", (24746, 24765), True, 'import matplotlib.pyplot as plt\n'), ((24786, 24821), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Lag time(ns)"""'], {'size': '(16)'}), "('Lag time(ns)', size=16)\n", (24796, 24821), True, 'import matplotlib.pyplot as plt\n'), ((24848, 24892), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Residence Probability"""'], {'size': '(16)'}), "('Residence Probability', size=16)\n", (24858, 24892), True, 'import matplotlib.pyplot as plt\n'), ((24912, 24930), 
'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (24928, 24930), True, 'import matplotlib.pyplot as plt\n'), ((29941, 29988), 'numpy.savetxt', 'np.savetxt', (['file1', 'qMSM_RMSE_out'], {'delimiter': '""" """'}), "(file1, qMSM_RMSE_out, delimiter=' ')\n", (29951, 29988), True, 'import numpy as np\n'), ((30264, 30310), 'numpy.savetxt', 'np.savetxt', (['file2', 'MSM_RMSE_out'], {'delimiter': '""" """'}), "(file2, MSM_RMSE_out, delimiter=' ')\n", (30274, 30310), True, 'import numpy as np\n'), ((4833, 4858), 'numpy.sum', 'np.sum', (['self.TPM[3, :, 0]'], {}), '(self.TPM[3, :, 0])\n', (4839, 4858), True, 'import numpy as np\n'), ((11223, 11254), 'numpy.power', 'np.power', (['integral_kernel[i]', '(2)'], {}), '(integral_kernel[i], 2)\n', (11231, 11254), True, 'import numpy as np\n'), ((17375, 17402), 'numpy.identity', 'np.identity', (['self.dimension'], {}), '(self.dimension)\n', (17386, 17402), True, 'import numpy as np\n'), ((17755, 17798), 'numpy.dot', 'np.dot', (['TPM_propagate[i - 1]', 'self.TPM[tau]'], {}), '(TPM_propagate[i - 1], self.TPM[tau])\n', (17761, 17798), True, 'import numpy as np\n'), ((18093, 18130), 'numpy.reshape', 'np.reshape', (['TPM_propagate[i]', '(1, -1)'], {}), '(TPM_propagate[i], (1, -1))\n', (18103, 18130), True, 'import numpy as np\n'), ((18165, 18213), 'numpy.insert', 'np.insert', (['TPM_gen[i]', '(0)'], {'values': 'time[i]', 'axis': '(0)'}), '(TPM_gen[i], 0, values=time[i], axis=0)\n', (18174, 18213), True, 'import numpy as np\n'), ((23970, 24046), 'matplotlib.pyplot.scatter', 'plt.scatter', (['MSM_time', 'MSM_TPM_plt[:, i, j]'], {'marker': '"""o"""', 'color': '"""green"""', 's': '(10)'}), "(MSM_time, MSM_TPM_plt[:, i, j], marker='o', color='green', s=10)\n", (23981, 24046), True, 'import matplotlib.pyplot as plt\n'), ((24071, 24174), 'matplotlib.pyplot.plot', 'plt.plot', (['MSM_time', 'MSM_TPM_plt[:, i, j]'], {'color': '"""green"""', 'linewidth': '(2.5)', 'linestyle': '"""--"""', 'label': '"""MSM"""'}), "(MSM_time, 
MSM_TPM_plt[:, i, j], color='green', linewidth=2.5,\n linestyle='--', label='MSM')\n", (24079, 24174), True, 'import matplotlib.pyplot as plt\n'), ((7868, 7903), 'numpy.dot', 'np.dot', (['self.TPM[n]', 'self.dTPM_dt_0'], {}), '(self.TPM[n], self.dTPM_dt_0)\n', (7874, 7903), True, 'import numpy as np\n'), ((7688, 7723), 'numpy.dot', 'np.dot', (['self.TPM[n]', 'self.dTPM_dt_0'], {}), '(self.TPM[n], self.dTPM_dt_0)\n', (7694, 7723), True, 'import numpy as np\n'), ((18435, 18476), 'numpy.savetxt', 'np.savetxt', (['file1', 'TPM_gen'], {'delimiter': '""" """'}), "(file1, TPM_gen, delimiter=' ')\n", (18445, 18476), True, 'import numpy as np\n'), ((18825, 18871), 'numpy.savetxt', 'np.savetxt', (['file2', 'TPM_gen_RMSE'], {'delimiter': '""" """'}), "(file2, TPM_gen_RMSE, delimiter=' ')\n", (18835, 18871), True, 'import numpy as np\n'), ((28487, 28508), 'numpy.sum', 'np.sum', (['qMSM_delt_mat'], {}), '(qMSM_delt_mat)\n', (28493, 28508), True, 'import numpy as np\n'), ((28613, 28633), 'numpy.sum', 'np.sum', (['MSM_delt_mat'], {}), '(MSM_delt_mat)\n', (28619, 28633), True, 'import numpy as np\n'), ((14094, 14125), 'numpy.linalg.inv', 'np.linalg.inv', (['TPM_propagate[0]'], {}), '(TPM_propagate[0])\n', (14107, 14125), True, 'import numpy as np\n'), ((14806, 14843), 'numpy.reshape', 'np.reshape', (['TPM_propagate[i]', '(1, -1)'], {}), '(TPM_propagate[i], (1, -1))\n', (14816, 14843), True, 'import numpy as np\n'), ((14878, 14938), 'numpy.insert', 'np.insert', (['TPM_gen[i]', '(0)'], {'values': '(self.delta_time * i)', 'axis': '(0)'}), '(TPM_gen[i], 0, values=self.delta_time * i, axis=0)\n', (14887, 14938), True, 'import numpy as np\n'), ((14326, 14365), 'numpy.dot', 'np.dot', (['TPM_propagate[n - m]', 'kernel[m]'], {}), '(TPM_propagate[n - m], kernel[m])\n', (14332, 14365), True, 'import numpy as np\n'), ((14396, 14431), 'numpy.dot', 'np.dot', (['TPM_propagate[n]', 'TPM_grad0'], {}), '(TPM_propagate[n], TPM_grad0)\n', (14402, 14431), True, 'import numpy as np\n'), ((15163, 
15204), 'numpy.savetxt', 'np.savetxt', (['file1', 'TPM_gen'], {'delimiter': '""" """'}), "(file1, TPM_gen, delimiter=' ')\n", (15173, 15204), True, 'import numpy as np\n'), ((15449, 15495), 'os.path.getsize', 'os.path.getsize', (['"""qMSM_Propagate_TPM_RMSE.txt"""'], {}), "('qMSM_Propagate_TPM_RMSE.txt')\n", (15464, 15495), False, 'import os\n'), ((15521, 15567), 'numpy.savetxt', 'np.savetxt', (['file2', 'TPM_gen_RMSE'], {'delimiter': '""" """'}), "(file2, TPM_gen_RMSE, delimiter=' ')\n", (15531, 15567), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
#
# Author: <NAME>
# URL: http://kazuto1011.github.io
# Created: 2017-11-01
from __future__ import absolute_import, division, print_function
import os.path as osp
import click
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import yaml
from addict import Dict
from tensorboardX import SummaryWriter
from torch.autograd import Variable
from torchnet.meter import MovingAverageValueMeter
from tqdm import tqdm
from libs.datasets import CocoStuff10k
from libs.models import DeepLabV2_ResNet101_MSC
from libs.utils.loss import CrossEntropyLoss2d
def get_lr_params(model, key):
    """Yield the parameters of *model* belonging to one learning-rate group.

    '1x'  -> every parameter of Conv2d modules in the backbone ('layer*').
    '10x' -> weights of Conv2d modules in the ASPP head.
    '20x' -> biases of Conv2d modules in the ASPP head.
    """
    for name, module in model.named_modules():
        # Only convolution layers participate in any of the groups.
        if not isinstance(module, nn.Conv2d):
            continue
        if key == '1x' and 'layer' in name:
            yield from module.parameters()
        elif key == '10x' and 'aspp' in name:
            yield module.weight
        elif key == '20x' and 'aspp' in name:
            yield module.bias
def poly_lr_scheduler(optimizer, init_lr, iter, lr_decay_iter, max_iter, power):
    """Apply polynomial learning-rate decay every ``lr_decay_iter`` iterations.

    Group 0 receives the base rate, group 1 ten times it, group 2 twenty
    times it.  Off the decay schedule (or past ``max_iter``) nothing changes.
    Note: the parameter is named ``iter`` (shadowing the builtin) because
    callers pass it by keyword.
    """
    # Skip iterations that are not on the decay schedule.
    if iter % lr_decay_iter != 0 or iter > max_iter:
        return None
    new_lr = init_lr * (1 - float(iter) / max_iter) ** power
    for group, factor in zip(optimizer.param_groups, (1, 10, 20)):
        group['lr'] = factor * new_lr
def resize_target(target, size):
    """Nearest-neighbour resize of a batch of label maps to (size, size).

    ``target`` is a torch tensor whose first dimension is the batch; each
    slice is assumed to be a 2-D label map (TODO confirm against callers).
    Returns a LongTensor of shape (batch, size, size).
    """
    resized = np.zeros((target.shape[0], size, size), np.int32)
    for idx, label_map in enumerate(target.numpy()):
        # Nearest neighbour keeps class ids intact (no interpolation blending).
        resized[idx, ...] = cv2.resize(
            label_map, (size,) * 2, interpolation=cv2.INTER_NEAREST)
    return torch.from_numpy(resized).long()
@click.command()
@click.option('--config', '-c', type=str, required=True)
@click.option('--cuda/--no-cuda', default=True)
def main(config, cuda):
    """Train DeepLab v2 (ResNet-101, multi-scale) on COCO-Stuff 10k.

    Reads hyperparameters from the YAML file at ``config``; ``cuda`` toggles
    GPU training (automatically disabled when CUDA is unavailable).
    """
    # Configuration
    # NOTE(review): yaml.load without an explicit Loader is deprecated in
    # PyYAML >= 5.1 and unsafe on untrusted files; the handle is never closed.
    CONFIG = Dict(yaml.load(open(config)))
    # CUDA check
    cuda = cuda and torch.cuda.is_available()
    # cuda = False
    if cuda:
        current_device = torch.cuda.current_device()
        print('Running on', torch.cuda.get_device_name(current_device))
    ###
    # Dataset
    dataset = CocoStuff10k(
        root=CONFIG.ROOT,
        split='train',
        image_size=513,
        crop_size=CONFIG.IMAGE.SIZE.TRAIN,
        scale=True,
        flip=True,
    )
    # DataLoader
    loader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=CONFIG.BATCH_SIZE,
        num_workers=CONFIG.NUM_WORKERS,
        shuffle=True,
    )
    loader_iter = iter(loader)
    # Model
    model = DeepLabV2_ResNet101_MSC(n_classes=CONFIG.N_CLASSES)
    state_dict = torch.load(CONFIG.INIT_MODEL)
    model.load_state_dict(state_dict, strict=False) # Skip "aspp" layer
    model = nn.DataParallel(model)
    if cuda:
        model.cuda()
    # Optimizer: three parameter groups with 1x / 10x / 20x learning rates
    # (backbone convs, ASPP weights, ASPP biases respectively).
    optimizer = {
        'sgd':
        torch.optim.SGD(
            # cf lr_mult and decay_mult in train.prototxt
            params=[{
                'params': get_lr_params(model.module, key='1x'),
                'lr': CONFIG.LR,
                'weight_decay': CONFIG.WEIGHT_DECAY
            }, {
                'params': get_lr_params(model.module, key='10x'),
                'lr': 10 * CONFIG.LR,
                'weight_decay': CONFIG.WEIGHT_DECAY
            }, {
                'params': get_lr_params(model.module, key='20x'),
                'lr': 20 * CONFIG.LR,
                'weight_decay': 0.0
            }],
            momentum=CONFIG.MOMENTUM,
        ),
    }.get(CONFIG.OPTIMIZER)
    # Loss definition
    criterion = CrossEntropyLoss2d(ignore_index=CONFIG.IGNORE_LABEL)
    if cuda:
        criterion.cuda()
    # TensorBoard Logger
    writer = SummaryWriter(CONFIG.LOG_DIR)
    loss_meter = MovingAverageValueMeter(20)
    model.train()
    # Batch-norm statistics stay frozen (pretrained backbone).
    model.module.scale.freeze_bn()
    for iteration in tqdm(
            range(1, CONFIG.ITER_MAX + 1),
            total=CONFIG.ITER_MAX,
            leave=False,
            dynamic_ncols=True,
    ):
        # Set a learning rate
        poly_lr_scheduler(
            optimizer=optimizer,
            init_lr=CONFIG.LR,
            iter=iteration - 1,
            lr_decay_iter=CONFIG.LR_DECAY,
            max_iter=CONFIG.ITER_MAX,
            power=CONFIG.POLY_POWER,
        )
        # Clear gradients (ready to accumulate)
        optimizer.zero_grad()
        iter_loss = 0
        # Gradient accumulation over ITER_SIZE mini-batches per optimizer step.
        for i in range(1, CONFIG.ITER_SIZE + 1):
            print(i)
            data, target = next(loader_iter)
            # Image
            data = data.cuda() if cuda else data
            data = Variable(data)
            # Propagate forward
            outputs = model(data)
            # Loss
            loss = 0
            for output in outputs:
                # Resize target for {100%, 75%, 50%, Max} outputs
                target_ = resize_target(target, output.size(2))
                target_ = target_.cuda() if cuda else target_
                target_ = Variable(target_)
                # Compute crossentropy loss
                loss += criterion(output, target_)
            # Backpropagate (just compute gradients wrt the loss)
            loss /= float(CONFIG.ITER_SIZE)
            loss.backward()
            # NOTE(review): loss.data[0] is the pre-0.4 PyTorch idiom
            # (modern versions use loss.item()); this file targets old torch
            # (Variable usage throughout), so it is left as-is.
            iter_loss += loss.data[0]
            # Reload dataloader
            if ((iteration - 1) * CONFIG.ITER_SIZE + i) % len(loader) == 0:
                loader_iter = iter(loader)
        loss_meter.add(iter_loss)
        # Update weights with accumulated gradients
        optimizer.step()
        # TensorBoard
        if iteration % CONFIG.ITER_TF == 0:
            writer.add_scalar('train_loss', loss_meter.value()[0], iteration)
            for i, o in enumerate(optimizer.param_groups):
                writer.add_scalar('train_lr_group{}'.format(i), o['lr'], iteration)
            # Parameter/gradient histograms only every 1000 iterations
            # (they are expensive to log).
            if iteration % 1000 != 0:
                continue
            for name, param in model.named_parameters():
                name = name.replace('.', '/')
                writer.add_histogram(name, param, iteration, bins="auto")
                if param.requires_grad:
                    writer.add_histogram(name + '/grad', param.grad, iteration, bins="auto")
        # Save a model
        if iteration % CONFIG.ITER_SNAP == 0:
            torch.save(
                model.module.state_dict(),
                osp.join(CONFIG.SAVE_DIR, 'checkpoint_{}.pth'.format(iteration)),
            )
        # Save a model
        if iteration % 100 == 0:
            torch.save(
                model.module.state_dict(),
                osp.join(CONFIG.SAVE_DIR, 'checkpoint_current.pth'),
            )
    torch.save(
        model.module.state_dict(),
        osp.join(CONFIG.SAVE_DIR, 'checkpoint_final.pth'),
    )
if __name__ == '__main__':
    main()
| [
"tensorboardX.SummaryWriter",
"torch.utils.data.DataLoader",
"libs.models.DeepLabV2_ResNet101_MSC",
"torch.load",
"torchnet.meter.MovingAverageValueMeter",
"numpy.zeros",
"click.option",
"libs.utils.loss.CrossEntropyLoss2d",
"click.command",
"torch.autograd.Variable",
"torch.cuda.is_available",
... | [((1990, 2005), 'click.command', 'click.command', ([], {}), '()\n', (2003, 2005), False, 'import click\n'), ((2007, 2062), 'click.option', 'click.option', (['"""--config"""', '"""-c"""'], {'type': 'str', 'required': '(True)'}), "('--config', '-c', type=str, required=True)\n", (2019, 2062), False, 'import click\n'), ((2064, 2110), 'click.option', 'click.option', (['"""--cuda/--no-cuda"""'], {'default': '(True)'}), "('--cuda/--no-cuda', default=True)\n", (2076, 2110), False, 'import click\n'), ((1757, 1806), 'numpy.zeros', 'np.zeros', (['(target.shape[0], size, size)', 'np.int32'], {}), '((target.shape[0], size, size), np.int32)\n', (1765, 1806), True, 'import numpy as np\n'), ((2456, 2580), 'libs.datasets.CocoStuff10k', 'CocoStuff10k', ([], {'root': 'CONFIG.ROOT', 'split': '"""train"""', 'image_size': '(513)', 'crop_size': 'CONFIG.IMAGE.SIZE.TRAIN', 'scale': '(True)', 'flip': '(True)'}), "(root=CONFIG.ROOT, split='train', image_size=513, crop_size=\n CONFIG.IMAGE.SIZE.TRAIN, scale=True, flip=True)\n", (2468, 2580), False, 'from libs.datasets import CocoStuff10k\n'), ((2662, 2786), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_size': 'CONFIG.BATCH_SIZE', 'num_workers': 'CONFIG.NUM_WORKERS', 'shuffle': '(True)'}), '(dataset=dataset, batch_size=CONFIG.BATCH_SIZE,\n num_workers=CONFIG.NUM_WORKERS, shuffle=True)\n', (2689, 2786), False, 'import torch\n'), ((2878, 2929), 'libs.models.DeepLabV2_ResNet101_MSC', 'DeepLabV2_ResNet101_MSC', ([], {'n_classes': 'CONFIG.N_CLASSES'}), '(n_classes=CONFIG.N_CLASSES)\n', (2901, 2929), False, 'from libs.models import DeepLabV2_ResNet101_MSC\n'), ((2947, 2976), 'torch.load', 'torch.load', (['CONFIG.INIT_MODEL'], {}), '(CONFIG.INIT_MODEL)\n', (2957, 2976), False, 'import torch\n'), ((3062, 3084), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (3077, 3084), True, 'import torch.nn as nn\n'), ((3954, 4006), 'libs.utils.loss.CrossEntropyLoss2d', 
'CrossEntropyLoss2d', ([], {'ignore_index': 'CONFIG.IGNORE_LABEL'}), '(ignore_index=CONFIG.IGNORE_LABEL)\n', (3972, 4006), False, 'from libs.utils.loss import CrossEntropyLoss2d\n'), ((4084, 4113), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['CONFIG.LOG_DIR'], {}), '(CONFIG.LOG_DIR)\n', (4097, 4113), False, 'from tensorboardX import SummaryWriter\n'), ((4131, 4158), 'torchnet.meter.MovingAverageValueMeter', 'MovingAverageValueMeter', (['(20)'], {}), '(20)\n', (4154, 4158), False, 'from torchnet.meter import MovingAverageValueMeter\n'), ((1879, 1938), 'cv2.resize', 'cv2.resize', (['t', '((size,) * 2)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(t, (size,) * 2, interpolation=cv2.INTER_NEAREST)\n', (1889, 1938), False, 'import cv2\n'), ((2236, 2261), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2259, 2261), False, 'import torch\n'), ((2319, 2346), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (2344, 2346), False, 'import torch\n'), ((7048, 7097), 'os.path.join', 'osp.join', (['CONFIG.SAVE_DIR', '"""checkpoint_final.pth"""'], {}), "(CONFIG.SAVE_DIR, 'checkpoint_final.pth')\n", (7056, 7097), True, 'import os.path as osp\n'), ((1951, 1979), 'torch.from_numpy', 'torch.from_numpy', (['new_target'], {}), '(new_target)\n', (1967, 1979), False, 'import torch\n'), ((2375, 2417), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['current_device'], {}), '(current_device)\n', (2401, 2417), False, 'import torch\n'), ((4955, 4969), 'torch.autograd.Variable', 'Variable', (['data'], {}), '(data)\n', (4963, 4969), False, 'from torch.autograd import Variable\n'), ((5331, 5348), 'torch.autograd.Variable', 'Variable', (['target_'], {}), '(target_)\n', (5339, 5348), False, 'from torch.autograd import Variable\n'), ((6921, 6972), 'os.path.join', 'osp.join', (['CONFIG.SAVE_DIR', '"""checkpoint_current.pth"""'], {}), "(CONFIG.SAVE_DIR, 'checkpoint_current.pth')\n", (6929, 6972), True, 'import os.path as osp\n')] |
#!/usr/bin/env python3
from ctypes import *
import numpy as np
import os
import sys
# Resolve the darknet shared library from the LIB_DARKNET environment
# variable, failing fast with a clear message when it is missing or invalid.
try:
    shared_library = None
    shared_library = os.environ['LIB_DARKNET']
    if not os.path.exists(shared_library):
        raise ValueError(f'Path "{shared_library}" does not exist.')
    else:
        import fnmatch
        # A usable library must at least carry the shared-object suffix.
        if not fnmatch.fnmatch(shared_library, '*.so'):
            raise ValueError(f'{shared_library} is not a shared_library')
except KeyError as exception:
    # os.environ lookup failed: the variable was never exported.
    sys.exit('LIB_DARKNET variable is not set.')
except ValueError as exception:
    sys.exit(exception)
class BOX(Structure):
    """ctypes mirror of darknet's box struct: centre (x, y) plus width/height.

    Layout must match the C definition exactly.
    """
    _fields_ = [(field, c_float) for field in ('x', 'y', 'w', 'h')]
class DETECTION(Structure):
    """ctypes mirror of darknet's detection struct (one detected object).

    Field order and types must match the C definition exactly.
    """
    _fields_ = [("bbox", BOX),
                ("classes", c_int),
                ("prob", POINTER(c_float)),
                ("mask", POINTER(c_float)),
                ("objectness", c_float),
                ("sort_class", c_int),
                ("uc", POINTER(c_float)),
                ("points", c_int),
                ("embeddings", POINTER(c_float)),
                ("embedding_size", c_int),
                ("sim", c_float),
                ("track_id", c_int)]
class IMAGE(Structure):
    """ctypes mirror of darknet's image struct: dimensions plus a float buffer.

    Field order and types must match the C definition exactly.
    """
    _fields_ = [('w', c_int),
                ('h', c_int),
                ('c', c_int),
                ('data', POINTER(c_float))]
class METADATA(Structure):
    """ctypes mirror of darknet's metadata struct: class count and class names.

    Field order and types must match the C definition exactly.
    """
    _fields_ = [('classes', c_int),
                ('names', POINTER(c_char_p))]
# Load the darknet shared library and declare argument/return types for
# every C entry point used by this module.
lib = CDLL(shared_library, RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
# Raw prediction over a flat float buffer.
predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
# Detection retrieval and ownership helpers.
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = \
    [c_void_p, c_int, c_int, c_float, c_float, POINTER(
        c_int), c_int, POINTER(c_int), c_int]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
# Network loading (load_net_custom additionally takes a batch size).
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
load_net_custom = lib.load_network_custom
load_net_custom.argtypes = [c_char_p, c_char_p, c_int, c_int]
load_net_custom.restype = c_void_p
# Non-maximum suppression variants.
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)
def array_to_image(arr):
    """Convert an HWC image array into a darknet IMAGE struct.

    Returns (IMAGE, backing array).  The caller must keep the returned array
    alive while the IMAGE is in use: the struct's ``data`` pointer references
    the array's buffer directly (no copy is made by ctypes).
    """
    chw = arr.transpose(2, 0, 1)
    channels, height, width = chw.shape
    # Flatten, normalise to [0, 1] floats, and guarantee contiguous storage.
    flat = np.ascontiguousarray(chw.flat, dtype=np.float32) / 255.0
    buffer_ptr = flat.ctypes.data_as(POINTER(c_float))
    return IMAGE(width, height, channels, buffer_ptr), flat
| [
"numpy.ascontiguousarray",
"os.path.exists",
"fnmatch.fnmatch",
"sys.exit"
] | [((177, 207), 'os.path.exists', 'os.path.exists', (['shared_library'], {}), '(shared_library)\n', (191, 207), False, 'import os\n'), ((479, 523), 'sys.exit', 'sys.exit', (['"""LIB_DARKNET variable is not set."""'], {}), "('LIB_DARKNET variable is not set.')\n", (487, 523), False, 'import sys\n'), ((560, 579), 'sys.exit', 'sys.exit', (['exception'], {}), '(exception)\n', (568, 579), False, 'import sys\n'), ((3802, 3850), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['arr.flat'], {'dtype': 'np.float32'}), '(arr.flat, dtype=np.float32)\n', (3822, 3850), True, 'import numpy as np\n'), ((330, 369), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['shared_library', '"""*.so"""'], {}), "(shared_library, '*.so')\n", (345, 369), False, 'import fnmatch\n')] |
import cv2
import tensorflow as tf
import pytesseract
from PIL import Image
import numpy as np
import json
from django.http import JsonResponse
from django.http import HttpResponse
import pathlib
# Windows install location of the Tesseract binary used by pytesseract.
pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'
# Keras card-classifier weights, resolved relative to the working directory.
path_to_classifier = pathlib.Path("../model/classifier2.h5").resolve()
# Latest raw camera frame; holds the placeholder string "none" until
# VideoCamera.get_frame captures the first frame.
image_container = "none"
# Last OCR text (HTML-ish) and key/value payload served to the client.
last_text = "No card detected <br><br><br><br><br> Please Hold the card steadily for few seconds while aligned to the white guide on the camera view to the left <br><br><br><br> This screen will automatically populate with the detected text <br><br><br><br>"
last_key = {"status":"No-Key-detected"}
class VideoCamera(object):
    """Thin wrapper around the default webcam exposing JPEG-encoded frames."""

    def __init__(self):
        # Device index 0 is the system default camera.
        self.video = cv2.VideoCapture(0)

    def __del__(self):
        # Release the capture device when the wrapper is garbage-collected.
        self.video.release()

    def get_frame(self):
        """Grab one frame, publish it through the module-level buffer, and
        return it JPEG-encoded (numpy byte array)."""
        global image_container
        ok, frame = self.video.read()
        print("video-ok")
        # Make the raw frame available to detectcard()/get_image().
        image_container = frame
        encoded_ok, jpeg_frame = cv2.imencode('.jpg', frame)
        return jpeg_frame
def detectcard(card):
    """Classify the supplied frame and OCR it when a card is detected.

    Returns a JsonResponse carrying the last OCR text, the last parsed
    key/value payload, and a "first" flag: "0" when fresh text was just
    extracted, "1" otherwise.
    """
    global last_text
    global last_key
    # Before the camera delivers its first frame, image_container still holds
    # its string placeholder ("none"); afterwards it is a numpy frame.
    # (Replaces the old throwaway-variable type() comparison with isinstance.)
    if isinstance(image_container, str):
        return JsonResponse({'last_text': last_text, "last_key": last_key, "first": "1"})
    print("=======================Request handelled======================")
    gray = cv2.cvtColor(card, cv2.COLOR_BGR2GRAY)
    # The classifier expects a (1, H, W, 1) batch of grayscale frames.
    valid = model.predict(np.expand_dims(np.expand_dims(gray, 2), 0))
    if valid >= 0.5:  # sigmoid output: >= 0.5 means "card present"
        print("|||||||||||||||||||||||||||||TEXT DETECTED|||||||||||||||||||||||||||||||||")
        last_text, last_key = extract_text(card)
        return JsonResponse({'last_text': last_text, "last_key": last_key, "first": "0"})
    return JsonResponse({'last_text': last_text, "last_key": last_key, "first": "1"})
def extract_text(card):
    """OCR a card frame: resize, binarise, run Tesseract, then mine fields.

    Returns a tuple of (raw OCR text, parsed key/value dict).
    """
    target_size = (1280, 720)
    print(type(card), card.shape)
    resized = cv2.resize(card, dsize=target_size, interpolation=cv2.INTER_CUBIC)
    grayscale = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    # Adaptive thresholding makes the text stand out for Tesseract.
    binarised = cv2.adaptiveThreshold(grayscale, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 85, 8)
    ocr_output = pytesseract.image_to_string(binarised, lang="eng+hin")
    parsed = extract_key(ocr_output)
    print(str(ocr_output))
    return str(ocr_output), parsed
def extract_key(sample, extract=None):
    """Mine known ID-card fields out of raw OCR text.

    ``extract`` maps a lower-case trigger phrase to a tuple
    (line_count, mode, alias):
      * mode 'ba'   -> keep the trigger phrase itself,
      * mode None   -> keep the text after the trigger, joined with the next
                       line_count - 1 kept lines, stored under ``alias`` when
                       one is given,
      * other modes -> keep the text between the trigger and that marker
                       (the alias is ignored on this branch).
    Lines of five characters or fewer are discarded.  The result also carries
    a "potential interest" list of every kept line made of exactly 3 words.
    """
    if extract is None:
        extract = {
            'gujarat state': (1, 'ba', None),
            'maharashtra state': (1, 'ba', None),
            'driving licence': (1, 'ba', None),
            'name': (2, None, None),
            'address': (3, None, None),
            'dob': (1, 'bg', None),
            'bg': (1, None, None),
            'जन्म तारीख': (1, None, "dob"),
            'पुरुष': (1, None, "gender"),
            'special': (3, 's', 'name')
        }
    flines = [line for line in sample.split("\n") if len(line) > 5]
    # First pass: record every (trigger, column, row) occurrence.
    hits = []
    for row, line in enumerate(flines):
        lowered = line.lower()
        for trigger in extract:
            col = lowered.find(trigger)
            if col != -1:
                hits.append((trigger, col, row))
    # Second pass: slice the value out of each hit line.
    this = {}
    for trigger, col, row in hits:
        span, mode, alias = extract[trigger]
        line = flines[row]
        after = col + len(trigger)
        if mode == 'ba':
            this[trigger] = line[col:after]
        elif mode is None:
            value = line[after:] + " ".join(flines[row + 1: row + span])
            this[alias if alias else trigger] = value
        else:
            # Cut up to the first occurrence of the marker string.
            stop = line.lower().find(mode)
            this[trigger] = line[after:stop]
    this["potential interest"] = [x for x in flines if len(x.split()) == 3]
    return this
def gen(camera):
    """Yield an endless multipart/x-mixed-replace stream of JPEG frames.

    Also loads the card classifier into the module-level ``model`` global so
    that detectcard() can use it.
    """
    global model
    model = tf.keras.models.load_model(path_to_classifier)
    boundary = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    while True:
        payload = camera.get_frame().tobytes()
        yield boundary + payload + b'\r\n\r\n'
def get_image():
return image_container | [
"tensorflow.keras.models.load_model",
"cv2.cvtColor",
"numpy.expand_dims",
"pytesseract.image_to_string",
"cv2.adaptiveThreshold",
"cv2.VideoCapture",
"django.http.JsonResponse",
"pathlib.Path",
"cv2.imencode",
"cv2.resize"
] | [((1975, 2042), 'cv2.resize', 'cv2.resize', (['card'], {'dsize': 'cam_pic_size', 'interpolation': 'cv2.INTER_CUBIC'}), '(card, dsize=cam_pic_size, interpolation=cv2.INTER_CUBIC)\n', (1985, 2042), False, 'import cv2\n'), ((2054, 2093), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (2066, 2093), False, 'import cv2\n'), ((2119, 2214), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['gray', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(85)', '(8)'], {}), '(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY, 85, 8)\n', (2140, 2214), False, 'import cv2\n'), ((2221, 2284), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['adaptive_threshold'], {'lang': '"""eng+hin"""'}), "(adaptive_threshold, lang='eng+hin')\n", (2248, 2284), False, 'import pytesseract\n'), ((4093, 4139), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['path_to_classifier'], {}), '(path_to_classifier)\n', (4119, 4139), True, 'import tensorflow as tf\n'), ((307, 346), 'pathlib.Path', 'pathlib.Path', (['"""../model/classifier2.h5"""'], {}), "('../model/classifier2.h5')\n", (319, 346), False, 'import pathlib\n'), ((754, 773), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (770, 773), False, 'import cv2\n'), ((1013, 1040), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'image'], {}), "('.jpg', image)\n", (1025, 1040), False, 'import cv2\n'), ((1204, 1278), 'django.http.JsonResponse', 'JsonResponse', (["{'last_text': last_text, 'last_key': last_key, 'first': '1'}"], {}), "({'last_text': last_text, 'last_key': last_key, 'first': '1'})\n", (1216, 1278), False, 'from django.http import JsonResponse\n'), ((1384, 1422), 'cv2.cvtColor', 'cv2.cvtColor', (['card', 'cv2.COLOR_BGR2GRAY'], {}), '(card, cv2.COLOR_BGR2GRAY)\n', (1396, 1422), False, 'import cv2\n'), ((1690, 1764), 'django.http.JsonResponse', 'JsonResponse', (["{'last_text': last_text, 'last_key': last_key, 
'first': '0'}"], {}), "({'last_text': last_text, 'last_key': last_key, 'first': '0'})\n", (1702, 1764), False, 'from django.http import JsonResponse\n'), ((1798, 1872), 'django.http.JsonResponse', 'JsonResponse', (["{'last_text': last_text, 'last_key': last_key, 'first': '1'}"], {}), "({'last_text': last_text, 'last_key': last_key, 'first': '1'})\n", (1810, 1872), False, 'from django.http import JsonResponse\n'), ((1468, 1491), 'numpy.expand_dims', 'np.expand_dims', (['gray', '(2)'], {}), '(gray, 2)\n', (1482, 1491), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import pickle5 as pickle
import argparse
import time
import json
import os
from sklearn.tree import export_graphviz
from imblearn.ensemble import BalancedRandomForestClassifier
import matplotlib.pyplot as plt
from sklearn import tree
# Currently use sys to get other script - in Future use package
import os
import sys
path_main = ("/".join(os.path.realpath(__file__).split("/")[:-2]))
sys.path.append(path_main + '/Utils/')
from help_functions import mean, str_to_bool, str_none_check
def treeplotting(model, cache=False, Path=False, save=False, show=True):
    """Render the first estimator of a random forest as a decision-tree image.

    Parameters
    ----------
    model : fitted ensemble or False
        Model exposing ``estimators_``; ignored when ``Path`` is given.
    cache : str or False
        Directory (or .json file) holding ``cache_features.json`` with the
        feature names used for labeling.
    Path : str or False
        Directory (or pickle file) of the saved model; overrides ``model``.
    save : str or False
        Directory (or .png file) where the rendered tree is written.
    show : bool
        Currently unused; kept for backward compatibility.
    """
    if Path != False:
        if not Path.endswith("Model/"):
            Path = Path + "Model/"
        if not Path.endswith(".pkl"):
            Path = Path + "RF_model.sav"
        model = pd.read_pickle(Path)
    if cache != False:
        if not cache.endswith("Cache/"):
            cache = cache + "Cache/"
        # BUG FIX: the original checked Path.endswith(".json") here, which
        # crashes when Path is False and otherwise never inspects `cache`.
        if not cache.endswith(".json"):
            cache = cache + "cache_features.json"
        with open(cache) as json_file:
            data = json.loads(json_file.read())
        data = data["cache_all"] + data["cache_media"]
    # Export the first tree of the forest to graphviz .dot format.
    with open("tree.dot", 'w') as dotfile:
        tree.export_graphviz(model.estimators_[0], out_file=dotfile,
                             feature_names=np.asarray(data), filled=True,
                             max_depth=12, label='all', impurity=False,
                             precision=2, leaves_parallel=False, rotate=False)
    if save != False:
        if not save.endswith("Figures/"):
            save = save + "Figures/"
        if not save.endswith(".png"):
            save = save + "TreeFig.png"
        os.system(f'dot -Tpng tree.dot -o {save}')
        os.system('rm tree.dot')
        fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(4, 4), dpi=800)
        tree.plot_tree(model.estimators_[0],
                       feature_names=np.asarray(data))
        # BUG FIX: `save` already ends in ".png"; the original wrote
        # f'{save}.png' and produced files named "...png.png".
        fig.savefig(save)
if __name__ == '__main__':
    # Command-line front end: plot one tree of a saved random forest.
    cli = argparse.ArgumentParser(description="Plot a descision tree from random forest")
    cli.add_argument("Path", help="Path to random forest sav output")
    cli.add_argument("Cache", help="Path to cache of features json")
    cli.add_argument("-s", dest="Save", nargs='?', default=False, help="location of file")
    cli.add_argument("-d", dest="Show", nargs='?', default=True, help="Do you want to show the plot?")
    args = cli.parse_args()
    t_start = time.time()
    print(bool(args.Show))
    treeplotting(model=False, cache=args.Cache, Path=args.Path,
                 save=args.Save, show=str_to_bool(args.Show))
    t_end = time.time()
    print('completed in {} seconds'.format(t_end - t_start))
"sys.path.append",
"help_functions.str_to_bool",
"argparse.ArgumentParser",
"numpy.asarray",
"os.path.realpath",
"os.system",
"time.time",
"pandas.read_pickle",
"matplotlib.pyplot.subplots"
] | [((426, 464), 'sys.path.append', 'sys.path.append', (["(path_main + '/Utils/')"], {}), "(path_main + '/Utils/')\n", (441, 464), False, 'import sys\n'), ((1683, 1707), 'os.system', 'os.system', (['"""rm tree.dot"""'], {}), "('rm tree.dot')\n", (1692, 1707), False, 'import os\n'), ((1725, 1780), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(4, 4)', 'dpi': '(800)'}), '(nrows=1, ncols=1, figsize=(4, 4), dpi=800)\n', (1737, 1780), True, 'import matplotlib.pyplot as plt\n'), ((1951, 2030), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot a descision tree from random forest"""'}), "(description='Plot a descision tree from random forest')\n", (1974, 2030), False, 'import argparse\n'), ((2420, 2431), 'time.time', 'time.time', ([], {}), '()\n', (2429, 2431), False, 'import time\n'), ((2578, 2589), 'time.time', 'time.time', ([], {}), '()\n', (2587, 2589), False, 'import time\n'), ((819, 839), 'pandas.read_pickle', 'pd.read_pickle', (['Path'], {}), '(Path)\n', (833, 839), True, 'import pandas as pd\n'), ((1635, 1677), 'os.system', 'os.system', (['f"""dot -Tpng tree.dot -o {save}"""'], {}), "(f'dot -Tpng tree.dot -o {save}')\n", (1644, 1677), False, 'import os\n'), ((1299, 1315), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (1309, 1315), True, 'import numpy as np\n'), ((1860, 1876), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (1870, 1876), True, 'import numpy as np\n'), ((2544, 2566), 'help_functions.str_to_bool', 'str_to_bool', (['args.Show'], {}), '(args.Show)\n', (2555, 2566), False, 'from help_functions import mean, str_to_bool, str_none_check\n'), ((381, 407), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (397, 407), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun May 30 00:05:15 2021
@author: Antho
"""
# Imports
from datetime import timedelta
import numpy as np
def daterange(start_date, end_date):
    """Yield every date from *start_date* up to, but excluding, *end_date*.

    Parameters
    ----------
    start_date : datetime.date
        First date that is yielded.
    end_date : datetime.date
        Exclusive upper bound of the range.
    """
    total_days = int((end_date - start_date).days)
    current = start_date
    for _ in range(total_days):
        yield current
        current = current + timedelta(1)
def getLaggedReturns_fromPrice(x, lag):
    """Compute *lag*-period simple returns from a price series.

    The prices are first smoothed with a rolling mean of window *lag*;
    log returns over *lag* periods are then taken and converted back to
    simple returns via exp(r) - 1.

    Parameters
    ----------
    x : pd.Series
        Price series.
    lag : int
        Number of periods to be lagged.
    """
    smoothed = x.rolling(lag).mean()
    log_returns = np.log(smoothed).diff(lag)
    return np.exp(log_returns) - 1
def getLaggedReturns_fromReturns(x, lag):
    """Aggregate per-period returns over *lag* periods and difference them.

    Parameters
    ----------
    x : pd.Series
        Per-period return series.
    lag : int
        Number of periods to be lagged.
    """
    aggregated = x.rolling(lag).sum()
    return aggregated.diff(lag)
"numpy.log",
"datetime.timedelta",
"numpy.exp"
] | [((954, 976), 'numpy.exp', 'np.exp', (['weekly_returns'], {}), '(weekly_returns)\n', (960, 976), True, 'import numpy as np\n'), ((901, 921), 'numpy.log', 'np.log', (['weekly_price'], {}), '(weekly_price)\n', (907, 921), True, 'import numpy as np\n'), ((536, 548), 'datetime.timedelta', 'timedelta', (['n'], {}), '(n)\n', (545, 548), False, 'from datetime import timedelta\n')] |
import numpy as np
def gradient_descent(w0, optimizer, regularizer, opts=None):
    """Run (stochastic) gradient descent with optional proximal shrinkage.

    Parameters
    ----------
    w0 : np.ndarray
        Initial weight vector.
    optimizer : object
        Provides ``loss(w, indexes)``, ``gradient(w, indexes)`` and
        ``get_number_samples()``.
    regularizer : object
        Provides ``gradient(w)`` and ``get_lambda()``.
    opts : dict, optional
        Options: 'eta0', 'n_iter', 'batch_size', 'algorithm' ('GD'/'SGD'),
        'n_samples', 'learning_rate_scheduling', 'shrinkage'.

    Returns
    -------
    trajectory : np.ndarray, shape (n_iter + 1, dim)
        Weights after every iteration; row 0 is ``w0``.
    index_traj : np.ndarray, shape (n_iter, batch_size)
        Sample indexes used at each iteration.
    """
    # Avoid the mutable-default-argument pitfall of `opts=dict()`.
    if opts is None:
        opts = {}
    w = w0
    dim = w0.size
    eta = opts.get('eta0', 0.01)
    n_iter = opts.get('n_iter', 10)
    batch_size = opts.get('batch_size', 1)
    algorithm = opts.get('algorithm', 'GD')
    n_samples = opts.get('n_samples', optimizer.get_number_samples())
    indexes = np.arange(0, n_samples, 1)
    if algorithm == 'GD':
        batch_size = n_samples  # full-batch gradient descent
    trajectory = np.zeros((n_iter + 1, dim))
    trajectory[0, :] = w
    f_val = optimizer.loss(w, indexes)
    f_old = f_val
    grad_sum = 0
    # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int.
    index_traj = np.zeros((n_iter, batch_size), dtype=int)
    for it in range(n_iter):
        # Sample a mini-batch (all samples for plain GD).
        np.random.shuffle(indexes)
        i = indexes[0:batch_size]
        index_traj[it, :] = i
        # Gradients of the data term and of the regularizer.
        gradient = optimizer.gradient(w, i)
        reg_gradient = regularizer.gradient(w)
        grad_sum += np.sum(np.square(gradient + reg_gradient))
        # Update the learning rate according to the chosen schedule.
        learning_rate_opts = {'learning_rate_scheduling': opts.get('learning_rate_scheduling', None),
                              'eta0': opts.get('eta0', 0.01),
                              'it': it,
                              'f_increased': (f_val > f_old),
                              'grad_sum': grad_sum}
        eta = compute_learning_rate(eta, learning_rate_opts)
        # Gradient step on the data term.
        w = w - eta * gradient
        # Regularization step: soft-thresholding (proximal) or a plain step.
        if opts.get('shrinkage', False):
            wplus = np.abs(w) - eta * regularizer.get_lambda()
            wplus[wplus < 0] = 0
            wplus[-1] = np.abs(w[-1])  # last component (bias) is not shrunk
            w = np.sign(w) * wplus
        else:
            w = w - eta * reg_gradient
        # Record new cost and weights.
        f_old = f_val
        f_val = optimizer.loss(w, indexes)
        trajectory[it + 1, :] = w
    return trajectory, index_traj
def sample_indexes(n_samples, batch_size, opts):
    """Return the sample indexes for one optimization step.

    Parameters
    ----------
    n_samples : int
        Total number of samples.
    batch_size : int
        Mini-batch size (used only for 'SGD').
    opts : dict
        Must contain 'algorithm': 'GD' (all indexes, in order) or
        'SGD' (random indexes, with replacement).

    Raises
    ------
    ValueError
        If the algorithm name is not recognized.
    """
    algorithm = opts.get('algorithm', 'GD')
    if algorithm == 'GD':
        return np.arange(0, n_samples, 1)
    if algorithm == 'SGD':
        return np.random.randint(0, n_samples, batch_size)
    # BUG FIX: the original formatted the undefined name `method`, which
    # raised NameError instead of the intended ValueError.
    raise ValueError('Algorithm {} not understood'.format(algorithm))
def compute_learning_rate(eta, opts=None):
    """Return the learning rate for the current iteration.

    Parameters
    ----------
    eta : float
        Current learning rate.
    opts : dict, optional
        'learning_rate_scheduling': None | 'Annealing' | 'Bold driver' |
        'AdaGrad' | 'Annealing2', plus 'eta0', 'f_increased',
        'grad_sum' and 'it' as required by the schedule.

    Raises
    ------
    ValueError
        If the scheduling name is not recognized.
    """
    # Avoid the mutable-default-argument pitfall of `opts=dict()`.
    if opts is None:
        opts = {}
    learning_rate_scheduling = opts.get('learning_rate_scheduling', None)
    eta0 = opts.get('eta0', eta)
    f_increased = opts.get('f_increased', False)
    grad_sum = opts.get('grad_sum', 0)
    it = opts.get('it', 0)
    if learning_rate_scheduling is None:
        eta = eta0  # constant learning rate
    elif learning_rate_scheduling == 'Annealing':
        eta = eta0 / np.power(it + 1, 0.6)
    elif learning_rate_scheduling == 'Bold driver':
        # Shrink hard when the objective increased, otherwise grow slowly.
        eta = (eta / 5) if f_increased else (eta * 1.1)
    elif learning_rate_scheduling == 'AdaGrad':
        eta = eta0 / np.sqrt(grad_sum)
    elif learning_rate_scheduling == 'Annealing2':
        eta = min([eta0, 100. / (it + 1.)])
    else:
        # BUG FIX: the original formatted the undefined name `method`,
        # which raised NameError instead of the intended ValueError.
        raise ValueError('Learning rate scheduling {} not understood'
                         .format(learning_rate_scheduling))
    return eta
def dist(X1, X2=None):
# Build a distance matrix between the elements of X1 and X2.
if X2 is None:
X2 = X1
rows = X1.shape[0]
if X2.shape[0] == X1.shape[1]:
cols = 1
else:
cols = X2.shape[0]
D = np.zeros((rows, cols))
for row in range(rows):
for col in range(cols):
if X1.shape[0] == X1.size:
x1 = X1[row]
else:
x1 = X1[row, :]
if X2.shape[0] == X2.size:
if cols == 1:
x2 = X2
else:
x2 = X2[col]
else:
x2 = X2[col, :]
D[row, col] = np.linalg.norm(x1 - x2)
return D
def generate_polynomial_data(num_points, noise, w):
    """Generate noisy samples of the polynomial with coefficients *w*.

    Returns the design matrix ``x1`` whose columns are
    ``[x**deg, ..., x**1, 1]`` (deg = w.size - 1) and the targets
    ``y = x1 @ w + noise * eps`` with standard-normal ``eps``.
    """
    degree = w.size - 1
    x = np.random.normal(size=(num_points, 1))
    # Columns in decreasing power order, ending with the constant term.
    columns = [np.power(x, degree - d) for d in range(degree + 1)]
    x1 = np.concatenate(columns, axis=1)
    eps = np.random.normal(size=(num_points,))
    y = np.dot(x1, w) + eps * noise
    return x1, y
def generate_linear_separable_data(num_positive, num_negative=None, noise=0., offset=1, dim=2):
    """Generate a linearly separable two-class dataset with a bias column.

    Positive samples (label +1) are centered at ``offset``; negative samples
    (label -1) at the origin. ``noise`` scales Gaussian jitter. A constant
    column of ones is appended for the bias term.

    Returns
    -------
    x : np.ndarray, shape (num_positive + num_negative, dim + 1)
    y : np.ndarray of int, shape (num_positive + num_negative,)
    """
    if num_negative is None:
        num_negative = num_positive
    x = offset + noise * np.random.randn(num_positive, dim)
    # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int.
    y = 1 * np.ones((num_positive,), dtype=int)
    x = np.concatenate((x, noise * np.random.randn(num_negative, dim)), axis=0)
    y = np.concatenate((y, -1 * np.ones((num_negative,), dtype=int)), axis=0)
    x = np.concatenate((x, np.ones((num_positive + num_negative, 1))), axis=1)
    return x, y
def generate_circular_separable_data(num_positive, num_negative=None, noise=0., offset=1, dim=2):
    """Generate a two-class dataset with positives on a sphere of radius ``offset``.

    Positive samples (label +1) lie on a sphere of radius ``offset`` with
    Gaussian jitter scaled by ``noise``; negatives (label -1) are Gaussian
    noise around the origin. A constant ones column is appended for the bias.

    Returns
    -------
    x : np.ndarray, shape (num_positive + num_negative, dim + 1)
    y : np.ndarray of int, shape (num_positive + num_negative,)
    """
    if num_negative is None:
        num_negative = num_positive
    x = np.random.randn(num_positive, dim)
    x = offset * x / np.linalg.norm(x, axis=1, keepdims=True)  # project onto the sphere
    # BUG FIX: the jitter used a hard-coded 2 instead of `dim`, which broke
    # (shape mismatch) for any dim != 2.
    x += np.random.randn(num_positive, dim) * noise
    # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int.
    y = 1 * np.ones((num_positive,), dtype=int)
    x = np.concatenate((x, noise * np.random.randn(num_negative, dim)), axis=0)
    y = np.concatenate((y, -1 * np.ones((num_negative,), dtype=int)), axis=0)
    x = np.concatenate((x, np.ones((num_positive + num_negative, 1))), axis=1)
    return x, y
| [
"numpy.abs",
"numpy.random.randn",
"numpy.power",
"numpy.square",
"numpy.zeros",
"numpy.ones",
"numpy.random.randint",
"numpy.arange",
"numpy.linalg.norm",
"numpy.random.normal",
"numpy.sign",
"numpy.dot",
"numpy.random.shuffle",
"numpy.sqrt"
] | [((466, 492), 'numpy.arange', 'np.arange', (['(0)', 'n_samples', '(1)'], {}), '(0, n_samples, 1)\n', (475, 492), True, 'import numpy as np\n'), ((568, 595), 'numpy.zeros', 'np.zeros', (['(n_iter + 1, dim)'], {}), '((n_iter + 1, dim))\n', (576, 595), True, 'import numpy as np\n'), ((714, 758), 'numpy.zeros', 'np.zeros', (['(n_iter, batch_size)'], {'dtype': 'np.int'}), '((n_iter, batch_size), dtype=np.int)\n', (722, 758), True, 'import numpy as np\n'), ((3614, 3636), 'numpy.zeros', 'np.zeros', (['(rows, cols)'], {}), '((rows, cols))\n', (3622, 3636), True, 'import numpy as np\n'), ((4197, 4235), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(num_points, 1)'}), '(size=(num_points, 1))\n', (4213, 4235), True, 'import numpy as np\n'), ((4245, 4259), 'numpy.power', 'np.power', (['x', '(0)'], {}), '(x, 0)\n', (4253, 4259), True, 'import numpy as np\n'), ((5172, 5206), 'numpy.random.randn', 'np.random.randn', (['num_positive', 'dim'], {}), '(num_positive, dim)\n', (5187, 5206), True, 'import numpy as np\n'), ((959, 985), 'numpy.random.shuffle', 'np.random.shuffle', (['indexes'], {}), '(indexes)\n', (976, 985), True, 'import numpy as np\n'), ((2312, 2338), 'numpy.arange', 'np.arange', (['(0)', 'n_samples', '(1)'], {}), '(0, n_samples, 1)\n', (2321, 2338), True, 'import numpy as np\n'), ((4370, 4383), 'numpy.dot', 'np.dot', (['x1', 'w'], {}), '(x1, w)\n', (4376, 4383), True, 'import numpy as np\n'), ((4701, 4739), 'numpy.ones', 'np.ones', (['(num_positive,)'], {'dtype': 'np.int'}), '((num_positive,), dtype=np.int)\n', (4708, 4739), True, 'import numpy as np\n'), ((5228, 5268), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {'axis': '(1)', 'keepdims': '(True)'}), '(x, axis=1, keepdims=True)\n', (5242, 5268), True, 'import numpy as np\n'), ((5318, 5350), 'numpy.random.randn', 'np.random.randn', (['num_positive', '(2)'], {}), '(num_positive, 2)\n', (5333, 5350), True, 'import numpy as np\n'), ((5372, 5410), 'numpy.ones', 'np.ones', (['(num_positive,)'], 
{'dtype': 'np.int'}), '((num_positive,), dtype=np.int)\n', (5379, 5410), True, 'import numpy as np\n'), ((1196, 1230), 'numpy.square', 'np.square', (['(gradient + reg_gradient)'], {}), '(gradient + reg_gradient)\n', (1205, 1230), True, 'import numpy as np\n'), ((1894, 1907), 'numpy.abs', 'np.abs', (['w[-1]'], {}), '(w[-1])\n', (1900, 1907), True, 'import numpy as np\n'), ((2380, 2423), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n_samples', 'batch_size'], {}), '(0, n_samples, batch_size)\n', (2397, 2423), True, 'import numpy as np\n'), ((4046, 4069), 'numpy.linalg.norm', 'np.linalg.norm', (['(x1 - x2)'], {}), '(x1 - x2)\n', (4060, 4069), True, 'import numpy as np\n'), ((4386, 4422), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(num_points,)'}), '(size=(num_points,))\n', (4402, 4422), True, 'import numpy as np\n'), ((4654, 4688), 'numpy.random.randn', 'np.random.randn', (['num_positive', 'dim'], {}), '(num_positive, dim)\n', (4669, 4688), True, 'import numpy as np\n'), ((4930, 4971), 'numpy.ones', 'np.ones', (['(num_positive + num_negative, 1)'], {}), '((num_positive + num_negative, 1))\n', (4937, 4971), True, 'import numpy as np\n'), ((5600, 5641), 'numpy.ones', 'np.ones', (['(num_positive + num_negative, 1)'], {}), '((num_positive + num_negative, 1))\n', (5607, 5641), True, 'import numpy as np\n'), ((1796, 1805), 'numpy.abs', 'np.abs', (['w'], {}), '(w)\n', (1802, 1805), True, 'import numpy as np\n'), ((1924, 1934), 'numpy.sign', 'np.sign', (['w'], {}), '(w)\n', (1931, 1934), True, 'import numpy as np\n'), ((2941, 2962), 'numpy.power', 'np.power', (['(it + 1)', '(0.6)'], {}), '(it + 1, 0.6)\n', (2949, 2962), True, 'import numpy as np\n'), ((4314, 4332), 'numpy.power', 'np.power', (['x', '(1 + d)'], {}), '(x, 1 + d)\n', (4322, 4332), True, 'import numpy as np\n'), ((4776, 4810), 'numpy.random.randn', 'np.random.randn', (['num_negative', 'dim'], {}), '(num_negative, dim)\n', (4791, 4810), True, 'import numpy as np\n'), ((4853, 4891), 
'numpy.ones', 'np.ones', (['(num_negative,)'], {'dtype': 'np.int'}), '((num_negative,), dtype=np.int)\n', (4860, 4891), True, 'import numpy as np\n'), ((5447, 5481), 'numpy.random.randn', 'np.random.randn', (['num_negative', 'dim'], {}), '(num_negative, dim)\n', (5462, 5481), True, 'import numpy as np\n'), ((5524, 5562), 'numpy.ones', 'np.ones', (['(num_negative,)'], {'dtype': 'np.int'}), '((num_negative,), dtype=np.int)\n', (5531, 5562), True, 'import numpy as np\n'), ((3142, 3159), 'numpy.sqrt', 'np.sqrt', (['grad_sum'], {}), '(grad_sum)\n', (3149, 3159), True, 'import numpy as np\n')] |
from numpy import arange, outer
from scipy.stats import norm as normdistr
from functools import partial
from .trialbased import (
MODELS,
linear_gaussian_per_finger,
linear_separate_gaussians_per_finger,
)
def add_gauss(seed, X, n_points, fun):
    """Evaluate *fun* on the spatial part of *seed* and modulate it in time.

    The last two entries of *seed* are the temporal Gaussian's location
    (delay) and scale (spread); the remaining entries are forwarded to
    *fun*. Returns the outer product of the Gaussian temporal response
    (length *n_points*) with *fun*'s spatial output.
    """
    spatial = fun(seed[:-2], X)
    timepoints = arange(n_points)
    temporal = normdistr.pdf(timepoints, loc=seed[-2], scale=seed[-1])
    return outer(temporal, spatial)
def _gauss_time_parameters():
    """Return fresh copies of the temporal-Gaussian parameter specs."""
    return {
        'delay': {
            'seed': 20,
            'bounds': (-5, 80),  # this depends on the number of data points
            'to_plot': True,
        },
        'spread': {
            'seed': 10,
            'bounds': (0.1, 40),  # this depends on the number of data points
            'to_plot': False,
        },
    }


def _register_with_gauss(name, base_name, fun, grouped=False):
    """Register a time-based variant of MODELS[base_name] modulated by a Gaussian.

    The new entry reuses the base model's design matrix and parameters and
    adds 'delay'/'spread' parameters for the temporal Gaussian.
    """
    base = MODELS[base_name]
    entry = {
        'type': 'time-based',
        'doc': base['doc'] + ' multiplied by a Gaussian',
        'function': partial(add_gauss, fun=fun),
        'design_matrix': base['design_matrix'],
        'parameters': {**base['parameters'], **_gauss_time_parameters()},
    }
    if grouped:
        entry['grouped'] = True
    MODELS[name] = entry


# The three registrations below were originally three near-identical dict
# literals; the helper removes the duplication while producing the same
# MODELS entries.
_register_with_gauss(
    'linear_gaussian_per_finger_with_gauss',
    'linear_gaussian_per_finger',
    linear_gaussian_per_finger)
_register_with_gauss(
    'linear_separate_gaussians_per_finger_with_gauss',
    'linear_separate_gaussians_per_finger',
    linear_separate_gaussians_per_finger)
_register_with_gauss(
    'group_separate_gaussians_per_finger_with_gauss',
    'linear_separate_gaussians_per_finger',
    linear_separate_gaussians_per_finger,
    grouped=True)
| [
"functools.partial",
"numpy.outer",
"numpy.arange",
"scipy.stats.norm.pdf"
] | [((298, 314), 'numpy.arange', 'arange', (['n_points'], {}), '(n_points)\n', (304, 314), False, 'from numpy import arange, outer\n'), ((330, 376), 'scipy.stats.norm.pdf', 'normdistr.pdf', (['t'], {'loc': 'seed[-2]', 'scale': 'seed[-1]'}), '(t, loc=seed[-2], scale=seed[-1])\n', (343, 376), True, 'from scipy.stats import norm as normdistr\n'), ((387, 405), 'numpy.outer', 'outer', (['response', 'v'], {}), '(response, v)\n', (392, 405), False, 'from numpy import arange, outer\n'), ((603, 653), 'functools.partial', 'partial', (['add_gauss'], {'fun': 'linear_gaussian_per_finger'}), '(add_gauss, fun=linear_gaussian_per_finger)\n', (610, 653), False, 'from functools import partial\n'), ((1363, 1423), 'functools.partial', 'partial', (['add_gauss'], {'fun': 'linear_separate_gaussians_per_finger'}), '(add_gauss, fun=linear_separate_gaussians_per_finger)\n', (1370, 1423), False, 'from functools import partial\n'), ((2173, 2233), 'functools.partial', 'partial', (['add_gauss'], {'fun': 'linear_separate_gaussians_per_finger'}), '(add_gauss, fun=linear_separate_gaussians_per_finger)\n', (2180, 2233), False, 'from functools import partial\n')] |
from os.path import join, isdir
import os
import argparse
import numpy as np
import json
import cv2
from tqdm import tqdm
def crop_patch(im, img_sz):
    """Crop the padded central patch of *im* and warp it to the output size.

    The crop window is centered in the frame, with each side scaled by
    (1 + args.padding) / 2 of the frame size (``args`` is the module-level
    parsed CLI namespace); an affine warp maps it to an
    args.output_size x args.output_size square.

    Parameters
    ----------
    im : np.ndarray
        Input image as read by cv2.
    img_sz : sequence of 2 numbers
        (width, height) of the source frame.
    """
    w = float((img_sz[0] / 2) * (1 + args.padding))
    h = float((img_sz[1] / 2) * (1 + args.padding))
    x = float((img_sz[0] / 2) - w / 2)
    y = float((img_sz[1] / 2) - h / 2)
    # Affine coefficients mapping the crop window to the output square.
    a = (args.output_size - 1) / w
    b = (args.output_size - 1) / h
    c = -a * x
    d = -b * y
    # BUG FIX: np.float was removed in NumPy 1.24; use the builtin float.
    mapping = np.array([[a, 0, c],
                        [0, b, d]], dtype=float)
    return cv2.warpAffine(im, mapping, (args.output_size, args.output_size),
                          borderMode=cv2.BORDER_CONSTANT,
                          borderValue=(0, 0, 0))
def main():
    """Crop every frame listed in vid.json and build the dataset index JSON.

    Reads ``vid.json`` from ``args.base_path``, writes cropped jpgs into
    ``crop_<size>_<padding>/`` and finally a ``dataset.json`` containing
    per-frame offsets plus a random train/validation split.
    """
    os.chdir(args.base_path)
    vid = json.load(open('vid.json', 'r'))
    num_all_frame = 1298523  # total frame count over all videos (precomputed)
    num_val = 3000
    lmdb = {
        # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int.
        'down_index': np.zeros(num_all_frame, int),  # frames back to the video start
        'up_index': np.zeros(num_all_frame, int),   # frames forward to the video end
    }
    crop_base_path = f'crop_{args.output_size:d}_{args.padding:1.1f}'
    # makedirs(exist_ok=True) replaces the original isdir()/mkdir() pair.
    os.makedirs(crop_base_path, exist_ok=True)
    count = 0
    with open("log.txt", "w", encoding="utf8") as logf:
        for subset in vid:
            total = 0
            for v in subset:
                total += len(v['frame'])
            progress = tqdm(total=total)
            for video in subset:
                frames = video['frame']
                n_frames = len(frames)
                for f, frame in enumerate(frames):
                    img_path = join(video['base_path'], frame['img_path'])
                    out_path = join(crop_base_path, '{:08d}.jpg'.format(count))
                    if not os.path.exists(out_path):
                        # read, crop, write
                        cv2.imwrite(out_path, crop_patch(cv2.imread(img_path), frame['frame_sz']))
                        logf.write("processed ")
                        logf.write(out_path)
                        logf.write('\n')
                    else:
                        logf.write("skipped ")
                        logf.write(out_path)
                        logf.write('\n')
                    # how many frames to the first frame
                    lmdb['down_index'][count] = f
                    # how many frames to the last frame
                    lmdb['up_index'][count] = n_frames - f
                    count += 1
                    progress.update()
    # NEVER use the last frame as a template (requires up_index > 1).
    template_id = np.where(lmdb['up_index'] > 1)[0]
    rand_split = np.random.choice(len(template_id), len(template_id))
    lmdb['train_set'] = template_id[rand_split[:(len(template_id) - num_val)]]
    lmdb['val_set'] = template_id[rand_split[(len(template_id) - num_val):]]
    print(len(lmdb['train_set']))
    print(len(lmdb['val_set']))
    # Convert the numpy arrays to lists so they are JSON-serializable.
    lmdb['train_set'] = lmdb['train_set'].tolist()
    lmdb['val_set'] = lmdb['val_set'].tolist()
    lmdb['down_index'] = lmdb['down_index'].tolist()
    lmdb['up_index'] = lmdb['up_index'].tolist()
    print('lmdb json, please wait 5 seconds~')
    json.dump(lmdb, open('dataset.json', 'w'), indent=2)
    print('done!')
if __name__ == '__main__':
    # CLI entry point: parse options into the module-level `args` used by
    # crop_patch()/main(), then run the cropping pipeline.
    cli = argparse.ArgumentParser(description='Generate training data (cropped) for DCFNet_pytorch')
    cli.add_argument('-d', '--dir', dest='base_path', required=True, type=str, help='working directory')
    cli.add_argument('-v', '--visual', dest='visual', action='store_true', help='whether visualise crop')
    cli.add_argument('-o', '--output_size', dest='output_size', default=125, type=int, help='crop output size')
    cli.add_argument('-p', '--padding', dest='padding', default=2, type=float, help='crop padding size')
    args = cli.parse_args()
    print(args)
    main()
| [
"os.mkdir",
"tqdm.tqdm",
"argparse.ArgumentParser",
"os.path.isdir",
"numpy.zeros",
"os.path.exists",
"cv2.warpAffine",
"cv2.imread",
"numpy.where",
"numpy.array",
"os.path.join",
"os.chdir"
] | [((451, 499), 'numpy.array', 'np.array', (['[[a, 0, c], [0, b, d]]'], {'dtype': 'np.float'}), '([[a, 0, c], [0, b, d]], dtype=np.float)\n', (459, 499), True, 'import numpy as np\n'), ((536, 660), 'cv2.warpAffine', 'cv2.warpAffine', (['im', 'mapping', '(args.output_size, args.output_size)'], {'borderMode': 'cv2.BORDER_CONSTANT', 'borderValue': '(0, 0, 0)'}), '(im, mapping, (args.output_size, args.output_size),\n borderMode=cv2.BORDER_CONSTANT, borderValue=(0, 0, 0))\n', (550, 660), False, 'import cv2\n'), ((727, 751), 'os.chdir', 'os.chdir', (['args.base_path'], {}), '(args.base_path)\n', (735, 751), False, 'import os\n'), ((3323, 3418), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate training data (cropped) for DCFNet_pytorch"""'}), "(description=\n 'Generate training data (cropped) for DCFNet_pytorch')\n", (3346, 3418), False, 'import argparse\n'), ((895, 926), 'numpy.zeros', 'np.zeros', (['num_all_frame', 'np.int'], {}), '(num_all_frame, np.int)\n', (903, 926), True, 'import numpy as np\n'), ((956, 987), 'numpy.zeros', 'np.zeros', (['num_all_frame', 'np.int'], {}), '(num_all_frame, np.int)\n', (964, 987), True, 'import numpy as np\n'), ((1076, 1097), 'os.path.isdir', 'isdir', (['crop_base_path'], {}), '(crop_base_path)\n', (1081, 1097), False, 'from os.path import join, isdir\n'), ((1107, 1131), 'os.mkdir', 'os.mkdir', (['crop_base_path'], {}), '(crop_base_path)\n', (1115, 1131), False, 'import os\n'), ((1346, 1363), 'tqdm.tqdm', 'tqdm', ([], {'total': 'total'}), '(total=total)\n', (1350, 1363), False, 'from tqdm import tqdm\n'), ((2488, 2518), 'numpy.where', 'np.where', (["(lmdb['up_index'] > 1)"], {}), "(lmdb['up_index'] > 1)\n", (2496, 2518), True, 'import numpy as np\n'), ((1558, 1601), 'os.path.join', 'join', (["video['base_path']", "frame['img_path']"], {}), "(video['base_path'], frame['img_path'])\n", (1562, 1601), False, 'from os.path import join, isdir\n'), ((1710, 1734), 'os.path.exists', 'os.path.exists', 
(['out_path'], {}), '(out_path)\n', (1724, 1734), False, 'import os\n'), ((1837, 1857), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1847, 1857), False, 'import cv2\n')] |
# Copyright 2020 <NAME> (<EMAIL>)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from evaluate import calculate_total_pckh
from save_result_as_image import save_result_image
from configparser import ConfigParser
import getopt
import sys
from common import get_time_and_step_interval
import numpy as np
import os
import datetime
import tensorflow as tf
# Connect to the Colab TPU worker; COLAB_TPU_ADDR is set by the Colab runtime.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])
tf.config.experimental_connect_to_cluster(resolver)
# This is the TPU initialization code that has to be at the beginning.
tf.tpu.experimental.initialize_tpu_system(resolver)
print("All devices: ", tf.config.list_logical_devices('TPU'))
# Fix the global random seed for reproducibility.
tf.random.set_seed(3)
print("tensorflow version :", tf.__version__)  # 2.1.0
print("keras version :", tf.keras.__version__)  # 2.2.4-tf
# Distribution strategy that replicates training across the TPU cores.
strategy = tf.distribute.TPUStrategy(resolver)
"""
python train.py --dataset_config=config/dataset/coco2017-gpu.cfg --experiment_config=config/training/experiment01.cfg
python train.py --dataset_config=config/dataset/ai_challenger-gpu.cfg --experiment_config=config/training/experiment01.cfg
"""
argv = sys.argv[1:]
try:
opts, args = getopt.getopt(
argv, "d:e:", ["dataset_config=", "experiment_config="])
except getopt.GetoptError:
print('train_hourglass.py --dataset_config <inputfile> --experiment_config <outputfile>')
sys.exit(2)
dataset_config_file_path = "config/dataset/ai_challenger-colab.cfg"
experiment_config_file_path = "config/training/experiment04-cpm-sg4-colab.cfg"
for opt, arg in opts:
if opt == '-h':
print('train_middlelayer.py --dataset_config <inputfile> --experiment_config <outputfile>')
sys.exit()
elif opt in ("-d", "--dataset_config"):
dataset_config_file_path = arg
elif opt in ("-e", "--experiment_config"):
experiment_config_file_path = arg
parser = ConfigParser()
# get dataset config
print(dataset_config_file_path)
parser.read(dataset_config_file_path)
config_dataset = {}
# NOTE(review): config values are eval()'d, i.e. executed as Python
# expressions — only trusted configuration files must be loaded this way.
for key in parser["dataset"]:
    config_dataset[key] = eval(parser["dataset"][key])
# get training config
print(experiment_config_file_path)
parser.read(experiment_config_file_path)
config_preproc = {}
for key in parser["preprocessing"]:
    config_preproc[key] = eval(parser["preprocessing"][key])
config_model = {}
for key in parser["model"]:
    config_model[key] = eval(parser["model"][key])
config_extra = {}
for key in parser["extra"]:
    config_extra[key] = eval(parser["extra"][key])
config_training = {}
for key in parser["training"]:
    config_training[key] = eval(parser["training"][key])
config_output = {}
for key in parser["output"]:
    config_output[key] = eval(parser["output"][key])
# Dataset location, e.g. "/Volumes/tucan-SSD/datasets"
dataset_root_path = config_dataset["dataset_root_path"]
# e.g. "coco_dataset"
dataset_directory_name = config_dataset["dataset_directory_name"]
dataset_path = os.path.join(dataset_root_path, dataset_directory_name)
# e.g. "/home/outputs" or "/Volumes/tucan-SSD/ml-project/outputs"
output_root_path = config_output["output_root_path"]
output_experiment_name = config_output["experiment_name"]  # "experiment01"
sub_experiment_name = config_output["sub_experiment_name"]  # "basic"
# Timestamp makes the output directory name unique per run.
current_time = datetime.datetime.now().strftime("%m%d%H%M")
model_name = config_model["model_name"]  # "simplepose"
model_subname = config_model["model_subname"]
output_name = f"{current_time}_{model_name}_{sub_experiment_name}"
output_path = os.path.join(
    output_root_path, output_experiment_name, dataset_directory_name)
output_log_path = os.path.join(output_path, "logs", output_name)
# =================================================
# ============== prepare training =================
# =================================================
# TensorBoard summary writer for training logs.
train_summary_writer = tf.summary.create_file_writer(output_log_path)
@tf.function
def train_step(model, images, labels):
    """Run one optimization step on a batch.

    Uses the module-level `loss_object`, `optimizer` and `train_loss`
    metric. The model is expected to return one prediction per stage;
    all per-stage losses are summed (intermediate supervision).

    Returns
    -------
    (total_loss, last_stage_loss, max_val) : tuple of scalar tensors
        Summed loss, loss of the final stage, and the maximum value of the
        final-stage output (useful for monitoring heatmap saturation).
    """
    with tf.GradientTape() as tape:
        model_output = model(images)
        predictions_layers = model_output
        # One loss term per intermediate stage output.
        losses = [loss_object(labels, predictions)
                  for predictions in predictions_layers]
        total_loss = tf.math.add_n(losses)
    max_val = tf.math.reduce_max(predictions_layers[-1])
    gradients = tape.gradient(total_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(total_loss)
    return total_loss, losses[-1], max_val
def val_step(step, images, heamaps):
    """Run the model on validation images and save the result figures."""
    preds = np.array(model(images, training=False))
    save_image_results(step, images, heamaps, preds)
@tf.function
def valid_step(model, images, labels):
    """Compute the validation loss for a batch and update the `valid_loss` metric."""
    predictions = model(images, training=False)
    v_loss = loss_object(labels, predictions)
    valid_loss(v_loss)
    # valid_accuracy(labels, predictions)
    return v_loss
def save_image_results(step, images, true_heatmaps, predicted_heatmaps):
    """Save validation figures (input, ground truth, prediction) to disk.

    Files are written to
    ``<output_path>/<output_name>/val_image_results/result<i>-<step>.jpg``.

    Parameters
    ----------
    step : int
        Global training step (used in the file name and figure title).
    images : array, indexed as [batch, H, W, C]
    true_heatmaps : array
        Ground-truth heatmaps, one set per image.
    predicted_heatmaps : sequence of arrays
        Per-stage model outputs; only the last stage is rendered.
    """
    val_image_results_directory = "val_image_results"
    # makedirs(exist_ok=True) replaces the original chain of
    # exists()/mkdir() calls and is also race-free.
    results_dir = os.path.join(output_path, output_name, val_image_results_directory)
    os.makedirs(results_dir, exist_ok=True)
    for i in range(images.shape[0]):
        image = images[i, :, :, :]
        heamap = true_heatmaps[i, :, :, :]
        prediction = predicted_heatmaps[-1][i, :, :, :]
        result_image_path = os.path.join(
            results_dir, f"result{i}-{step:0>6d}.jpg")
        save_result_image(result_image_path, image, heamap,
                          prediction, title=f"step:{int(step/1000)}k")
def save_model(model, step=None, label=None):
    """Save *model* under ``<output_path>/<output_name>/saved_model[-step][-label]/``.

    Parameters
    ----------
    model : keras model
        Model saved via ``model.save`` (SavedModel format).
    step : int, optional
        Training step appended to the directory name (zero-padded).
    label : str, optional
        Extra suffix appended to the directory name.

    Returns
    -------
    str
        Path of the directory the model was saved to.
    """
    saved_model_directory = "saved_model"
    if step is not None:
        saved_model_directory = saved_model_directory + f"-{step:0>6d}"
    if label is not None:
        saved_model_directory = saved_model_directory + "-" + label
    saved_model_path = os.path.join(
        output_path, output_name, saved_model_directory)
    # makedirs(exist_ok=True) replaces the original chain of
    # exists()/mkdir() calls and is also race-free.
    os.makedirs(saved_model_path, exist_ok=True)
    print("-"*20 + " MODEL SAVE!! " + "-"*20)
    print("saved model path: " + saved_model_path)
    model.save(saved_model_path)
    print("-"*18 + " MODEL SAVE DONE!! " + "-"*18)
    return saved_model_path
if __name__ == '__main__':
    # ================================================
    # ============= load hyperparams =================
    # ================================================
    # config_dataset = ...
    # config_model = ...
    # config_output = ...
    # NOTE(review): this driver references names defined earlier in the file
    # (tf, strategy, train_step, val_step, save_model, calculate_total_pckh,
    # get_time_and_step_interval, train_summary_writer, dataset_path,
    # config_dataset/config_training/config_model/config_preproc/config_extra,
    # model_name, model_subname) -- confirm they are set before this point.
    # ================================================
    # =============== load dataset ===================
    # ================================================
    from data_loader.data_loader import DataLoader
    # dataloader instance gen
    train_images = config_dataset["train_images"]
    train_annotation = config_dataset["train_annotation"]
    train_images_dir_path = os.path.join(dataset_path, train_images)
    train_annotation_json_filepath = os.path.join(
        dataset_path, train_annotation)
    print(">> LOAD TRAIN DATASET FORM:", train_annotation_json_filepath)
    dataloader_train = DataLoader(
        images_dir_path=train_images_dir_path,
        annotation_json_path=train_annotation_json_filepath,
        config_training=config_training,
        config_model=config_model,
        config_preproc=config_preproc)
    valid_images = config_dataset["valid_images"]
    valid_annotation = config_dataset["valid_annotation"]
    valid_images_dir_path = os.path.join(dataset_path, valid_images)
    valid_annotation_json_filepath = os.path.join(
        dataset_path, valid_annotation)
    print(">> LOAD VALID DATASET FORM:", valid_annotation_json_filepath)
    dataloader_valid = DataLoader(
        images_dir_path=valid_images_dir_path,
        annotation_json_path=valid_annotation_json_filepath,
        config_training=config_training,
        config_model=config_model,
        config_preproc=config_preproc)
    number_of_keypoints = dataloader_train.number_of_keypoints # 17
    # train dataset
    dataset_train = dataloader_train.input_fn()
    dataset_valid = dataloader_valid.input_fn()
    # validation images
    # fixed batch of validation samples reused for periodic visual checks
    val_images, val_heatmaps = dataloader_valid.get_images(
        0, batch_size=25) # from 22 index 6 images and 6 labels
    # ================================================
    # =============== build model ====================
    # ================================================
    from model_provider import get_model
    model = get_model(model_name=model_name,
                      model_subname=model_subname,
                      number_of_keypoints=number_of_keypoints,
                      config_extra=config_extra)
    loss_object = tf.keras.losses.MeanSquaredError()
    optimizer = tf.keras.optimizers.Adam(
        config_training["learning_rate"], epsilon=config_training["epsilon"])
    train_loss = tf.keras.metrics.Mean(name="train_loss")
    valid_loss = tf.keras.metrics.Mean(name="valid_loss")
    valid_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
        name="valid_accuracy")
    # ================================================
    # ============== train the model =================
    # ================================================
    # step-periodic actions below: console echo, validation images,
    # model snapshots (+PCKh), and tensorboard scalars.
    num_epochs = config_training["number_of_epoch"]  # 550
    number_of_echo_period = config_training["period_echo"]  # 100
    number_of_validimage_period = None  # 1000
    number_of_modelsave_period = config_training["period_save_model"]  # 5000
    tensorbaord_period = config_training["period_tensorboard"]  # 100  (sic: "tensorbaord" typo kept)
    # validation_period = 2  # 1000
    valid_check = False
    valid_pckh = config_training["valid_pckh"]  # True
    pckh_distance_ratio = config_training["pckh_distance_ratio"]  # 0.5
    step = 1
    # TRAIN!!
    get_time_and_step_interval(step, is_init=True)
    for epoch in range(num_epochs):
        print("-" * 10 + " " + str(epoch + 1) + " EPOCH " + "-" * 10)
        for images, heatmaps in dataset_train:
            # print(images.shape) # (32, 128, 128, 3)
            # print(heatmaps.shape) # (32, 32, 32, 17)
            total_loss, last_layer_loss, max_val = strategy.run(
                train_step, args=(model, images, heatmaps))
            step += 1
            if number_of_echo_period is not None and step % number_of_echo_period == 0:
                total_interval, per_step_interval = get_time_and_step_interval(
                    step)
                echo_textes = []
                if step is not None:
                    echo_textes.append(f"step: {step}")
                if total_interval is not None:
                    echo_textes.append(f"total: {total_interval}")
                if per_step_interval is not None:
                    echo_textes.append(f"per_step: {per_step_interval}")
                if total_loss is not None:
                    echo_textes.append(f"total loss: {total_loss:.6f}")
                if last_layer_loss is not None:
                    echo_textes.append(f"last loss: {last_layer_loss:.6f}")
                print(">> " + ", ".join(echo_textes))
            # validation phase
            if number_of_validimage_period is not None and step % number_of_validimage_period == 0:
                val_step(step, val_images, val_heatmaps)
            if number_of_modelsave_period is not None and step % number_of_modelsave_period == 0:
                saved_model_path = save_model(model, step=step)
                if valid_pckh:
                    # print("calcuate pckh")
                    pckh_score = calculate_total_pckh(saved_model_path=saved_model_path,
                                                      annotation_path=valid_annotation_json_filepath,
                                                      images_path=valid_images_dir_path,
                                                      distance_ratio=pckh_distance_ratio)
                    with train_summary_writer.as_default():
                        tf.summary.scalar(
                            f'pckh@{pckh_distance_ratio:.1f}_score', pckh_score * 100, step=step)
            if tensorbaord_period is not None and step % tensorbaord_period == 0:
                with train_summary_writer.as_default():
                    tf.summary.scalar(
                        "total_loss", total_loss.numpy(), step=step)
                    tf.summary.scalar(
                        "max_value - last_layer_loss", max_val.numpy(), step=step)
                    if last_layer_loss is not None:
                        tf.summary.scalar("last_layer_loss",
                                          last_layer_loss.numpy(), step=step)
            # if not valid_check:
            #     continue
            # for v_images, v_heatmaps in dataloader_valid:
            #     v_loss = valid_step(model, sv_images, v_heatmaps)
    # last model save
    saved_model_path = save_model(model, step=step, label="final")
    # last pckh
    pckh_score = calculate_total_pckh(saved_model_path=saved_model_path,
                                      annotation_path=valid_annotation_json_filepath,
                                      images_path=valid_images_dir_path,
                                      distance_ratio=pckh_distance_ratio)
    with train_summary_writer.as_default():
        tf.summary.scalar(
            f'pckh@{pckh_distance_ratio:.1f}_score', pckh_score * 100, step=step)
| [
"tensorflow.random.set_seed",
"os.mkdir",
"getopt.getopt",
"tensorflow.keras.metrics.Mean",
"evaluate.calculate_total_pckh",
"os.path.join",
"tensorflow.distribute.cluster_resolver.TPUClusterResolver",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.config.list_logical_devices",
"os.path.e... | [((979, 1079), 'tensorflow.distribute.cluster_resolver.TPUClusterResolver', 'tf.distribute.cluster_resolver.TPUClusterResolver', ([], {'tpu': "('grpc://' + os.environ['COLAB_TPU_ADDR'])"}), "(tpu='grpc://' + os.\n environ['COLAB_TPU_ADDR'])\n", (1028, 1079), True, 'import tensorflow as tf\n'), ((1075, 1126), 'tensorflow.config.experimental_connect_to_cluster', 'tf.config.experimental_connect_to_cluster', (['resolver'], {}), '(resolver)\n', (1116, 1126), True, 'import tensorflow as tf\n'), ((1198, 1249), 'tensorflow.tpu.experimental.initialize_tpu_system', 'tf.tpu.experimental.initialize_tpu_system', (['resolver'], {}), '(resolver)\n', (1239, 1249), True, 'import tensorflow as tf\n'), ((1313, 1334), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(3)'], {}), '(3)\n', (1331, 1334), True, 'import tensorflow as tf\n'), ((1471, 1506), 'tensorflow.distribute.TPUStrategy', 'tf.distribute.TPUStrategy', (['resolver'], {}), '(resolver)\n', (1496, 1506), True, 'import tensorflow as tf\n'), ((2509, 2523), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (2521, 2523), False, 'from configparser import ConfigParser\n'), ((3529, 3584), 'os.path.join', 'os.path.join', (['dataset_root_path', 'dataset_directory_name'], {}), '(dataset_root_path, dataset_directory_name)\n', (3541, 3584), False, 'import os\n'), ((4089, 4167), 'os.path.join', 'os.path.join', (['output_root_path', 'output_experiment_name', 'dataset_directory_name'], {}), '(output_root_path, output_experiment_name, dataset_directory_name)\n', (4101, 4167), False, 'import os\n'), ((4191, 4237), 'os.path.join', 'os.path.join', (['output_path', '"""logs"""', 'output_name'], {}), "(output_path, 'logs', output_name)\n", (4203, 4237), False, 'import os\n'), ((4419, 4465), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['output_log_path'], {}), '(output_log_path)\n', (4448, 4465), True, 'import tensorflow as tf\n'), ((1273, 1310), 
'tensorflow.config.list_logical_devices', 'tf.config.list_logical_devices', (['"""TPU"""'], {}), "('TPU')\n", (1303, 1310), True, 'import tensorflow as tf\n'), ((1801, 1871), 'getopt.getopt', 'getopt.getopt', (['argv', '"""d:e:"""', "['dataset_config=', 'experiment_config=']"], {}), "(argv, 'd:e:', ['dataset_config=', 'experiment_config='])\n", (1814, 1871), False, 'import getopt\n'), ((4801, 4843), 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['predictions_layers[-1]'], {}), '(predictions_layers[-1])\n', (4819, 4843), True, 'import tensorflow as tf\n'), ((5162, 5183), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (5170, 5183), True, 'import numpy as np\n'), ((7266, 7327), 'os.path.join', 'os.path.join', (['output_path', 'output_name', 'saved_model_directory'], {}), '(output_path, output_name, saved_model_directory)\n', (7278, 7327), False, 'import os\n'), ((8204, 8244), 'os.path.join', 'os.path.join', (['dataset_path', 'train_images'], {}), '(dataset_path, train_images)\n', (8216, 8244), False, 'import os\n'), ((8282, 8326), 'os.path.join', 'os.path.join', (['dataset_path', 'train_annotation'], {}), '(dataset_path, train_annotation)\n', (8294, 8326), False, 'import os\n'), ((8432, 8634), 'data_loader.data_loader.DataLoader', 'DataLoader', ([], {'images_dir_path': 'train_images_dir_path', 'annotation_json_path': 'train_annotation_json_filepath', 'config_training': 'config_training', 'config_model': 'config_model', 'config_preproc': 'config_preproc'}), '(images_dir_path=train_images_dir_path, annotation_json_path=\n train_annotation_json_filepath, config_training=config_training,\n config_model=config_model, config_preproc=config_preproc)\n', (8442, 8634), False, 'from data_loader.data_loader import DataLoader\n'), ((8804, 8844), 'os.path.join', 'os.path.join', (['dataset_path', 'valid_images'], {}), '(dataset_path, valid_images)\n', (8816, 8844), False, 'import os\n'), ((8882, 8926), 'os.path.join', 'os.path.join', (['dataset_path', 
'valid_annotation'], {}), '(dataset_path, valid_annotation)\n', (8894, 8926), False, 'import os\n'), ((9032, 9234), 'data_loader.data_loader.DataLoader', 'DataLoader', ([], {'images_dir_path': 'valid_images_dir_path', 'annotation_json_path': 'valid_annotation_json_filepath', 'config_training': 'config_training', 'config_model': 'config_model', 'config_preproc': 'config_preproc'}), '(images_dir_path=valid_images_dir_path, annotation_json_path=\n valid_annotation_json_filepath, config_training=config_training,\n config_model=config_model, config_preproc=config_preproc)\n', (9042, 9234), False, 'from data_loader.data_loader import DataLoader\n'), ((9823, 9956), 'model_provider.get_model', 'get_model', ([], {'model_name': 'model_name', 'model_subname': 'model_subname', 'number_of_keypoints': 'number_of_keypoints', 'config_extra': 'config_extra'}), '(model_name=model_name, model_subname=model_subname,\n number_of_keypoints=number_of_keypoints, config_extra=config_extra)\n', (9832, 9956), False, 'from model_provider import get_model\n'), ((10038, 10072), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (10070, 10072), True, 'import tensorflow as tf\n'), ((10089, 10188), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (["config_training['learning_rate']"], {'epsilon': "config_training['epsilon']"}), "(config_training['learning_rate'], epsilon=\n config_training['epsilon'])\n", (10113, 10188), True, 'import tensorflow as tf\n'), ((10210, 10250), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""train_loss"""'}), "(name='train_loss')\n", (10231, 10250), True, 'import tensorflow as tf\n'), ((10268, 10308), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""valid_loss"""'}), "(name='valid_loss')\n", (10289, 10308), True, 'import tensorflow as tf\n'), ((10330, 10395), 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 
'tf.keras.metrics.SparseCategoricalAccuracy', ([], {'name': '"""valid_accuracy"""'}), "(name='valid_accuracy')\n", (10372, 10395), True, 'import tensorflow as tf\n'), ((11111, 11157), 'common.get_time_and_step_interval', 'get_time_and_step_interval', (['step'], {'is_init': '(True)'}), '(step, is_init=True)\n', (11137, 11157), False, 'from common import get_time_and_step_interval\n'), ((14282, 14465), 'evaluate.calculate_total_pckh', 'calculate_total_pckh', ([], {'saved_model_path': 'saved_model_path', 'annotation_path': 'valid_annotation_json_filepath', 'images_path': 'valid_images_dir_path', 'distance_ratio': 'pckh_distance_ratio'}), '(saved_model_path=saved_model_path, annotation_path=\n valid_annotation_json_filepath, images_path=valid_images_dir_path,\n distance_ratio=pckh_distance_ratio)\n', (14302, 14465), False, 'from evaluate import calculate_total_pckh\n'), ((2006, 2017), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (2014, 2017), False, 'import sys\n'), ((2316, 2326), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2324, 2326), False, 'import sys\n'), ((3861, 3884), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3882, 3884), False, 'import datetime\n'), ((4529, 4546), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4544, 4546), True, 'import tensorflow as tf\n'), ((4764, 4785), 'tensorflow.math.add_n', 'tf.math.add_n', (['losses'], {}), '(losses)\n', (4777, 4785), True, 'import tensorflow as tf\n'), ((5615, 5642), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (5629, 5642), False, 'import os\n'), ((5652, 5673), 'os.mkdir', 'os.mkdir', (['output_path'], {}), '(output_path)\n', (5660, 5673), False, 'import os\n'), ((6274, 6375), 'os.path.join', 'os.path.join', (['output_path', 'output_name', 'val_image_results_directory', 'f"""result{i}-{step:0>6d}.jpg"""'], {}), "(output_path, output_name, val_image_results_directory,\n f'result{i}-{step:0>6d}.jpg')\n", (6286, 6375), False, 'import os\n'), 
((6889, 6916), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (6903, 6916), False, 'import os\n'), ((6926, 6947), 'os.mkdir', 'os.mkdir', (['output_path'], {}), '(output_path)\n', (6934, 6947), False, 'import os\n'), ((14623, 14714), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['f"""pckh@{pckh_distance_ratio:.1f}_score"""', '(pckh_score * 100)'], {'step': 'step'}), "(f'pckh@{pckh_distance_ratio:.1f}_score', pckh_score * 100,\n step=step)\n", (14640, 14714), True, 'import tensorflow as tf\n'), ((5700, 5738), 'os.path.join', 'os.path.join', (['output_path', 'output_name'], {}), '(output_path, output_name)\n', (5712, 5738), False, 'import os\n'), ((5758, 5796), 'os.path.join', 'os.path.join', (['output_path', 'output_name'], {}), '(output_path, output_name)\n', (5770, 5796), False, 'import os\n'), ((5824, 5891), 'os.path.join', 'os.path.join', (['output_path', 'output_name', 'val_image_results_directory'], {}), '(output_path, output_name, val_image_results_directory)\n', (5836, 5891), False, 'import os\n'), ((5911, 5978), 'os.path.join', 'os.path.join', (['output_path', 'output_name', 'val_image_results_directory'], {}), '(output_path, output_name, val_image_results_directory)\n', (5923, 5978), False, 'import os\n'), ((6974, 7012), 'os.path.join', 'os.path.join', (['output_path', 'output_name'], {}), '(output_path, output_name)\n', (6986, 7012), False, 'import os\n'), ((7032, 7070), 'os.path.join', 'os.path.join', (['output_path', 'output_name'], {}), '(output_path, output_name)\n', (7044, 7070), False, 'import os\n'), ((7098, 7159), 'os.path.join', 'os.path.join', (['output_path', 'output_name', 'saved_model_directory'], {}), '(output_path, output_name, saved_model_directory)\n', (7110, 7159), False, 'import os\n'), ((7179, 7240), 'os.path.join', 'os.path.join', (['output_path', 'output_name', 'saved_model_directory'], {}), '(output_path, output_name, saved_model_directory)\n', (7191, 7240), False, 'import os\n'), ((11713, 
11745), 'common.get_time_and_step_interval', 'get_time_and_step_interval', (['step'], {}), '(step)\n', (11739, 11745), False, 'from common import get_time_and_step_interval\n'), ((12885, 13068), 'evaluate.calculate_total_pckh', 'calculate_total_pckh', ([], {'saved_model_path': 'saved_model_path', 'annotation_path': 'valid_annotation_json_filepath', 'images_path': 'valid_images_dir_path', 'distance_ratio': 'pckh_distance_ratio'}), '(saved_model_path=saved_model_path, annotation_path=\n valid_annotation_json_filepath, images_path=valid_images_dir_path,\n distance_ratio=pckh_distance_ratio)\n', (12905, 13068), False, 'from evaluate import calculate_total_pckh\n'), ((13306, 13397), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['f"""pckh@{pckh_distance_ratio:.1f}_score"""', '(pckh_score * 100)'], {'step': 'step'}), "(f'pckh@{pckh_distance_ratio:.1f}_score', pckh_score * 100,\n step=step)\n", (13323, 13397), True, 'import tensorflow as tf\n')] |
import os
import pickle
import random
import cv2
import h5py
import numpy as np
import numpy.random as npr
from train.h5py_utils import load_dict_from_hdf5
class DataGenerator:
    """Yields mixed training batches from three HDF5 datasets.

    Each step combines samples from the classification ("label"), bounding-box
    and landmark datasets; targets a subset does not provide are zero-filled
    so every sample shares one (label, bbox, landmark) output layout.
    """

    def __init__(self, label_dataset_path, bboxes_dataset_path, landmarks_dataset_path, batch_size, im_size,
                 shuffle=False):
        self.im_size = im_size
        # HDF5 files stay open for the generator's lifetime; closed in __exit__.
        self.label_file = h5py.File(label_dataset_path, 'r')
        self.bbox_file = h5py.File(bboxes_dataset_path, 'r')
        self.landmark_file = h5py.File(landmarks_dataset_path, 'r')
        self.batch_size = batch_size
        self.label_len = len(self.label_file['labels'])
        self.bbox_len = len(self.bbox_file['labels'])
        self.landmark_len = len(self.landmark_file['labels'])
        # sequential read cursors, used only when shuffle is False
        self.label_cursor = 0
        self.bbox_cursor = 0
        self.landmark_cursor = 0
        self.steps_per_epoch = int((self.label_len + self.bbox_len + self.landmark_len) / self.batch_size)
        self.index = 0
        self.epoch_done = False
        self._gen_sub_batch()
        self.shuffle = shuffle

    def _reset(self):
        # Rewind all per-epoch state (step index, epoch flag, read cursors).
        self.index = 0
        self.epoch_done = False
        self.label_cursor = 0
        self.bbox_cursor = 0
        self.landmark_cursor = 0

    def _gen_sub_batch(self):
        # Randomly split each subset across the epoch's steps via a multinomial
        # draw: per-step sub-batch sizes vary, but each row sums to the subset
        # length, so every sample is scheduled exactly once per epoch.
        n = self.steps_per_epoch
        self.label_batch_list = npr.multinomial(self.label_len, np.ones(n) / n, size=1)[0]
        self.bbox_batch_list = npr.multinomial(self.bbox_len, np.ones(n) / n, size=1)[0]
        self.landmark_batch_list = npr.multinomial(self.landmark_len, np.ones(n) / n, size=1)[0]

    def _load_label_dataset(self):
        """Return this step's classification samples; bbox/landmark targets zero-filled."""
        if self.shuffle:
            # shuffle: draw this step's quota at random from the whole subset
            # (epoch_done is never latched here; the epoch ends via the step index)
            selected = random.sample(range(0, self.label_len), self.label_batch_list[self.index])
            im_batch = []
            labels_batch = []
            for i in selected:
                im_batch.append(self.label_file['ims'][i])
                labels_batch.append(self.label_file['labels'][i])
            im_batch = np.array(im_batch, dtype=np.float32)
        else:
            # sequential slice; latch epoch_done once this subset is exhausted
            end = self.label_cursor + self.label_batch_list[self.index]
            im_batch = self.label_file['ims'][self.label_cursor:end]
            labels_batch = self.label_file['labels'][self.label_cursor:end]
            self.label_cursor = end
            self.epoch_done = True if self.label_cursor >= self.label_len else self.epoch_done
        batch_size = im_batch.shape[0]
        # this subset carries no bbox/landmark targets -> pad with zeros
        bboxes_batch = np.zeros(shape=(batch_size, 4), dtype=np.float32)
        landmarks_batch = np.zeros(shape=(batch_size, 10), dtype=np.float32)
        return im_batch, labels_batch, bboxes_batch, landmarks_batch

    def _load_bbox_dataset(self):
        """Return this step's bounding-box samples; landmark targets zero-filled."""
        if self.shuffle:
            selected = random.sample(range(0, self.bbox_len), self.bbox_batch_list[self.index])
            im_batch = []
            box_batch = []
            label_batch = []
            for i in selected:
                im_batch.append(self.bbox_file['ims'][i])
                box_batch.append(self.bbox_file['bboxes'][i])
                label_batch.append(self.bbox_file['labels'][i])
            im_batch = np.array(im_batch, dtype=np.float32)
            box_batch = np.array(box_batch, dtype=np.float32)
        else:
            end = self.bbox_cursor + self.bbox_batch_list[self.index]
            im_batch = self.bbox_file['ims'][self.bbox_cursor:end]
            box_batch = self.bbox_file['bboxes'][self.bbox_cursor:end]
            label_batch = self.bbox_file['labels'][self.bbox_cursor:end]
            self.bbox_cursor = end
            self.epoch_done = True if self.bbox_cursor >= self.bbox_len else self.epoch_done
        batch_size = im_batch.shape[0]
        landmarks_batch = np.zeros(shape=(batch_size, 10), dtype=np.float32)
        return im_batch, label_batch, box_batch, landmarks_batch

    def _load_landmark_dataset(self):
        """Return this step's landmark samples; bbox targets zero-filled."""
        if self.shuffle:
            selected = random.sample(range(0, self.landmark_len), self.landmark_batch_list[self.index])
            im_batch = []
            landmark_batch = []
            label_batch = []
            for i in selected:
                im_batch.append(self.landmark_file['ims'][i])
                landmark_batch.append(self.landmark_file['landmarks'][i])
                label_batch.append(self.landmark_file['labels'][i])
            im_batch = np.array(im_batch, dtype=np.float32)
            landmark_batch = np.array(landmark_batch, dtype=np.float32)
        else:
            end = self.landmark_cursor + self.landmark_batch_list[self.index]
            im_batch = self.landmark_file['ims'][self.landmark_cursor:end]
            landmark_batch = self.landmark_file['landmarks'][self.landmark_cursor:end]
            label_batch = self.landmark_file['labels'][self.landmark_cursor:end]
            self.landmark_cursor = end
            self.epoch_done = True if self.landmark_cursor >= self.landmark_len else self.epoch_done
        batch_size = im_batch.shape[0]
        bboxes_batch = np.array([[0, 0, 0, 0]] * batch_size, np.float32)
        return im_batch, label_batch, bboxes_batch, landmark_batch,

    def im_show(self, n=3):
        """Debug helper: display n random samples from each dataset with cv2."""
        assert n < 15
        l_ns = random.sample(range(0, len(self.label_file['ims'][:])), n)
        b_ns = random.sample(range(0, len(self.bbox_file['ims'][:])), n)
        m_ns = random.sample(range(0, len(self.landmark_file['ims'][:])), n)
        for i in l_ns:
            im = self.label_file['ims'][i]
            label = self.label_file['labels'][i]
            cv2.imshow('label_{}_{}'.format(i, label), im)
        for i in b_ns:
            im = self.bbox_file['ims'][i]
            bboxes = self.bbox_file['bboxes'][i]
            cv2.imshow('bbox_{}_{}'.format(i, bboxes), im)
        for i in m_ns:
            im = self.landmark_file['ims'][i]
            landmarks = self.landmark_file['landmarks'][i]
            cv2.imshow('landmark_{}_'.format(i, landmarks), im)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def generate(self):
        """Infinite generator of (x_batch, y_batch); y concatenates label|bbox|landmark."""
        while True:
            if self.index >= self.steps_per_epoch or self.epoch_done:
                # start a new epoch: rewind cursors and redraw the step quotas
                self._reset()
                self._gen_sub_batch()
            im_batch1, labels_batch1, bboxes_batch1, landmarks_batch1 = self._load_label_dataset()
            im_batch2, labels_batch2, bboxes_batch2, landmarks_batch2 = self._load_bbox_dataset()
            im_batch3, labels_batch3, bboxes_batch3, landmarks_batch3 = self._load_landmark_dataset()
            self.index += 1
            x_batch = np.concatenate((im_batch1, im_batch2, im_batch3), axis=0)
            x_batch = _process_im(x_batch)
            if x_batch.shape[0] < 1:
                # empty step: force an epoch reset on the next iteration
                self.epoch_done = True
                continue
            label_batch = np.concatenate((labels_batch1, labels_batch2, labels_batch3), axis=0)
            label_batch = np.array(_process_label(label_batch))
            bbox_batch = np.concatenate((bboxes_batch1, bboxes_batch2, bboxes_batch3), axis=0)
            landmark_batch = np.concatenate((landmarks_batch1, landmarks_batch2, landmarks_batch3), axis=0)
            # y layout per sample: [label(2) | bbox(4) | landmark(10)]
            y_batch = np.concatenate((label_batch, bbox_batch, landmark_batch), axis=1)
            yield x_batch, y_batch

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close the backing HDF5 files (context-manager protocol exit hook).
        self.label_file.close()
        self.bbox_file.close()
        self.landmark_file.close()
def _load_dataset(dataset_path):
ext = dataset_path.split(os.extsep)[-1]
if ext == 'pkl':
with open(dataset_path, 'rb') as f:
dataset = pickle.load(f)
elif ext == 'h5':
dataset = load_dict_from_hdf5(dataset_path)
else:
raise ValueError('Unsupported file type, only *.pkl and *.h5 are supported now.')
return dataset
def _process_im(im):
return (im.astype(np.float32) - 127.5) / 128
LABEL_DICT = {'0': [0, 0], '1': [1, 0], '-1': [-1, 0], '-2': [-2, 0]}
def _process_label(labels):
label = []
for ll in labels:
label.append(LABEL_DICT.get(str(ll)))
return label
def load_dataset(label_dataset_path, bbox_dataset_path, landmark_dataset_path, im_size=12):
    """Merge the classification, bbox and landmark datasets into four arrays.

    Targets a subset does not provide are zero-padded, so every sample gets a
    2-d label, a 4-d bbox and a 10-d landmark vector.

    Returns:
        (images_x, labels_y, bboxes_y, landmarks_y) with matching lengths.
    """
    label_x, label_y = load_label_dataset(label_dataset_path)
    bbox_x, bbox_y, b_label_y = load_bbox_dataset(bbox_dataset_path)
    landmark_x, landmark_y, l_label_y = load_landmark_dataset(landmark_dataset_path)
    n_label, n_landmark = len(label_y), len(l_label_y)
    # empty float seeds keep the original dtype promotion of the outputs
    images_x = np.concatenate(
        (np.empty((0, im_size, im_size, 3)), label_x, bbox_x, landmark_x), axis=0)
    labels_y = np.concatenate(
        (np.empty((0, 2)), label_y, b_label_y, l_label_y), axis=0)
    bboxes_y = np.concatenate(
        (np.empty((0, 4)),
         np.zeros((n_label, 4), np.float32),
         bbox_y,
         np.array([[0, 0, 0, 0]] * n_landmark, np.float32)), axis=0)
    landmarks_y = np.concatenate(
        (np.empty((0, 10)),
         np.zeros((n_label, 10), np.float32),
         np.zeros((len(b_label_y), 10), np.float32),
         landmark_y), axis=0)
    assert len(images_x) == len(labels_y) == len(bboxes_y) == len(landmarks_y)
    print('Shape of all: \n')
    print(images_x.shape)
    print(labels_y.shape)
    print(bboxes_y.shape)
    print(landmarks_y.shape)
    return images_x, labels_y, bboxes_y, landmarks_y
def load_label_dataset(label_dataset_path):
    """Return (normalized images, int8 encoded labels) from a label dataset file."""
    data = _load_dataset(label_dataset_path)
    ims = _process_im(np.array(data['ims']))
    labels = np.array(_process_label(data['labels'])).astype(np.int8)
    return ims, labels
def load_bbox_dataset(bbox_dataset_path):
    """Return (normalized images, float32 bboxes, int8 labels) from a bbox dataset file."""
    data = _load_dataset(bbox_dataset_path)
    ims = _process_im(np.array(data['ims']))
    bboxes = np.array(data['bboxes']).astype(np.float32)
    labels = np.array(_process_label(data['labels'])).astype(np.int8)
    return ims, bboxes, labels
def load_landmark_dataset(landmark_dataset_path):
    """Return (normalized images, float32 landmarks, int8 labels) from a landmark dataset file."""
    data = _load_dataset(landmark_dataset_path)
    ims = _process_im(np.array(data['ims']))
    landmarks = np.array(data['landmarks']).astype(np.float32)
    labels = np.array(_process_label(data['labels'])).astype(np.int8)
    return ims, landmarks, labels
| [
"h5py.File",
"cv2.waitKey",
"numpy.empty",
"numpy.zeros",
"numpy.ones",
"train.h5py_utils.load_dict_from_hdf5",
"pickle.load",
"numpy.array",
"cv2.destroyAllWindows",
"numpy.concatenate"
] | [((8105, 8139), 'numpy.empty', 'np.empty', (['(0, im_size, im_size, 3)'], {}), '((0, im_size, im_size, 3))\n', (8113, 8139), True, 'import numpy as np\n'), ((8155, 8171), 'numpy.empty', 'np.empty', (['(0, 2)'], {}), '((0, 2))\n', (8163, 8171), True, 'import numpy as np\n'), ((8187, 8203), 'numpy.empty', 'np.empty', (['(0, 4)'], {}), '((0, 4))\n', (8195, 8203), True, 'import numpy as np\n'), ((8222, 8239), 'numpy.empty', 'np.empty', (['(0, 10)'], {}), '((0, 10))\n', (8230, 8239), True, 'import numpy as np\n'), ((8348, 8391), 'numpy.concatenate', 'np.concatenate', (['(images_x, label_x)'], {'axis': '(0)'}), '((images_x, label_x), axis=0)\n', (8362, 8391), True, 'import numpy as np\n'), ((8407, 8450), 'numpy.concatenate', 'np.concatenate', (['(labels_y, label_y)'], {'axis': '(0)'}), '((labels_y, label_y), axis=0)\n', (8421, 8450), True, 'import numpy as np\n'), ((8753, 8795), 'numpy.concatenate', 'np.concatenate', (['(images_x, bbox_x)'], {'axis': '(0)'}), '((images_x, bbox_x), axis=0)\n', (8767, 8795), True, 'import numpy as np\n'), ((8811, 8856), 'numpy.concatenate', 'np.concatenate', (['(labels_y, b_label_y)'], {'axis': '(0)'}), '((labels_y, b_label_y), axis=0)\n', (8825, 8856), True, 'import numpy as np\n'), ((8872, 8914), 'numpy.concatenate', 'np.concatenate', (['(bboxes_y, bbox_y)'], {'axis': '(0)'}), '((bboxes_y, bbox_y), axis=0)\n', (8886, 8914), True, 'import numpy as np\n'), ((9144, 9190), 'numpy.concatenate', 'np.concatenate', (['(images_x, landmark_x)'], {'axis': '(0)'}), '((images_x, landmark_x), axis=0)\n', (9158, 9190), True, 'import numpy as np\n'), ((9206, 9251), 'numpy.concatenate', 'np.concatenate', (['(labels_y, l_label_y)'], {'axis': '(0)'}), '((labels_y, l_label_y), axis=0)\n', (9220, 9251), True, 'import numpy as np\n'), ((9371, 9420), 'numpy.concatenate', 'np.concatenate', (['(landmarks_y, landmark_y)'], {'axis': '(0)'}), '((landmarks_y, landmark_y), axis=0)\n', (9385, 9420), True, 'import numpy as np\n'), ((382, 416), 'h5py.File', 
'h5py.File', (['label_dataset_path', '"""r"""'], {}), "(label_dataset_path, 'r')\n", (391, 416), False, 'import h5py\n'), ((442, 477), 'h5py.File', 'h5py.File', (['bboxes_dataset_path', '"""r"""'], {}), "(bboxes_dataset_path, 'r')\n", (451, 477), False, 'import h5py\n'), ((507, 545), 'h5py.File', 'h5py.File', (['landmarks_dataset_path', '"""r"""'], {}), "(landmarks_dataset_path, 'r')\n", (516, 545), False, 'import h5py\n'), ((2444, 2493), 'numpy.zeros', 'np.zeros', ([], {'shape': '(batch_size, 4)', 'dtype': 'np.float32'}), '(shape=(batch_size, 4), dtype=np.float32)\n', (2452, 2493), True, 'import numpy as np\n'), ((2520, 2570), 'numpy.zeros', 'np.zeros', ([], {'shape': '(batch_size, 10)', 'dtype': 'np.float32'}), '(shape=(batch_size, 10), dtype=np.float32)\n', (2528, 2570), True, 'import numpy as np\n'), ((3707, 3757), 'numpy.zeros', 'np.zeros', ([], {'shape': '(batch_size, 10)', 'dtype': 'np.float32'}), '(shape=(batch_size, 10), dtype=np.float32)\n', (3715, 3757), True, 'import numpy as np\n'), ((4985, 5034), 'numpy.array', 'np.array', (['([[0, 0, 0, 0]] * batch_size)', 'np.float32'], {}), '([[0, 0, 0, 0]] * batch_size, np.float32)\n', (4993, 5034), True, 'import numpy as np\n'), ((5926, 5940), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5937, 5940), False, 'import cv2\n'), ((5949, 5972), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5970, 5972), False, 'import cv2\n'), ((9893, 9910), 'numpy.array', 'np.array', (['label_x'], {}), '(label_x)\n', (9901, 9910), True, 'import numpy as np\n'), ((10255, 10271), 'numpy.array', 'np.array', (['bbox_x'], {}), '(bbox_x)\n', (10263, 10271), True, 'import numpy as np\n'), ((10717, 10737), 'numpy.array', 'np.array', (['landmark_x'], {}), '(landmark_x)\n', (10725, 10737), True, 'import numpy as np\n'), ((1980, 2016), 'numpy.array', 'np.array', (['im_batch'], {'dtype': 'np.float32'}), '(im_batch, dtype=np.float32)\n', (1988, 2016), True, 'import numpy as np\n'), ((3117, 3153), 'numpy.array', 
'np.array', (['im_batch'], {'dtype': 'np.float32'}), '(im_batch, dtype=np.float32)\n', (3125, 3153), True, 'import numpy as np\n'), ((3178, 3215), 'numpy.array', 'np.array', (['box_batch'], {'dtype': 'np.float32'}), '(box_batch, dtype=np.float32)\n', (3186, 3215), True, 'import numpy as np\n'), ((4337, 4373), 'numpy.array', 'np.array', (['im_batch'], {'dtype': 'np.float32'}), '(im_batch, dtype=np.float32)\n', (4345, 4373), True, 'import numpy as np\n'), ((4403, 4445), 'numpy.array', 'np.array', (['landmark_batch'], {'dtype': 'np.float32'}), '(landmark_batch, dtype=np.float32)\n', (4411, 4445), True, 'import numpy as np\n'), ((6507, 6564), 'numpy.concatenate', 'np.concatenate', (['(im_batch1, im_batch2, im_batch3)'], {'axis': '(0)'}), '((im_batch1, im_batch2, im_batch3), axis=0)\n', (6521, 6564), True, 'import numpy as np\n'), ((6736, 6805), 'numpy.concatenate', 'np.concatenate', (['(labels_batch1, labels_batch2, labels_batch3)'], {'axis': '(0)'}), '((labels_batch1, labels_batch2, labels_batch3), axis=0)\n', (6750, 6805), True, 'import numpy as np\n'), ((6895, 6964), 'numpy.concatenate', 'np.concatenate', (['(bboxes_batch1, bboxes_batch2, bboxes_batch3)'], {'axis': '(0)'}), '((bboxes_batch1, bboxes_batch2, bboxes_batch3), axis=0)\n', (6909, 6964), True, 'import numpy as np\n'), ((6994, 7072), 'numpy.concatenate', 'np.concatenate', (['(landmarks_batch1, landmarks_batch2, landmarks_batch3)'], {'axis': '(0)'}), '((landmarks_batch1, landmarks_batch2, landmarks_batch3), axis=0)\n', (7008, 7072), True, 'import numpy as np\n'), ((7096, 7161), 'numpy.concatenate', 'np.concatenate', (['(label_batch, bbox_batch, landmark_batch)'], {'axis': '(1)'}), '((label_batch, bbox_batch, landmark_batch), axis=1)\n', (7110, 7161), True, 'import numpy as np\n'), ((7513, 7527), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7524, 7527), False, 'import pickle\n'), ((7568, 7601), 'train.h5py_utils.load_dict_from_hdf5', 'load_dict_from_hdf5', (['dataset_path'], {}), '(dataset_path)\n', 
(7587, 7601), False, 'from train.h5py_utils import load_dict_from_hdf5\n'), ((8492, 8529), 'numpy.zeros', 'np.zeros', (['(len_labels, 4)', 'np.float32'], {}), '((len_labels, 4), np.float32)\n', (8500, 8529), True, 'import numpy as np\n'), ((8587, 8625), 'numpy.zeros', 'np.zeros', (['(len_labels, 10)', 'np.float32'], {}), '((len_labels, 10), np.float32)\n', (8595, 8625), True, 'import numpy as np\n'), ((8962, 9000), 'numpy.zeros', 'np.zeros', (['(len_labels, 10)', 'np.float32'], {}), '((len_labels, 10), np.float32)\n', (8970, 9000), True, 'import numpy as np\n'), ((9293, 9342), 'numpy.array', 'np.array', (['([[0, 0, 0, 0]] * len_labels)', 'np.float32'], {}), '([[0, 0, 0, 0]] * len_labels, np.float32)\n', (9301, 9342), True, 'import numpy as np\n'), ((9966, 9983), 'numpy.array', 'np.array', (['label_y'], {}), '(label_y)\n', (9974, 9983), True, 'import numpy as np\n'), ((10286, 10302), 'numpy.array', 'np.array', (['bbox_y'], {}), '(bbox_y)\n', (10294, 10302), True, 'import numpy as np\n'), ((10375, 10392), 'numpy.array', 'np.array', (['label_y'], {}), '(label_y)\n', (10383, 10392), True, 'import numpy as np\n'), ((10756, 10776), 'numpy.array', 'np.array', (['landmark_y'], {}), '(landmark_y)\n', (10764, 10776), True, 'import numpy as np\n'), ((10849, 10866), 'numpy.array', 'np.array', (['label_y'], {}), '(label_y)\n', (10857, 10866), True, 'import numpy as np\n'), ((1372, 1382), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (1379, 1382), True, 'import numpy as np\n'), ((1461, 1471), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (1468, 1471), True, 'import numpy as np\n'), ((1558, 1568), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (1565, 1568), True, 'import numpy as np\n')] |
import os
import sys
import json
import csv
import random
import argparse
import torch
import dataloaders
import models
import inspect
import math
from datetime import datetime
from utils import losses
from utils import Logger
from utils.torchsummary import summary
from trainer import Trainer
from torchvision import transforms
from tqdm import tqdm
import torch.nn.functional as F
import numpy as np
import wandb
from wandb import AlertLevel
class Margin_Sampling():
    """Margin-based active learning loop for semantic segmentation.

    Per episode: train a model on the labeled pool (``train_model``), score
    every unlabeled image by the mean per-pixel margin between the two most
    probable classes (``margin_score``), then move the ``args.batch_size``
    lowest-margin (most uncertain) images into the labeled pool for the next
    episode (``update_pools``).
    """

    def __init__(self):
        pass

    def get_instance(self, module, name, config, *args):
        """Instantiate the class named in ``config[name]['type']`` from `module`.

        Positional ``*args`` come first, followed by the keyword arguments
        stored in ``config[name]['args']``.
        """
        # GET THE CORRESPONDING CLASS / FCT
        return getattr(module, config[name]['type'])(*args, **config[name]['args'])

    def create_episodedir(self, cfg, episode):
        """Create the directory for `episode` and point config paths into it.

        Returns the updated config. If the directory already exists it is
        reused, with a warning about potential data loss.
        """
        episode_dir = os.path.join(cfg['exp_dir'], "episode" + str(episode))
        if not os.path.exists(episode_dir):
            os.mkdir(episode_dir)
        else:
            print("=============================")
            print("Episode directory already exists: {}. Reusing it may lead to loss of old data in the directory.".format(episode_dir))
            print("=============================")
        cfg['episode'] = episode
        cfg['episode_dir'] = episode_dir
        cfg['trainer']['save_dir'] = os.path.join(episode_dir, cfg['trainer']['original_save_dir'])
        cfg['trainer']['log_dir'] = os.path.join(episode_dir, cfg['trainer']['original_log_dir'])
        cfg['labeled_loader']['args']['load_from'] = os.path.join(episode_dir, "labeled.txt")
        cfg['unlabeled_loader']['args']['load_from'] = os.path.join(episode_dir, "unlabeled.txt")
        return cfg

    def train_model(self, args, config):
        """Train a model on the current labeled pool.

        Saves the updated config (with the checkpoint directory) next to the
        checkpoints and returns it.
        """
        train_logger = Logger()
        # DATA LOADERS
        labeled_loader = self.get_instance(dataloaders, 'labeled_loader', config)
        val_loader = self.get_instance(dataloaders, 'val_loader', config)
        test_loader = self.get_instance(dataloaders, 'test_loader', config)
        # MODEL
        model = self.get_instance(models, 'arch', config, labeled_loader.dataset.num_classes)
        # LOSS
        loss = getattr(losses, config['loss'])(ignore_index=config['ignore_index'])
        # TRAINING
        trainer = Trainer(
            model=model,
            loss=loss,
            resume=args.resume,
            config=config,
            train_loader=labeled_loader,
            val_loader=val_loader,
            test_loader=test_loader,
            train_logger=train_logger)
        trainer.train()
        config['checkpoint_dir'] = trainer._get_checkpoint_dir()
        config_save_path = os.path.join(config['checkpoint_dir'], 'updated_config.json')
        with open(config_save_path, 'w') as handle:
            json.dump(config, handle, indent=4, sort_keys=True)
        return config

    def margin_score(self, prob_map):
        """Mean margin between the two most probable classes.

        Args:
            prob_map: numpy array of shape (num_classes, H, W) holding class
                probabilities (softmax output).

        Returns:
            Scalar mean over pixels of (top probability - second probability).
            Small margins indicate high model uncertainty.
        """
        sorted_probs = np.sort(prob_map, axis=0)  # ascending along the class axis
        # Bug fix: the two most probable classes sit at the *end* of an
        # ascending sort.  The previous indices [1] and [0] picked the two
        # smallest probabilities and were only correct for exactly 2 classes.
        top_prob = sorted_probs[-1, :, :]
        second_prob = sorted_probs[-2, :, :]
        margin = top_prob - second_prob
        return margin.mean()

    def update_pools(self, args, config, episode):
        """Score the unlabeled pool and grow the labeled pool.

        Loads the best checkpoint, computes a margin uncertainty score for
        every unlabeled image, selects the ``args.batch_size`` lowest-margin
        images, and writes the new labeled/unlabeled splits into the next
        episode's directory.

        Returns the config updated for the next episode.
        """
        unlabeled_loader = self.get_instance(dataloaders, 'unlabeled_loader', config)
        unlabeled_file = os.path.join(config["episode_dir"], "unlabeled.txt")
        unlabeled_reader = csv.reader(open(unlabeled_file, 'rt'))
        unlabeled_image_set = [r[0] for r in unlabeled_reader]
        # Model
        model = self.get_instance(models, 'arch',
                                  config, unlabeled_loader.dataset.num_classes)
        available_gpus = list(range(torch.cuda.device_count()))
        device = torch.device('cuda:0' if len(available_gpus) > 0 else 'cpu')
        # Load checkpoint
        checkpoint = torch.load(os.path.join(config['exp_dir'],
                                                 "best_model.pth"), map_location=device)
        if isinstance(checkpoint, dict) and 'state_dict' in checkpoint.keys():
            checkpoint = checkpoint['state_dict']
        # If during training, we used data parallel
        if 'module' in list(checkpoint.keys())[0] and not isinstance(model,
                                                                     torch.nn.DataParallel):
            # for gpu inference, use data parallel
            if "cuda" in device.type:
                model = torch.nn.DataParallel(model)
            else:
                # for cpu inference, remove the 'module.' prefix.
                # Bug fix: OrderedDict was never imported at module level,
                # so this branch crashed with a NameError.
                from collections import OrderedDict
                new_state_dict = OrderedDict()
                for k, v in checkpoint.items():
                    name = k[7:]
                    new_state_dict[name] = v
                checkpoint = new_state_dict
        # load
        model.load_state_dict(checkpoint)
        model.to(device)
        model.eval()
        loss = getattr(losses, config['loss'])(ignore_index=config['ignore_index'])
        information_content = []
        tbar = tqdm(unlabeled_loader, ncols=130)
        with torch.no_grad():
            for img_idx, (data, target) in enumerate(tbar):
                data, target = data.to(device), target.to(device)
                output, _ = model(data)
                output = output.squeeze(0).cpu().numpy()
                # dim=0 is the class axis of the (C, H, W) map; passing it
                # explicitly matches the legacy implicit behaviour and avoids
                # the deprecation warning.
                output = F.softmax(torch.from_numpy(output), dim=0)
                uncertainty_score = self.margin_score(output.numpy())
                information_content.append([unlabeled_image_set[img_idx],
                                            uncertainty_score])
        # Smallest margin == most uncertain: keep the first `batch_size` items.
        information_content = sorted(information_content,
                                     key=lambda x: x[1], reverse=False)
        information_content = information_content[:args.batch_size]
        new_batch = [x[0] for x in information_content]
        labeled = os.path.join(config['episode_dir'], "labeled.txt")
        labeled_reader = csv.reader(open(labeled, 'rt'))
        labeled_image_set = [r[0] for r in labeled_reader]
        new_labeled = labeled_image_set + new_batch
        new_labeled.sort()
        new_unlabeled = list(set(unlabeled_image_set) - set(new_batch))
        new_unlabeled.sort()
        config = self.create_episodedir(config, episode + 1)
        with open(os.path.join(config['episode_dir'], "labeled.txt"), 'w') as f:
            writer = csv.writer(f)
            for image in new_labeled:
                writer.writerow([image])
        with open(os.path.join(config['episode_dir'], "unlabeled.txt"), 'w') as f:
            writer = csv.writer(f)
            for image in new_unlabeled:
                writer.writerow([image])
        return config
| [
"os.mkdir",
"tqdm.tqdm",
"json.dump",
"torch.no_grad",
"csv.writer",
"os.path.exists",
"torch.cuda.device_count",
"numpy.sort",
"trainer.Trainer",
"torch.nn.DataParallel",
"utils.Logger",
"os.path.join",
"torch.from_numpy"
] | [((1264, 1326), 'os.path.join', 'os.path.join', (['episode_dir', "cfg['trainer']['original_save_dir']"], {}), "(episode_dir, cfg['trainer']['original_save_dir'])\n", (1276, 1326), False, 'import os\n'), ((1362, 1423), 'os.path.join', 'os.path.join', (['episode_dir', "cfg['trainer']['original_log_dir']"], {}), "(episode_dir, cfg['trainer']['original_log_dir'])\n", (1374, 1423), False, 'import os\n'), ((1477, 1517), 'os.path.join', 'os.path.join', (['episode_dir', '"""labeled.txt"""'], {}), "(episode_dir, 'labeled.txt')\n", (1489, 1517), False, 'import os\n'), ((1573, 1615), 'os.path.join', 'os.path.join', (['episode_dir', '"""unlabeled.txt"""'], {}), "(episode_dir, 'unlabeled.txt')\n", (1585, 1615), False, 'import os\n'), ((1702, 1710), 'utils.Logger', 'Logger', ([], {}), '()\n', (1708, 1710), False, 'from utils import Logger\n'), ((2249, 2428), 'trainer.Trainer', 'Trainer', ([], {'model': 'model', 'loss': 'loss', 'resume': 'args.resume', 'config': 'config', 'train_loader': 'labeled_loader', 'val_loader': 'val_loader', 'test_loader': 'test_loader', 'train_logger': 'train_logger'}), '(model=model, loss=loss, resume=args.resume, config=config,\n train_loader=labeled_loader, val_loader=val_loader, test_loader=\n test_loader, train_logger=train_logger)\n', (2256, 2428), False, 'from trainer import Trainer\n'), ((2635, 2696), 'os.path.join', 'os.path.join', (["config['checkpoint_dir']", '"""updated_config.json"""'], {}), "(config['checkpoint_dir'], 'updated_config.json')\n", (2647, 2696), False, 'import os\n'), ((3294, 3346), 'os.path.join', 'os.path.join', (["config['episode_dir']", '"""unlabeled.txt"""'], {}), "(config['episode_dir'], 'unlabeled.txt')\n", (3306, 3346), False, 'import os\n'), ((4843, 4876), 'tqdm.tqdm', 'tqdm', (['unlabeled_loader'], {'ncols': '(130)'}), '(unlabeled_loader, ncols=130)\n', (4847, 4876), False, 'from tqdm import tqdm\n'), ((5628, 5678), 'os.path.join', 'os.path.join', (["config['episode_dir']", '"""labeled.txt"""'], {}), 
"(config['episode_dir'], 'labeled.txt')\n", (5640, 5678), False, 'import os\n'), ((836, 863), 'os.path.exists', 'os.path.exists', (['episode_dir'], {}), '(episode_dir)\n', (850, 863), False, 'import os\n'), ((877, 898), 'os.mkdir', 'os.mkdir', (['episode_dir'], {}), '(episode_dir)\n', (885, 898), False, 'import os\n'), ((2761, 2812), 'json.dump', 'json.dump', (['config', 'handle'], {'indent': '(4)', 'sort_keys': '(True)'}), '(config, handle, indent=4, sort_keys=True)\n', (2770, 2812), False, 'import json\n'), ((2905, 2930), 'numpy.sort', 'np.sort', (['prob_map'], {'axis': '(0)'}), '(prob_map, axis=0)\n', (2912, 2930), True, 'import numpy as np\n'), ((2975, 3000), 'numpy.sort', 'np.sort', (['prob_map'], {'axis': '(0)'}), '(prob_map, axis=0)\n', (2982, 3000), True, 'import numpy as np\n'), ((3800, 3849), 'os.path.join', 'os.path.join', (["config['exp_dir']", '"""best_model.pth"""'], {}), "(config['exp_dir'], 'best_model.pth')\n", (3812, 3849), False, 'import os\n'), ((4890, 4905), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4903, 4905), False, 'import torch\n'), ((6141, 6154), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (6151, 6154), False, 'import csv\n'), ((6339, 6352), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (6349, 6352), False, 'import csv\n'), ((3636, 3661), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3659, 3661), False, 'import torch\n'), ((4292, 4320), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (4313, 4320), False, 'import torch\n'), ((6057, 6107), 'os.path.join', 'os.path.join', (["config['episode_dir']", '"""labeled.txt"""'], {}), "(config['episode_dir'], 'labeled.txt')\n", (6069, 6107), False, 'import os\n'), ((6253, 6305), 'os.path.join', 'os.path.join', (["config['episode_dir']", '"""unlabeled.txt"""'], {}), "(config['episode_dir'], 'unlabeled.txt')\n", (6265, 6305), False, 'import os\n'), ((5165, 5189), 'torch.from_numpy', 'torch.from_numpy', (['output'], {}), 
'(output)\n', (5181, 5189), False, 'import torch\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
import numpy as np
def SPSP(x, P=1, method='avg'):
    """Spatial pyramid pooling over a conv feature map.

    Args:
        x: 4D tensor of shape (batch, channels, H, W).
        P: number of pyramid levels; level p pools the map down to p x p cells.
        method: 'max', 'min', 'avg', 'maxmin', 'std', or any other value for
            statistical (mean & std) pooling.

    Returns:
        2D tensor (batch, features) concatenating the pooled features of all
        pyramid levels.
    """
    batch_size = x.size(0)
    map_size = x.size()[-2:]
    pool_features = []
    for p in range(1, P + 1):
        # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int() is exactly equivalent here.
        pool_size = [int(d / p) for d in map_size]
        if method == 'maxmin':
            M = F.max_pool2d(x, pool_size)
            m = -F.max_pool2d(-x, pool_size)
            pool_features.append(torch.cat((M, m), 1).view(batch_size, -1))  # max & min pooling
        elif method == 'max':
            M = F.max_pool2d(x, pool_size)
            pool_features.append(M.view(batch_size, -1))  # max pooling
        elif method == 'min':
            m = -F.max_pool2d(-x, pool_size)
            pool_features.append(m.view(batch_size, -1))  # min pooling
        elif method == 'avg':
            a = F.avg_pool2d(x, pool_size)
            pool_features.append(a.view(batch_size, -1))  # average pooling
        else:
            m1 = F.avg_pool2d(x, pool_size)
            # std via sqrt(E[x^2] - E[x]^2); relu guards against tiny negative
            # values caused by floating-point round-off.
            rm2 = torch.sqrt(F.relu(F.avg_pool2d(torch.pow(x, 2), pool_size) - torch.pow(m1, 2)))
            if method == 'std':
                pool_features.append(rm2.view(batch_size, -1))  # std pooling
            else:
                pool_features.append(torch.cat((m1, rm2), 1).view(batch_size, -1))  # statistical pooling: mean & std
    return torch.cat(pool_features, dim=1)
class LinearityIQA(nn.Module):
    """Image quality assessment network with two intermediate feature taps.

    A torchvision backbone is run up to two tap points (id1, id2); the pooled
    features of each tap go through a dimension-reduction head and a scalar
    regressor, and the concatenated 64-d features feed the final quality
    regression.

    Args:
        arch: torchvision model name (alexnet, vgg16, or a res* family model).
        pool: pooling method passed to SPSP; 'maxmin' and statistical pooling
            produce two statistics and double the feature width.
        use_bn_end: if True, append BatchNorm1d to each regressor output.
        P6, P7: number of spatial pyramid levels for the two taps.
    """

    def __init__(self, arch='resnext101_32x8d', pool='avg', use_bn_end=False, P6=1, P7=1):
        super(LinearityIQA, self).__init__()
        self.pool = pool
        self.use_bn_end = use_bn_end
        # Pooling methods returning a single statistic keep the channel count;
        # the others ('maxmin', mean&std) concatenate two and double it.
        if pool in ['max', 'min', 'avg', 'std']:
            c = 1
        else:
            c = 2
        self.P6 = P6  #
        self.P7 = P7  #
        features = list(models.__dict__[arch](pretrained=True).children())[:-2]
        if arch == 'alexnet':
            in_features = [256, 256]
            self.id1 = 9
            self.id2 = 12
            features = features[0]
        elif arch == 'vgg16':
            in_features = [512, 512]
            self.id1 = 23
            self.id2 = 30
            features = features[0]
        elif 'res' in arch:
            self.id1 = 6
            self.id2 = 7
            if arch == 'resnet18' or arch == 'resnet34':
                in_features = [256, 512]
            else:
                in_features = [1024, 2048]
        else:
            # Bug fix: this branch previously only printed a message and then
            # crashed later with a confusing NameError on `in_features`.
            raise NotImplementedError('The arch {} is not implemented!'.format(arch))
        self.features = nn.Sequential(*features)
        self.dr6 = self._make_dr(in_features[0] * c * sum(p * p for p in range(1, self.P6 + 1)))
        self.dr7 = self._make_dr(in_features[1] * c * sum(p * p for p in range(1, self.P7 + 1)))
        if self.use_bn_end:
            self.regr6 = nn.Sequential(nn.Linear(64, 1), nn.BatchNorm1d(1))
            self.regr7 = nn.Sequential(nn.Linear(64, 1), nn.BatchNorm1d(1))
            self.regression = nn.Sequential(nn.Linear(64 * 2, 1), nn.BatchNorm1d(1))
        else:
            self.regr6 = nn.Linear(64, 1)
            self.regr7 = nn.Linear(64, 1)
            self.regression = nn.Linear(64 * 2, 1)

    @staticmethod
    def _make_dr(in_dim):
        """Dimension-reduction head: in_dim -> 1024 -> 256 -> 64, BN between
        layers and a final ReLU.  (Module layout matches the original dr6/dr7
        Sequentials, so state_dict keys are unchanged.)"""
        return nn.Sequential(nn.Linear(in_dim, 1024),
                             nn.BatchNorm1d(1024),
                             nn.Linear(1024, 256),
                             nn.BatchNorm1d(256),
                             nn.Linear(256, 64),
                             nn.BatchNorm1d(64), nn.ReLU())

    def extract_features(self, x):
        """Run the backbone, collecting pooled & reduced features at both taps.

        Returns:
            f: (batch, 128) concatenated 64-d features of the two taps.
            pq: list of the two per-tap scalar quality predictions.
        """
        f, pq = [], []
        for ii, model in enumerate(self.features):
            x = model(x)
            if ii == self.id1:
                x6 = SPSP(x, P=self.P6, method=self.pool)
                x6 = self.dr6(x6)
                f.append(x6)
                pq.append(self.regr6(x6))
            if ii == self.id2:
                x7 = SPSP(x, P=self.P7, method=self.pool)
                x7 = self.dr7(x7)
                f.append(x7)
                pq.append(self.regr7(x7))
        f = torch.cat(f, dim=1)
        return f, pq

    def forward(self, x):
        """Returns ([tap6 score, tap7 score, overall score], overall score)."""
        f, pq = self.extract_features(x)
        s = self.regression(f)
        pq.append(s)
        return pq, s
if __name__ == "__main__":
    # Smoke test: push one random image through the network in eval mode
    # and print the predicted overall quality score.
    dummy_input = torch.rand((1, 3, 224, 224))
    model = LinearityIQA()
    model.train(False)
    quality_outputs, overall_score = model(dummy_input)
    print(overall_score)
| [
"torch.nn.ReLU",
"torch.rand",
"torch.nn.Sequential",
"torch.nn.functional.avg_pool2d",
"torch.nn.BatchNorm1d",
"torch.cat",
"torch.nn.Linear",
"numpy.int",
"torch.pow",
"torch.nn.functional.max_pool2d"
] | [((1383, 1414), 'torch.cat', 'torch.cat', (['pool_features'], {'dim': '(1)'}), '(pool_features, dim=1)\n', (1392, 1414), False, 'import torch\n'), ((4492, 4520), 'torch.rand', 'torch.rand', (['(1, 3, 224, 224)'], {}), '((1, 3, 224, 224))\n', (4502, 4520), False, 'import torch\n'), ((2506, 2530), 'torch.nn.Sequential', 'nn.Sequential', (['*features'], {}), '(*features)\n', (2519, 2530), True, 'import torch.nn as nn\n'), ((4272, 4291), 'torch.cat', 'torch.cat', (['f'], {'dim': '(1)'}), '(f, dim=1)\n', (4281, 4291), False, 'import torch\n'), ((279, 292), 'numpy.int', 'np.int', (['(d / p)'], {}), '(d / p)\n', (285, 292), True, 'import numpy as np\n'), ((359, 385), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', 'pool_size'], {}), '(x, pool_size)\n', (371, 385), True, 'import torch.nn.functional as F\n'), ((2678, 2698), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(1024)'], {}), '(1024)\n', (2692, 2698), True, 'import torch.nn as nn\n'), ((2733, 2753), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(256)'], {}), '(1024, 256)\n', (2742, 2753), True, 'import torch.nn as nn\n'), ((2788, 2807), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {}), '(256)\n', (2802, 2807), True, 'import torch.nn as nn\n'), ((2842, 2860), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(64)'], {}), '(256, 64)\n', (2851, 2860), True, 'import torch.nn as nn\n'), ((2895, 2913), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (2909, 2913), True, 'import torch.nn as nn\n'), ((2915, 2924), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2922, 2924), True, 'import torch.nn as nn\n'), ((3073, 3093), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(1024)'], {}), '(1024)\n', (3087, 3093), True, 'import torch.nn as nn\n'), ((3128, 3148), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(256)'], {}), '(1024, 256)\n', (3137, 3148), True, 'import torch.nn as nn\n'), ((3183, 3202), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {}), '(256)\n', (3197, 3202), True, 'import 
torch.nn as nn\n'), ((3237, 3255), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(64)'], {}), '(256, 64)\n', (3246, 3255), True, 'import torch.nn as nn\n'), ((3290, 3308), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (3304, 3308), True, 'import torch.nn as nn\n'), ((3310, 3319), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3317, 3319), True, 'import torch.nn as nn\n'), ((3626, 3642), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(1)'], {}), '(64, 1)\n', (3635, 3642), True, 'import torch.nn as nn\n'), ((3668, 3684), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(1)'], {}), '(64, 1)\n', (3677, 3684), True, 'import torch.nn as nn\n'), ((3715, 3735), 'torch.nn.Linear', 'nn.Linear', (['(64 * 2)', '(1)'], {}), '(64 * 2, 1)\n', (3724, 3735), True, 'import torch.nn as nn\n'), ((403, 430), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['(-x)', 'pool_size'], {}), '(-x, pool_size)\n', (415, 430), True, 'import torch.nn.functional as F\n'), ((574, 600), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', 'pool_size'], {}), '(x, pool_size)\n', (586, 600), True, 'import torch.nn.functional as F\n'), ((3389, 3405), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(1)'], {}), '(64, 1)\n', (3398, 3405), True, 'import torch.nn as nn\n'), ((3407, 3424), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(1)'], {}), '(1)\n', (3421, 3424), True, 'import torch.nn as nn\n'), ((3465, 3481), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(1)'], {}), '(64, 1)\n', (3474, 3481), True, 'import torch.nn as nn\n'), ((3483, 3500), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(1)'], {}), '(1)\n', (3497, 3500), True, 'import torch.nn as nn\n'), ((3546, 3566), 'torch.nn.Linear', 'nn.Linear', (['(64 * 2)', '(1)'], {}), '(64 * 2, 1)\n', (3555, 3566), True, 'import torch.nn as nn\n'), ((3568, 3585), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(1)'], {}), '(1)\n', (3582, 3585), True, 'import torch.nn as nn\n'), ((464, 484), 'torch.cat', 'torch.cat', (['(M, m)', '(1)'], {}), '((M, m), 1)\n', 
(473, 484), False, 'import torch\n'), ((720, 747), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['(-x)', 'pool_size'], {}), '(-x, pool_size)\n', (732, 747), True, 'import torch.nn.functional as F\n'), ((866, 892), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x', 'pool_size'], {}), '(x, pool_size)\n', (878, 892), True, 'import torch.nn.functional as F\n'), ((1001, 1027), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x', 'pool_size'], {}), '(x, pool_size)\n', (1013, 1027), True, 'import torch.nn.functional as F\n'), ((1107, 1123), 'torch.pow', 'torch.pow', (['m1', '(2)'], {}), '(m1, 2)\n', (1116, 1123), False, 'import torch\n'), ((1077, 1092), 'torch.pow', 'torch.pow', (['x', '(2)'], {}), '(x, 2)\n', (1086, 1092), False, 'import torch\n'), ((1291, 1314), 'torch.cat', 'torch.cat', (['(m1, rm2)', '(1)'], {}), '((m1, rm2), 1)\n', (1300, 1314), False, 'import torch\n')] |
from time import time
import numpy as np
import math
import random
from abc import ABCMeta, abstractmethod, abstractproperty
from copy import deepcopy
class GeneticThing(metaclass=ABCMeta):
    """Interface for an individual evolvable by GeneticAlgorithm.

    Subclasses must expose a scalar `fitness`, support mutation, crossover
    and a distance metric, and provide the arithmetic (add / subtract /
    divide_by) used to compute the population's mean individual.

    Bug fix: the original declared ``__metaclass__ = ABCMeta``, which is the
    Python 2 protocol and is silently ignored by Python 3 — the abstract
    methods were never actually enforced.  ``metaclass=ABCMeta`` fixes that.
    """

    @property
    @abstractmethod
    def fitness(self):
        """Scalar fitness of this individual; larger is better."""

    @abstractmethod
    def mutate(self, r_mutate):
        """Randomly perturb this individual with mutation rate `r_mutate`."""

    @abstractmethod
    def crosswith(self, that_thing):
        """Return an offspring combining this individual with `that_thing`.

        NOTE(review): GeneticAlgorithm.evolve calls this with an extra mixing
        ratio argument; implementations should accept it — confirm intended
        signature.
        """

    @abstractmethod
    def distanceto(self, that_thing):
        """Return a distance between this individual and `that_thing`."""

    # These are for computing the mean individual
    @abstractmethod
    def add(self, that_thing):
        """Accumulate `that_thing` into this individual."""

    @abstractmethod
    def subtract(self, that_thing):
        """Subtract `that_thing` from this individual."""

    @abstractmethod
    def divide_by(self, divisor):
        """Divide this individual by the scalar `divisor`."""
class GeneticAlgorithm():
    """Genetic algorithm with apex (elite) preservation.

    Individuals must implement the GeneticThing interface.  Selection ranks
    individuals by a blend of normalized fitness and distance to the mean
    individual (to preserve diversity).  Individuals whose fitness exceeds
    mean + apex_stddev * stddev are carried over as "apexes".
    """

    def __init__(self, population_size,
                 r_mutation=0.04,
                 apex_stddev=1):
        self.generation = 0
        # Steady state population size
        self._population_size = population_size
        self._r_mutation = r_mutation
        self.apex_stddev = apex_stddev
        self.population = []
        self.apexes = []

    @property
    def population_size(self):
        """Target size: decays exponentially from 10x the steady-state size at
        generation 0 down toward the steady-state size."""
        population_size = 10 * self._population_size * np.exp(-0.5 * self.generation) + self._population_size
        return int(population_size)

    def _selection_base(self, population_size):
        """Number of top individuals kept as parents (at least 1).

        Note: unreachable legacy code after the return (a generation-dependent
        formula) has been removed; behaviour is unchanged.
        """
        selection_base = self._population_size - len(self.apexes)
        if selection_base < 1:
            selection_base = 1
        return selection_base

    def _mutation_rate(self):
        """Mutation rate annealed from 1.0 at generation 0 toward _r_mutation."""
        r_mutation = (1.0 - self._r_mutation) * np.exp(-0.005 * self.generation) + self._r_mutation
        return r_mutation

    def append(self, thing):
        """Add an individual to the population."""
        self.population.append(thing)

    def __iter__(self):
        return iter(self.population)

    def evolve(self):
        """Advance the population by one generation.

        Returns:
            The summed fitness of the selected parent population.
        """
        population_size = self.population_size
        selection_base = self._selection_base(population_size)
        r_mutation = self._mutation_rate()
        selection_size = int(population_size / 2.0)
        apex_maxsize = int(0.2 * selection_base)
        if selection_size < 1:
            selection_size = 1
        if apex_maxsize < 1:
            apex_maxsize = 1
        # Keep the fittest `selection_base` individuals plus stored apexes.
        self.population.sort(key=lambda s: -s.fitness)
        self.population = self.population[0:selection_base]
        self.population.extend(self.apexes)
        fitness = [thing.fitness for thing in self.population]
        sum_fitness = sum(fitness)
        max_fitness = max(fitness)
        mean_fitness = np.mean(fitness)
        stddev_fitness = np.sqrt(np.var(fitness))
        apex_cutoff = mean_fitness + self.apex_stddev * stddev_fitness
        p_fitness = lambda i: fitness[i] / max_fitness
        # Distance to the mean individual is the diversity measure.
        # (This mean was previously computed twice; the first, discarded copy
        # has been removed.)
        population_mean = deepcopy(self.population[0])
        for thing in self.population[1:]:
            population_mean.add(thing)
        population_mean.divide_by(len(self.population))
        distances = [thing.distanceto(population_mean) for thing in self.population]
        max_distance = max(distances)
        p_distance = lambda i: distances[i] / max_distance
        # Rank function: 70% normalized fitness + 30% diversity; pure fitness
        # when all individuals coincide (avoids division by zero).
        f_rank = lambda i: p_fitness(i) * 0.7 + 0.3 * p_distance(i)
        if max_distance == 0:
            f_rank = lambda i: p_fitness(i)
        rankings = [f_rank(i) for i in range(len(self.population))]
        # Apexes: individuals more than apex_stddev above the mean fitness.
        i_apex = list(filter(lambda i: fitness[i] > apex_cutoff, range(len(self.population))))
        if len(i_apex) > apex_maxsize:
            i_apex = range(apex_maxsize)
        self.apexes = [deepcopy(self.population[i]) for i in i_apex]
        print("Generation: {}, mean(fitness): {:.2f}, stddev(fitness): {:.2f}, r_mutation: {:.2f}".format(self.generation,
                                                                                                          mean_fitness,
                                                                                                          stddev_fitness,
                                                                                                          r_mutation))
        for i in i_apex:
            print("  apex - fitness: {:.2f}, distance: {:.2f}, rank: {:.2f}".format(fitness[i], distances[i], rankings[i]))
        next_generation = []
        trials = 0
        if self.generation < 3:
            # Early generations: keep selected ancestors plus mutated copies.
            i_selections = []
            i_selections += i_apex
            while len(i_selections) < selection_size and (trials < (100 * population_size)):
                trials += 1
                i = random.randint(0, len(self.population) - 1)
                if i in i_selections:
                    continue
                p_selection = rankings[i]
                if random.random() < p_selection:
                    i_selections.append(i)
            for i1 in i_selections:
                ancestor1 = self.population[i1]
                fitness1 = p_fitness(i1)
                mutant1 = deepcopy(ancestor1)
                mutant1.mutate(r_mutation * (1 - 0.5 * fitness1))
                next_generation.append(ancestor1)
                next_generation.append(mutant1)
        else:
            # Later generations: rank-proportional parent selection + crossover.
            while len(next_generation) < population_size:
                i1 = random.randint(0, len(self.population) - 1)
                i2 = random.randint(0, len(self.population) - 1)
                p_selection1 = rankings[i1]
                p_selection2 = rankings[i2]
                if random.random() < p_selection1 and random.random() < p_selection2:
                    ancestor1 = self.population[i1]
                    ancestor2 = self.population[i2]
                    fitness1 = p_fitness(i1)
                    fitness2 = p_fitness(i2)
                    offspring1 = ancestor1.crosswith(ancestor2, fitness2 / (fitness1 + fitness2))
                    next_generation.append(offspring1)
        self.population = next_generation
        self.generation += 1
        return sum_fitness
| [
"copy.deepcopy",
"random.random",
"numpy.mean",
"numpy.exp",
"numpy.var"
] | [((2619, 2647), 'copy.deepcopy', 'deepcopy', (['self.population[0]'], {}), '(self.population[0])\n', (2627, 2647), False, 'from copy import deepcopy\n'), ((2953, 2969), 'numpy.mean', 'np.mean', (['fitness'], {}), '(fitness)\n', (2960, 2969), True, 'import numpy as np\n'), ((3235, 3263), 'copy.deepcopy', 'deepcopy', (['self.population[0]'], {}), '(self.population[0])\n', (3243, 3263), False, 'from copy import deepcopy\n'), ((3003, 3018), 'numpy.var', 'np.var', (['fitness'], {}), '(fitness)\n', (3009, 3018), True, 'import numpy as np\n'), ((4023, 4051), 'copy.deepcopy', 'deepcopy', (['self.population[i]'], {}), '(self.population[i])\n', (4031, 4051), False, 'from copy import deepcopy\n'), ((1214, 1244), 'numpy.exp', 'np.exp', (['(-0.5 * self.generation)'], {}), '(-0.5 * self.generation)\n', (1220, 1244), True, 'import numpy as np\n'), ((1558, 1588), 'numpy.exp', 'np.exp', (['(-self.generation / 5.0)'], {}), '(-self.generation / 5.0)\n', (1564, 1588), True, 'import numpy as np\n'), ((1823, 1855), 'numpy.exp', 'np.exp', (['(-0.005 * self.generation)'], {}), '(-0.005 * self.generation)\n', (1829, 1855), True, 'import numpy as np\n'), ((5393, 5412), 'copy.deepcopy', 'deepcopy', (['ancestor1'], {}), '(ancestor1)\n', (5401, 5412), False, 'from copy import deepcopy\n'), ((5167, 5182), 'random.random', 'random.random', ([], {}), '()\n', (5180, 5182), False, 'import random\n'), ((5882, 5897), 'random.random', 'random.random', ([], {}), '()\n', (5895, 5897), False, 'import random\n'), ((5917, 5932), 'random.random', 'random.random', ([], {}), '()\n', (5930, 5932), False, 'import random\n')] |
import numpy as np
import matplotlib.pyplot as plt

# Plot both branches of the hyperbola y = 1/x, skipping the
# singularity at x = 0.
left_branch = np.linspace(-10, -0.1, 30)
right_branch = np.linspace(0.1, 10, 30)

plt.xlim(-10, 10)
plt.plot(left_branch, 1 / left_branch)
plt.plot(right_branch, 1 / right_branch)
plt.show()
| [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.linspace"
] | [((57, 83), 'numpy.linspace', 'np.linspace', (['(-10)', '(-0.1)', '(30)'], {}), '(-10, -0.1, 30)\n', (68, 83), True, 'import numpy as np\n'), ((101, 125), 'numpy.linspace', 'np.linspace', (['(0.1)', '(10)', '(30)'], {}), '(0.1, 10, 30)\n', (112, 125), True, 'import numpy as np\n'), ((138, 155), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-10)', '(10)'], {}), '(-10, 10)\n', (146, 155), True, 'import matplotlib.pyplot as plt\n'), ((156, 172), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', 'y1'], {}), '(x1, y1)\n', (164, 172), True, 'import matplotlib.pyplot as plt\n'), ((173, 189), 'matplotlib.pyplot.plot', 'plt.plot', (['x2', 'y2'], {}), '(x2, y2)\n', (181, 189), True, 'import matplotlib.pyplot as plt\n'), ((190, 200), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (198, 200), True, 'import matplotlib.pyplot as plt\n')] |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Entities representing board game pieces."""
import itertools
from dm_control import composer
from dm_control import mjcf
from dm_control.composer.observation import observable
import numpy as np
# Site group indices used to toggle marker visibility in the renderer.
_VISIBLE_SITE_GROUP = 0
_INVISIBLE_SITE_GROUP = 3
# Default per-player marker colors, as (R, G, B, A) with 50% opacity.
_RED = (1., 0., 0., 0.5)
_BLUE = (0., 0, 1., 0.5)
# Error message templates raised by `Markers.mark`.
_INVALID_PLAYER_ID = '`player_id` must be between 0 and {}, got {}.'
_NO_MORE_MARKERS_AVAILABLE = (
    'All {} markers for player {} have already been placed.')
class Markers(composer.Entity):
  """A collection of non-physical entities for marking board positions."""

  def _build(self,
             num_per_player,
             player_colors=(_RED, _BLUE),
             halfwidth=0.025,
             height=0.01,
             board_size=7):
    """Builds a `Markers` entity.

    Args:
      num_per_player: Integer, the total number of markers to create per player.
      player_colors: Sequence of (R, G, B, A) values specifying the marker
        colors for each player.
      halfwidth: Scalar, the halfwidth of each marker.
      height: Scalar, height of each marker.
      board_size: Integer, optional if using the integer indexing.
    """
    root = mjcf.RootElement(model='markers')
    root.default.site.set_attributes(type='cylinder', size=(halfwidth, height))
    all_markers = []
    for i, color in enumerate(player_colors):
      player_name = 'player_{}'.format(i)
      # TODO(alimuldal): Would look cool if these were textured.
      material = root.asset.add('material', name=player_name, rgba=color)
      player_markers = []
      for j in range(num_per_player):
        player_markers.append(
            root.worldbody.add(
                'site',
                name='player_{}_move_{}'.format(i, j),
                material=material))
      all_markers.append(player_markers)
    self._num_players = len(player_colors)
    self._mjcf_model = root
    self._all_markers = all_markers
    self._move_counts = [0] * self._num_players
    # Maps (player, row, col) board positions to indices into `all_markers`.
    # Bug fix: previously hard-coded to 2 players and allocated with float
    # dtype, inconsistent with `_reset`; int32 entries are valid list indices.
    self._marker_ids = np.zeros((self._num_players, board_size, board_size),
                               dtype=np.int32)
    self._board_size = board_size

  def _build_observables(self):
    """Returns the observables associated with this entity."""
    return MarkersObservables(self)

  @property
  def mjcf_model(self):
    """`mjcf.RootElement` for this entity."""
    return self._mjcf_model

  @property
  def markers(self):
    """Marker sites belonging to all players.

    Returns:
      A nested list, where `markers[i][j]` contains the `mjcf.Element`
      corresponding to player i's jth marker.
    """
    return self._all_markers

  def initialize_episode(self, physics, random_state):
    """Resets the markers at the start of an episode."""
    del random_state  # Unused.
    self._reset(physics)

  def _reset(self, physics):
    """Hides all markers, moves them to the origin and clears move state."""
    for player_markers in self._all_markers:
      for marker in player_markers:
        bound_marker = physics.bind(marker)
        bound_marker.pos = 0.  # Markers are initially placed at the origin.
        bound_marker.group = _INVISIBLE_SITE_GROUP
    self._move_counts = [0] * self._num_players
    self._marker_ids = np.zeros((self._num_players, self._board_size,
                                 self._board_size), dtype=np.int32)

  def make_all_invisible(self, physics):
    """Moves every marker into the invisible site group."""
    for player_markers in self._all_markers:
      for marker in player_markers:
        bound_marker = physics.bind(marker)
        bound_marker.group = _INVISIBLE_SITE_GROUP

  def make_visible_by_bpos(self, physics, player_id, all_bpos):
    """Makes the markers at the given board positions visible.

    Args:
      physics: `mjcf.Physics` instance.
      player_id: Integer ID of the player whose markers to show.
      all_bpos: Iterable of (row, col) board positions previously passed to
        `mark`.
    """
    for bpos in all_bpos:
      marker_id = self._marker_ids[player_id][bpos[0]][bpos[1]]
      marker = self._all_markers[player_id][marker_id]
      bound_marker = physics.bind(marker)
      bound_marker.group = _VISIBLE_SITE_GROUP

  def mark(self, physics, player_id, pos, bpos=None):
    """Enables the visibility of a marker, moves it to the specified position.

    Args:
      physics: `mjcf.Physics` instance.
      player_id: Integer specifying the ID of the player whose marker to use.
      pos: Array-like object specifying the cartesian position of the marker.
      bpos: Board position, optional integer coordinates to index the markers.

    Raises:
      ValueError: If `player_id` is invalid.
      RuntimeError: If `player_id` has no more available markers.
    """
    if not 0 <= player_id < self._num_players:
      raise ValueError(
          _INVALID_PLAYER_ID.format(self._num_players - 1, player_id))
    markers = self._all_markers[player_id]
    move_count = self._move_counts[player_id]
    if move_count >= len(markers):
      raise RuntimeError(
          _NO_MORE_MARKERS_AVAILABLE.format(move_count, player_id))
    bound_marker = physics.bind(markers[move_count])
    bound_marker.pos = pos
    # TODO(alimuldal): Set orientation as well (random? same as contact frame?)
    bound_marker.group = _VISIBLE_SITE_GROUP
    self._move_counts[player_id] += 1
    # Bug fix: explicit None check — the former truthiness test would raise
    # on array-like coordinates and reads as if (0, 0) could be skipped.
    if bpos is not None:
      self._marker_ids[player_id][bpos[0]][bpos[1]] = move_count
class MarkersObservables(composer.Observables):
  """Observables for a `Markers` entity."""

  @composer.observable
  def position(self):
    """Cartesian positions of all marker sites.

    Returns:
      An `observable.MJCFFeature` instance. When called with an instance of
      `physics` as the argument, this will return a numpy float64 array of
      shape (num_players * num_markers, 3) where each row contains the
      cartesian position of a marker. Unplaced markers will have position
      (0, 0, 0).
    """
    # Flatten the per-player nested marker lists into a single sequence.
    flat_markers = list(itertools.chain.from_iterable(self._entity.markers))
    return observable.MJCFFeature('xpos', flat_markers)
| [
"itertools.chain.from_iterable",
"dm_control.mjcf.RootElement",
"numpy.zeros"
] | [((1839, 1872), 'dm_control.mjcf.RootElement', 'mjcf.RootElement', ([], {'model': '"""markers"""'}), "(model='markers')\n", (1855, 1872), False, 'from dm_control import mjcf\n'), ((2737, 2774), 'numpy.zeros', 'np.zeros', (['(2, board_size, board_size)'], {}), '((2, board_size, board_size))\n', (2745, 2774), True, 'import numpy as np\n'), ((3761, 3826), 'numpy.zeros', 'np.zeros', (['(2, self._board_size, self._board_size)'], {'dtype': 'np.int32'}), '((2, self._board_size, self._board_size), dtype=np.int32)\n', (3769, 3826), True, 'import numpy as np\n'), ((6183, 6234), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['self._entity.markers'], {}), '(self._entity.markers)\n', (6212, 6234), False, 'import itertools\n')] |
import numpy as np
from matplotlib import pyplot as plt
from spikewidgets.widgets.basewidget import BaseMultiWidget
import spiketoolkit as st
def plot_pca_features(recording, sorting, unit_ids=None, max_spikes_per_unit=100, nproj=4, colormap=None,
                      figure=None, ax=None, axes=None, **pca_kwargs):
    """
    Plots unit PCA features on best projections.

    Parameters
    ----------
    recording: RecordingExtractor
        The recording extractor object
    sorting: SortingExtractor
        The sorting extractor object
    unit_ids: list
        List of unit ids
    max_spikes_per_unit: int
        Maximum number of spikes to display per unit
    nproj: int
        Number of best projections to display
    colormap: matplotlib colormap
        The colormap to be used. If not given default is used
    figure: matplotlib figure
        The figure to be used. If not given a figure is created
    ax: matplotlib axis
        The axis to be used. If not given an axis is created
    axes: list of matplotlib axes
        The axes to be used for the individual plots. If given, the ax and
        figure parameters are ignored
    pca_kwargs: keyword arguments for st.postprocessing.compute_unit_pca_scores()

    Returns
    -------
    W: PCAWidget
        The output widget
    """
    # Note: max_spikes_per_unit is absorbed into PCAWidget's **pca_kwargs and
    # forwarded to the PCA-score computation.
    widget = PCAWidget(
        recording=recording,
        sorting=sorting,
        unit_ids=unit_ids,
        max_spikes_per_unit=max_spikes_per_unit,
        nproj=nproj,
        colormap=colormap,
        figure=figure,
        ax=ax,
        axes=axes,
        **pca_kwargs
    )
    widget.plot()
    return widget
class PCAWidget(BaseMultiWidget):
    """Widget that scatter-plots unit PCA scores on the best-separating projections."""
    def __init__(self, *, recording, sorting, unit_ids=None, nproj=4, colormap=None,
                 figure=None, ax=None, axes=None, **pca_kwargs):
        # NOTE(review): extra keyword args (e.g. max_spikes_per_unit passed by
        # plot_pca_features) land in pca_kwargs and are forwarded verbatim to
        # st.postprocessing.compute_unit_pca_scores - verify it accepts them.
        BaseMultiWidget.__init__(self, figure, ax, axes)
        self._sorting = sorting
        self._recording = recording
        self._unit_ids = unit_ids
        self._pca_scores = None  # computed lazily on first plot()
        self._nproj = nproj
        self._colormap = colormap
        self._pca_kwargs = pca_kwargs
        self.name = 'Feature'
    def _compute_pca(self):
        # One entry per unit; the indexing below (pcs[:, ch, pc]) implies each
        # entry is shaped (n_spikes, n_channels, n_pc) - TODO confirm upstream.
        self._pca_scores = st.postprocessing.compute_unit_pca_scores(recording=self._recording,
                                                                   sorting=self._sorting,
                                                                   **self._pca_kwargs)
    def plot(self):
        """Compute PCA scores if needed and render the projections."""
        self._do_plot()
    def _do_plot(self):
        units = self._unit_ids
        if units is None:
            units = self._sorting.get_unit_ids()
        self._units = units
        if self._pca_scores is None:
            self._compute_pca()
        # find projections with best separation
        n_pc = self._pca_scores[0].shape[2]
        n_ch = self._pca_scores[0].shape[1]
        distances = []
        proj = []
        # Score every distinct pair of (channel, pc) axes by how far apart the
        # unit centroids are; the symmetric duplicate [ch2, pc2, ch1, pc1] is
        # skipped so each plane is evaluated only once.
        for ch1 in range(n_ch):
            for pc1 in range(n_pc):
                for ch2 in range(n_ch):
                    for pc2 in range(n_pc):
                        if ch1 != ch2 or pc1 != pc2:
                            dist = self.compute_cluster_average_distance(
                                pc1, ch1, pc2, ch2)
                            if [ch1, pc1, ch2, pc2] not in proj and [
                                    ch2, pc2, ch1, pc1] not in proj:
                                distances.append(dist)
                                proj.append([ch1, pc1, ch2, pc2])
        # Keep the self._nproj planes with the largest average centroid distance.
        list_best_proj = np.array(proj)[np.argsort(distances)[::-1][:self._nproj]]
        self._plot_proj_multi(list_best_proj)
    def compute_cluster_average_distance(self, pc1, ch1, pc2, ch2):
        """Mean pairwise distance between unit centroids in the (ch1/pc1, ch2/pc2) plane."""
        centroids = np.zeros((len(self._pca_scores), 2))
        for i, pcs in enumerate(self._pca_scores):
            # Median over spikes gives a robust per-unit centroid coordinate.
            centroids[i, 0] = np.median(pcs[:, ch1, pc1], axis=0)
            centroids[i, 1] = np.median(pcs[:, ch2, pc2], axis=0)
        dist = []
        for i, c1 in enumerate(centroids):
            for j, c2 in enumerate(centroids):
                if i > j:  # each unordered pair once
                    dist.append(np.linalg.norm(c2 - c1))
        return np.mean(dist)
    def _plot_proj_multi(self, best_proj, ncols=5):
        # Lay the projections out on a grid of at most `ncols` columns.
        if len(best_proj) < ncols:
            ncols = len(best_proj)
        nrows = np.ceil(len(best_proj) / ncols)  # NOTE(review): float row count; presumably get_tiled_ax accepts it - confirm
        for i, bp in enumerate(best_proj):
            ax = self.get_tiled_ax(i, nrows, ncols, hspace=0.3)
            self._plot_proj(proj=bp, ax=ax)
    def _plot_proj(self, *, proj, ax, title=''):
        """Scatter all selected units' scores on one (ch1/pc1 vs ch2/pc2) plane."""
        ch1, pc1, ch2, pc2 = proj
        if self._colormap is not None:
            cm = plt.get_cmap(self._colormap)
            colors = [cm(i / len(self._pca_scores))
                      for i in np.arange(len(self._pca_scores))]
        else:
            colors = [None] * len(self._pca_scores)
        for i, pc in enumerate(self._pca_scores):
            # Only draw units the caller asked for (self._units).
            if self._sorting.get_unit_ids()[i] in self._units:
                ax.plot(pc[:, ch1, pc1], pc[:, ch2, pc2],
                        '*', color=colors[i], alpha=0.3)
        ax.set_yticks([])
        ax.set_xticks([])
        ax.set_xlabel('ch {} - pc {}'.format(ch1, pc1))
        ax.set_ylabel('ch {} - pc {}'.format(ch2, pc2))
        if title:
            ax.set_title(title, color='gray')
| [
"matplotlib.pyplot.get_cmap",
"spiketoolkit.postprocessing.compute_unit_pca_scores",
"numpy.median",
"spikewidgets.widgets.basewidget.BaseMultiWidget.__init__",
"numpy.argsort",
"numpy.mean",
"numpy.array",
"numpy.linalg.norm"
] | [((1863, 1911), 'spikewidgets.widgets.basewidget.BaseMultiWidget.__init__', 'BaseMultiWidget.__init__', (['self', 'figure', 'ax', 'axes'], {}), '(self, figure, ax, axes)\n', (1887, 1911), False, 'from spikewidgets.widgets.basewidget import BaseMultiWidget\n'), ((2232, 2347), 'spiketoolkit.postprocessing.compute_unit_pca_scores', 'st.postprocessing.compute_unit_pca_scores', ([], {'recording': 'self._recording', 'sorting': 'self._sorting'}), '(recording=self._recording,\n sorting=self._sorting, **self._pca_kwargs)\n', (2273, 2347), True, 'import spiketoolkit as st\n'), ((4173, 4186), 'numpy.mean', 'np.mean', (['dist'], {}), '(dist)\n', (4180, 4186), True, 'import numpy as np\n'), ((3552, 3566), 'numpy.array', 'np.array', (['proj'], {}), '(proj)\n', (3560, 3566), True, 'import numpy as np\n'), ((3863, 3898), 'numpy.median', 'np.median', (['pcs[:, ch1, pc1]'], {'axis': '(0)'}), '(pcs[:, ch1, pc1], axis=0)\n', (3872, 3898), True, 'import numpy as np\n'), ((3929, 3964), 'numpy.median', 'np.median', (['pcs[:, ch2, pc2]'], {'axis': '(0)'}), '(pcs[:, ch2, pc2], axis=0)\n', (3938, 3964), True, 'import numpy as np\n'), ((4649, 4677), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['self._colormap'], {}), '(self._colormap)\n', (4661, 4677), True, 'from matplotlib import pyplot as plt\n'), ((3567, 3588), 'numpy.argsort', 'np.argsort', (['distances'], {}), '(distances)\n', (3577, 3588), True, 'import numpy as np\n'), ((4132, 4155), 'numpy.linalg.norm', 'np.linalg.norm', (['(c2 - c1)'], {}), '(c2 - c1)\n', (4146, 4155), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os
import shutil
import json
import cv2
import numpy as np
classname2id = {'tampered':1} #0是背景
class txt2coco:
    """Convert mask images or txt polygon annotations into a COCO-format dataset.

    Workflow: parse_mask_img() or parse_txt() fills self.total_annos
    (image stem -> list of flat polygons), then to_coco() assembles the
    COCO dict and savejson() writes it out.
    """
    def __init__(self, image_dir, mask_file, gt_dir=None):
        """
        image_dir: directory with the source images.
        mask_file: directory with the binary mask images (for parse_mask_img).
        gt_dir: directory with the txt annotation files (for parse_txt);
            defaults to mask_file for backward compatibility.
        """
        self.images = []
        self.annotations = []
        self.categories = []
        self.img_id = 0
        self.ann_id = 0
        self.image_dir = image_dir
        self.total_annos = {}  # image stem -> list of polygons [x1, y1, x2, y2, ...]
        self.mask_file = mask_file
        # Bug fix: self.gt was never initialised before, so parse_txt()
        # raised AttributeError.
        self.gt = gt_dir if gt_dir is not None else mask_file
    # Build the COCO category list (0 is background, real ids start at 1).
    def _init_categories(self):
        for k, v in classname2id.items():
            category = {}
            category['id'] = v
            category['name'] = k
            self.categories.append(category)
    # Build the COCO "image" record for one file.
    def _image(self, path):
        image = {}
        print(path)
        img = cv2.imread(os.path.join(self.image_dir, path))
        image['height'] = img.shape[0]
        image['width'] = img.shape[1]
        image['id'] = self.img_id
        image['file_name'] = path
        return image
    # Build a COCO "annotation" record for one polygon.
    def _annotation(self, points, label=None):
        label = 'tampered' if label is None else label
        annotation = {}
        annotation['id'] = self.ann_id
        annotation['image_id'] = self.img_id
        annotation['category_id'] = int(classname2id[label])
        annotation['segmentation'] = self._get_seg(points)
        box, area = self._get_box_area(points)
        annotation['bbox'] = box
        annotation['iscrowd'] = 0
        annotation['area'] = area
        return annotation
    # COCO bbox format: [x, y, w, h] with (x, y) the top-left corner.
    def _get_box_area(self, points):
        # points is a flat list [x1, y1, x2, y2, ...]
        x, y, w, h = cv2.boundingRect(np.array(points).reshape(-1, 2).astype(int))
        area = w * h
        return [x, y, w, h], area
    # COCO segmentation: a list of flat polygons.
    def _get_seg(self, points):
        return [points]
    def savejson(self, instance, save_pth):
        # Bug fix: the file handle was previously leaked (open() without close).
        with open(save_pth, 'w') as f:
            json.dump(instance, f, ensure_ascii=False, indent=2)
    # Assemble the full COCO structure from the parsed annotations.
    def to_coco(self):
        self._init_categories()
        for key in os.listdir(self.image_dir):
            self.images.append(self._image(key))
            annos = self.total_annos[key.split('.')[0]]
            for point in annos:
                annotation = self._annotation(point)
                self.annotations.append(annotation)
                self.ann_id += 1
            self.img_id += 1
        instance = {}
        instance['images'] = self.images
        instance['annotations'] = self.annotations
        instance['categories'] = self.categories
        return instance
    def parse_mask_img(self):
        """Extract min-area-rectangle polygons from the binary mask images."""
        for mask_file in os.listdir(self.mask_file):
            self.total_annos[mask_file.split('.')[0]] = []
            gray = cv2.imread(os.path.join(self.mask_file, mask_file), 0)
            # Grey -> binary
            ret, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
            # Contour detection
            contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            for contour in contours:
                points = []
                rect = cv2.minAreaRect(contour)  # ((cx, cy), (w, h), rotation angle)
                box = cv2.boxPoints(rect)  # the 4 corner points of that rectangle
                for p in box:
                    points.extend([float(p[0]), float(p[1])])
                self.total_annos[mask_file.split('.')[0]].append(points)
    def parse_txt(self):
        """Read tab-separated 8-value polygon annotations from txt files in self.gt."""
        for txt_file in os.listdir(self.gt):
            self.total_annos[txt_file.split('.')[0]] = []
            with open(os.path.join(self.gt, txt_file), 'r') as f:
                for line in f.readlines():
                    line = line.strip()
                    points = line.split('\t')[:8]
                    points = list(map(float, points))
                    self.total_annos[txt_file.split('.')[0]].append(points)
def main(mask_file, imgfile, save_pth):
    """End-to-end conversion: parse mask images, build the COCO dict, write JSON."""
    converter = txt2coco(imgfile, mask_file)
    converter.parse_mask_img()
    coco_dict = converter.to_coco()
    converter.savejson(coco_dict, save_pth)
if __name__ == '__main__':
    # Manual invocation with hard-coded local paths (txt ground-truth variant).
    mask_file = '/Users/duoduo/Desktop/天池图片篡改检测/s2_data/data/small_train/mask'#'/liangxiaoyun583/data/FakeImg_Detection_Competition/data/tianchi_1_data/crop_mask_polygon'
    imgfile = '/Users/duoduo/Desktop/天池图片篡改检测/s2_data/data/small_train/images'#'/liangxiaoyun583/data/FakeImg_Detection_Competition/data/tianchi_1_data/crop_images'
    save_pth = '/Users/duoduo/Desktop/天池图片篡改检测/s2_data/data/small_train/train.json'
    func = txt2coco(imgfile, mask_file)
    # func.parse_mask_img()
    # NOTE(review): parse_txt() reads self.gt - make sure the constructor
    # initialises that attribute before running this path.
    func.parse_txt()
    instance = func.to_coco()
func.savejson(instance, save_pth) | [
"cv2.threshold",
"cv2.boxPoints",
"numpy.array",
"cv2.minAreaRect",
"os.path.join",
"os.listdir",
"cv2.findContours"
] | [((2063, 2089), 'os.listdir', 'os.listdir', (['self.image_dir'], {}), '(self.image_dir)\n', (2073, 2089), False, 'import os\n'), ((2638, 2664), 'os.listdir', 'os.listdir', (['self.mask_file'], {}), '(self.mask_file)\n', (2648, 2664), False, 'import os\n'), ((3708, 3727), 'os.listdir', 'os.listdir', (['self.gt'], {}), '(self.gt)\n', (3718, 3727), False, 'import os\n'), ((757, 791), 'os.path.join', 'os.path.join', (['self.image_dir', 'path'], {}), '(self.image_dir, path)\n', (769, 791), False, 'import os\n'), ((2851, 2899), 'cv2.threshold', 'cv2.threshold', (['gray', '(127)', '(255)', 'cv2.THRESH_BINARY'], {}), '(gray, 127, 255, cv2.THRESH_BINARY)\n', (2864, 2899), False, 'import cv2\n'), ((3113, 3177), 'cv2.findContours', 'cv2.findContours', (['binary', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (3129, 3177), False, 'import cv2\n'), ((2755, 2794), 'os.path.join', 'os.path.join', (['self.mask_file', 'mask_file'], {}), '(self.mask_file, mask_file)\n', (2767, 2794), False, 'import os\n'), ((3372, 3396), 'cv2.minAreaRect', 'cv2.minAreaRect', (['contour'], {}), '(contour)\n', (3387, 3396), False, 'import cv2\n'), ((3454, 3473), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (3467, 3473), False, 'import cv2\n'), ((3809, 3840), 'os.path.join', 'os.path.join', (['self.gt', 'txt_file'], {}), '(self.gt, txt_file)\n', (3821, 3840), False, 'import os\n'), ((1642, 1658), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (1650, 1658), True, 'import numpy as np\n')] |
#!usr/bin/env python
#coding:utf8
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from pathlib import Path
import numpy as np
from PIL import Image
from PIL import ImageFilter
from keras.utils import plot_model
from ann_visualizer.visualize import ann_viz
from keras.models import load_model
def plotWordcloud(s, t):
    """Render a word cloud for the preprocessed text file ``s``.

    s: stem of the csv under ../build/preprocessed/ to read the text from.
    t: filename of a mask image under ../data/pictures/; an empty string
       means no mask.
    The cloud is saved to ../build/plots/<s>_wordcloud.pdf.
    """
    mask_array = np.array(Image.open('../data/pictures/'+t)) if t != "" else None
    contents = Path('../build/preprocessed/'+s+".csv").read_text()
    cloud = WordCloud(background_color='black',
                      width=1920,
                      height=1080,
                      mask=mask_array).generate(contents)
    plt.imshow(cloud, interpolation='bilinear')
    plt.axis('off')
    plt.margins(x=0, y=0)
    plt.savefig("../build/plots/"+s+"_wordcloud.pdf")
    plt.clf()
# Word clouds of the newspaper headlines (stemmed / lemmatized fake news).
plotWordcloud("fake_news_titles_stem","trump_silhouette.png")
plotWordcloud("fake_news_titles_lem", "trump_silhouette.png")
#plotWordcloud("real_news_titles_lem","statue_of_liberty.png")
plotWordcloud("bow_feature_names","")
# Misclassified RNN texts.
plotWordcloud("false_classified_rnn","")
# Load the bag-of-words matrices and average them over samples
# (mean word frequency per feature).
X_train = np.genfromtxt("../build/preprocessed/bow_X_train.txt")
size = len(X_train)
X_train = np.sum(X_train, axis=0)/size
y_train = np.genfromtxt("../build/preprocessed/bow_y_train.txt", unpack=True)
X_test = np.genfromtxt("../build/preprocessed/bow_X_test.txt")
size = len(X_test)
X_test = np.sum(X_test, axis=0)/size
y_test = np.genfromtxt("../build/preprocessed/bow_y_test.txt", unpack=True)
# Feature (word) names, one per line in the csv.
keys=[]
dim=1000
with open("../build/preprocessed/bow_feature_names.csv", "r") as file:
    for line in file:
        current = line[:-1]
        keys.append(current)
# NOTE(review): assumes the BoW vectors have exactly dim=1000 features
# matching the names read above - confirm against the preprocessing step.
weights=np.arange(0,dim)
# Mean feature frequency: train vs. test split.
plt.bar(weights,X_train,alpha=0.8,label="train", color="r")
plt.bar(weights,X_test, alpha=0.4, label="test", color="b")
plt.xticks(range(len(keys)), keys, rotation='vertical', size='small')
plt.legend()
plt.tight_layout()
plt.savefig("../build/plots/comparisonX_train_test.pdf")
plt.clf()
# Label distribution: train vs. test split.
plt.hist(y_train,density=True,color="r",alpha=0.4,label="train")
plt.hist(y_test, density=True, color="b", alpha=0.4,label="test")
plt.xticks(range(2), ["fake","real"])
plt.legend()
plt.tight_layout()
plt.savefig("../build/plots/comparisonY_train_test.pdf")
plt.clf()
# Reload the raw training matrix and split it by label (0 = fake, 1 = real).
X_train = np.genfromtxt("../build/preprocessed/bow_X_train.txt")
X_fake=[]
X_real=[]
for i in range(len(X_train)):
    if(y_train[i]==0):
        X_fake.append(X_train[i,:])
    else:
        X_real.append(X_train[i,:])
size = len(X_fake)
X_fake=np.sum(X_fake,axis=0)/size
size = len(X_real)
X_real = np.sum(X_real, axis=0)/size
# Mean feature frequency: fake vs. real articles.
fig = plt.figure(figsize=(24, 20))
plt.bar(weights, X_fake, alpha=0.8, label="fake", color="r")
plt.bar(weights, X_real, alpha=0.4, label="real", color="b")
plt.xticks(range(len(keys)), keys, rotation=30, size=1)
plt.legend()
plt.tight_layout()
plt.savefig("../build/plots/comparisonX_fake_real.pdf")
plt.clf()
# Visualize the architecture of the best BoW model found by Hyperopt.
model = load_model('../model/best_Hyperopt_NN_bow_regularization.hdf5')
plot_model(model, to_file='../build/plots/opt_model_bow.pdf',
           show_shapes=True, show_layer_names=True)
ann_viz(model, title="BoW-DNN",filename="../build/plots/bow_dnn_graph")
| [
"keras.models.load_model",
"numpy.sum",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.margins",
"matplotlib.pyplot.bar",
"wordcloud.WordCloud",
"ann_visualizer.visualize.ann_viz",
"matplotlib.pyplot.figure",
"pathlib.Path",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ims... | [((1217, 1271), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../build/preprocessed/bow_X_train.txt"""'], {}), "('../build/preprocessed/bow_X_train.txt')\n", (1230, 1271), True, 'import numpy as np\n'), ((1341, 1408), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../build/preprocessed/bow_y_train.txt"""'], {'unpack': '(True)'}), "('../build/preprocessed/bow_y_train.txt', unpack=True)\n", (1354, 1408), True, 'import numpy as np\n'), ((1418, 1471), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../build/preprocessed/bow_X_test.txt"""'], {}), "('../build/preprocessed/bow_X_test.txt')\n", (1431, 1471), True, 'import numpy as np\n'), ((1537, 1603), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../build/preprocessed/bow_y_test.txt"""'], {'unpack': '(True)'}), "('../build/preprocessed/bow_y_test.txt', unpack=True)\n", (1550, 1603), True, 'import numpy as np\n'), ((1779, 1796), 'numpy.arange', 'np.arange', (['(0)', 'dim'], {}), '(0, dim)\n', (1788, 1796), True, 'import numpy as np\n'), ((1804, 1866), 'matplotlib.pyplot.bar', 'plt.bar', (['weights', 'X_train'], {'alpha': '(0.8)', 'label': '"""train"""', 'color': '"""r"""'}), "(weights, X_train, alpha=0.8, label='train', color='r')\n", (1811, 1866), True, 'import matplotlib.pyplot as plt\n'), ((1864, 1924), 'matplotlib.pyplot.bar', 'plt.bar', (['weights', 'X_test'], {'alpha': '(0.4)', 'label': '"""test"""', 'color': '"""b"""'}), "(weights, X_test, alpha=0.4, label='test', color='b')\n", (1871, 1924), True, 'import matplotlib.pyplot as plt\n'), ((1994, 2006), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2004, 2006), True, 'import matplotlib.pyplot as plt\n'), ((2007, 2025), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2023, 2025), True, 'import matplotlib.pyplot as plt\n'), ((2026, 2082), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../build/plots/comparisonX_train_test.pdf"""'], {}), "('../build/plots/comparisonX_train_test.pdf')\n", (2037, 2082), True, 'import 
matplotlib.pyplot as plt\n'), ((2083, 2092), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2090, 2092), True, 'import matplotlib.pyplot as plt\n'), ((2093, 2161), 'matplotlib.pyplot.hist', 'plt.hist', (['y_train'], {'density': '(True)', 'color': '"""r"""', 'alpha': '(0.4)', 'label': '"""train"""'}), "(y_train, density=True, color='r', alpha=0.4, label='train')\n", (2101, 2161), True, 'import matplotlib.pyplot as plt\n'), ((2158, 2224), 'matplotlib.pyplot.hist', 'plt.hist', (['y_test'], {'density': '(True)', 'color': '"""b"""', 'alpha': '(0.4)', 'label': '"""test"""'}), "(y_test, density=True, color='b', alpha=0.4, label='test')\n", (2166, 2224), True, 'import matplotlib.pyplot as plt\n'), ((2262, 2274), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2272, 2274), True, 'import matplotlib.pyplot as plt\n'), ((2275, 2293), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2291, 2293), True, 'import matplotlib.pyplot as plt\n'), ((2294, 2350), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../build/plots/comparisonY_train_test.pdf"""'], {}), "('../build/plots/comparisonY_train_test.pdf')\n", (2305, 2350), True, 'import matplotlib.pyplot as plt\n'), ((2351, 2360), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2358, 2360), True, 'import matplotlib.pyplot as plt\n'), ((2372, 2426), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../build/preprocessed/bow_X_train.txt"""'], {}), "('../build/preprocessed/bow_X_train.txt')\n", (2385, 2426), True, 'import numpy as np\n'), ((2698, 2726), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(24, 20)'}), '(figsize=(24, 20))\n', (2708, 2726), True, 'import matplotlib.pyplot as plt\n'), ((2727, 2787), 'matplotlib.pyplot.bar', 'plt.bar', (['weights', 'X_fake'], {'alpha': '(0.8)', 'label': '"""fake"""', 'color': '"""r"""'}), "(weights, X_fake, alpha=0.8, label='fake', color='r')\n", (2734, 2787), True, 'import matplotlib.pyplot as plt\n'), ((2788, 2848), 
'matplotlib.pyplot.bar', 'plt.bar', (['weights', 'X_real'], {'alpha': '(0.4)', 'label': '"""real"""', 'color': '"""b"""'}), "(weights, X_real, alpha=0.4, label='real', color='b')\n", (2795, 2848), True, 'import matplotlib.pyplot as plt\n'), ((2905, 2917), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2915, 2917), True, 'import matplotlib.pyplot as plt\n'), ((2918, 2936), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2934, 2936), True, 'import matplotlib.pyplot as plt\n'), ((2937, 2992), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../build/plots/comparisonX_fake_real.pdf"""'], {}), "('../build/plots/comparisonX_fake_real.pdf')\n", (2948, 2992), True, 'import matplotlib.pyplot as plt\n'), ((2993, 3002), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3000, 3002), True, 'import matplotlib.pyplot as plt\n'), ((3012, 3075), 'keras.models.load_model', 'load_model', (['"""../model/best_Hyperopt_NN_bow_regularization.hdf5"""'], {}), "('../model/best_Hyperopt_NN_bow_regularization.hdf5')\n", (3022, 3075), False, 'from keras.models import load_model\n'), ((3076, 3183), 'keras.utils.plot_model', 'plot_model', (['model'], {'to_file': '"""../build/plots/opt_model_bow.pdf"""', 'show_shapes': '(True)', 'show_layer_names': '(True)'}), "(model, to_file='../build/plots/opt_model_bow.pdf', show_shapes=\n True, show_layer_names=True)\n", (3086, 3183), False, 'from keras.utils import plot_model\n'), ((3191, 3263), 'ann_visualizer.visualize.ann_viz', 'ann_viz', (['model'], {'title': '"""BoW-DNN"""', 'filename': '"""../build/plots/bow_dnn_graph"""'}), "(model, title='BoW-DNN', filename='../build/plots/bow_dnn_graph')\n", (3198, 3263), False, 'from ann_visualizer.visualize import ann_viz\n'), ((703, 750), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud'], {'interpolation': '"""bilinear"""'}), "(wordcloud, interpolation='bilinear')\n", (713, 750), True, 'import matplotlib.pyplot as plt\n'), ((755, 770), 'matplotlib.pyplot.axis', 
'plt.axis', (['"""off"""'], {}), "('off')\n", (763, 770), True, 'import matplotlib.pyplot as plt\n'), ((775, 796), 'matplotlib.pyplot.margins', 'plt.margins', ([], {'x': '(0)', 'y': '(0)'}), '(x=0, y=0)\n', (786, 796), True, 'import matplotlib.pyplot as plt\n'), ((801, 854), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../build/plots/' + s + '_wordcloud.pdf')"], {}), "('../build/plots/' + s + '_wordcloud.pdf')\n", (812, 854), True, 'import matplotlib.pyplot as plt\n'), ((855, 864), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (862, 864), True, 'import matplotlib.pyplot as plt\n'), ((1302, 1325), 'numpy.sum', 'np.sum', (['X_train'], {'axis': '(0)'}), '(X_train, axis=0)\n', (1308, 1325), True, 'import numpy as np\n'), ((1500, 1522), 'numpy.sum', 'np.sum', (['X_test'], {'axis': '(0)'}), '(X_test, axis=0)\n', (1506, 1522), True, 'import numpy as np\n'), ((2609, 2631), 'numpy.sum', 'np.sum', (['X_fake'], {'axis': '(0)'}), '(X_fake, axis=0)\n', (2615, 2631), True, 'import numpy as np\n'), ((2664, 2686), 'numpy.sum', 'np.sum', (['X_real'], {'axis': '(0)'}), '(X_real, axis=0)\n', (2670, 2686), True, 'import numpy as np\n'), ((372, 407), 'PIL.Image.open', 'Image.open', (["('../data/pictures/' + t)"], {}), "('../data/pictures/' + t)\n", (382, 407), False, 'from PIL import Image\n'), ((451, 494), 'pathlib.Path', 'Path', (["('../build/preprocessed/' + s + '.csv')"], {}), "('../build/preprocessed/' + s + '.csv')\n", (455, 494), False, 'from pathlib import Path\n'), ((519, 590), 'wordcloud.WordCloud', 'WordCloud', ([], {'background_color': '"""black"""', 'width': '(1920)', 'height': '(1080)', 'mask': 'mask'}), "(background_color='black', width=1920, height=1080, mask=mask)\n", (528, 590), False, 'from wordcloud import WordCloud\n')] |
import argparse
import itertools
import pprint
import progressbar
import random
import requests
import time
import cv2
import numpy as np
class APIError(Exception):
    """Raised when the firenodejs REST API answers with a non-200 status."""

    def __init__(self, status):
        self.status = status

    def __str__(self):
        return f"APIError: {self.status}"
class FirenodejsAPI:
    """Thin client for the firenodejs REST API (camera frames + firestep motion)."""

    def __init__(self, args):
        """Configure the client from parsed CLI args.

        args must provide: url (base URL of the firenodejs server),
        fakemove (bool: print motion requests instead of posting them),
        tv (seconds to max velocity, or None), mv (max velocity in
        pulses/second, or None).
        """
        self._url = args.url
        self._fake_move = args.fakemove
        # Push optional motion-controller settings first.
        if args.tv is not None:
            self.position({'sys': {'tv': args.tv}})
        if args.mv is not None:
            self.position({'sys': {'mv': args.mv}})
        if not self._fake_move:
            # Dump the controller's system state for the operator.
            response = self.position(request={'sys': ''})
            pp = pprint.PrettyPrinter(indent=4)
            pp.pprint(response)

    def camera(self, src='video0'):
        """Fetch a single JPEG frame from camera feed ``src`` and decode it.

        Raises APIError on a non-200 response.
        """
        resp = requests.get(self._url + '/camera/' + src + '/image.jpg')
        if resp.status_code != 200:
            raise APIError('GET /camera/{}/image.jpg {}'.format(src, resp.status_code))
        return cv2.imdecode(np.frombuffer(resp.content, np.uint8), -1)

    def position(self, request=None):
        """POST a firestep motion/config request and return the JSON reply.

        Returns None without contacting the device when the request is
        empty, or when fake-move mode is enabled (the request is printed
        instead).  Raises APIError on a non-200 response.
        """
        # Bug fixes: the old signature used a mutable default ({}), and an
        # empty request fell through to `return resp.json()` with `resp`
        # never assigned (NameError).  Both are handled explicitly now.
        if not request:
            return None
        if self._fake_move:
            print(request)
            return None
        resp = requests.post(self._url + '/firestep', json=request)
        if resp.status_code != 200:
            raise APIError('POST {}/firestep json={} -> {}'.format(self._url, request, resp.status_code))
        #time.sleep(0.01)
        return resp.json()
#resp = requests.get('http://10.0.0.10:8080/images/default/image.jpg')
#resp = requests.get('http://10.0.0.10:8080/firestep/model')
#if resp.status_code != 200:
# raise APIError('GET /firestep/model/ {}'.format(resp.status_code))
#print(resp.content)
#for item in resp.json():
# print('{}: {}'.format(item, resp.json()[item]))
def set_z(api, z):
    """Move only the tool tip height to ``z``."""
    api.position({'mov': {'z': z}})


def move_to_xy(api, x, y):
    """Move in the XY plane (z unchanged, lpp disabled)."""
    api.position({'mov': {'x': x, 'y': y, 'lpp': False}})


def move_to_xyz(api, x, y, z):
    """Move to an absolute XYZ position (lpp disabled)."""
    api.position({'mov': {'x': x, 'y': y, 'z': z, 'lpp': False}})


def draw_rectangle(api, x0, y0, x1, y1, *, fill=False):
    """Draw a rectangle outline, optionally filling it with vertical strokes."""
    # Trace the outline with the pen down (-10), corner by corner.
    move_to_xy(api, x0, y0)
    set_z(api, -10)
    for corner_x, corner_y in ((x0, y1), (x1, y1), (x1, y0), (x0, y0)):
        move_to_xy(api, corner_x, corner_y)
    if fill:
        # Fill with up/down strokes spaced 1 mm apart along x.
        step = 1
        left, right = min(x0, x1), max(x0, x1)
        move_to_xy(api, left, y0)
        for stroke_x in range(left, right + step, step):
            move_to_xy(api, stroke_x, y0)
            move_to_xy(api, stroke_x, y1)
    set_z(api, 0)
def draw_chessboard(api, args):
    """Home the machine, then draw an 8x8 chessboard of width args.size at the origin."""
    api.position(request={'hom': ''})
    chessboard(api, (0, 0), args.size)


def chessboard(api, center, size):
    """Draw an 8x8 chessboard of total width ``size`` centred on ``center``.

    Fixes in this revision: the function now takes the ``api`` handle (its
    only caller, draw_chessboard, already passed three arguments), forwards
    it to draw_rectangle (previously called without it), and imports
    floor/ceil, which were previously undefined names.
    """
    from math import floor, ceil  # local import: the module top does not import math
    a = size / 8  # side length of one square
    # Outer bounding box, expanded to whole units.
    x0 = int(floor(center[0] - size / 2))
    y0 = int(floor(center[1] - size / 2))
    x1 = int(ceil(center[0] + size / 2))
    y1 = int(ceil(center[1] + size / 2))
    draw_rectangle(api, x0, y0, x1, y1)
    # Fill every second square (the dark ones).
    for x, y in itertools.product(range(-4, 4), range(-4, 4)):
        if (x + y) % 2 != 0:
            continue
        draw_rectangle(api, x * a, y * a, (x + 1) * a, (y + 1) * a, fill=True)
def draw_bitmap(api, args):
    """Home the machine and render args.image as dots and/or lines."""
    api.position(request={'hom': ''})
    image = cv2.imread(args.image, cv2.IMREAD_GRAYSCALE)
    if args.hflip:
        # Mirror around the vertical axis.
        image = image[:, ::-1]
    renderers = []
    if args.dots:
        renderers.append(draw_bitmap_dots)
    if args.lines:
        renderers.append(draw_bitmap_lines)
    for render in renderers:
        render(api, image, args)
def draw_bitmap_dots(api, img, args):
    """Convert ``img`` into machine coordinates (mm) and punch one dot per dark pixel."""
    height, width = img.shape[:2]
    off_x, off_y = tuple(map(float, args.offset.split(':')))
    if args.center:
        # Shift so the bitmap is centred on the requested offset.
        off_x = off_x - width / args.dpmm / 2
        off_y = off_y - height / args.dpmm / 2
    # Scale from pixels to mm and translate by the offset.
    dots = [(px / args.dpmm + off_x, py / args.dpmm + off_y)
            for (px, py) in bitmap_2_dots(img, threshold=args.threshold)]
    print('Drawing bitmap: {}x{}px, {}x{}mm, offset: {}:{}mm'.format(
        width, height, width / args.dpmm, height / args.dpmm, off_x, off_y
    ))
    print('# dots:', len(dots))
    time.sleep(2)  # give the operator a moment to abort
    if args.random:
        random.shuffle(dots)
    widgets = [
        ' [', progressbar.Timer(), '] ',
        progressbar.Bar(),
        ' (', progressbar.ETA(), ') ',
    ]
    bar = progressbar.ProgressBar(max_value=len(dots), widgets=widgets).start()
    draw_dots(api, dots, z_up=args.z_up, z_down=args.z_down, cback=bar.update)
    bar.finish()
def bitmap_2_dots(img, *, threshold=128):
    """Yield (x, y) pixel coordinates of every "dark" pixel (value < threshold)."""
    assert len(img.shape) == 2
    n_rows, n_cols = img.shape
    for row in range(n_rows):
        for col in range(n_cols):
            if img[row, col] < threshold:
                yield (col, row)
def draw_dots(api, dots, *, z_up, z_down, cback=None):
    """Punch each dot: optional travel at z_up, press to z_down, lift to z_up.

    A separate travel move at the safe height is inserted only when the next
    dot is more than 4 mm away (Chebyshev distance) from the previous one.
    """
    prev_x, prev_y = 0, 0
    adjacency_limit = 4  # [mm]
    for idx, (x, y) in enumerate(dots):
        if max(abs(prev_x - x), abs(prev_y - y)) > adjacency_limit:
            # Far jump: travel at the safe height before pressing down.
            move_to_xyz(api, x, y, z_up)
        move_to_xyz(api, x, y, z_down)
        move_to_xyz(api, x, y, z_up)
        prev_x, prev_y = x, y
        if cback is not None:
            cback(idx)
def draw_bitmap_lines(api, img, args):
    """Render ``img`` as horizontal pen strokes in machine coordinates (mm)."""
    height, width = img.shape[:2]
    off_x, off_y = tuple(map(float, args.offset.split(':')))
    if args.center:
        # Shift so the bitmap is centred on the requested offset.
        off_x = off_x - width / args.dpmm / 2
        off_y = off_y - height / args.dpmm / 2
    print('Drawing bitmap: {}x{}px, {}x{}mm, offset: {}:{}mm'.format(
        width, height, width / args.dpmm, height / args.dpmm, off_x, off_y
    ))
    time.sleep(2)  # give the operator a moment to abort

    def to_mm(px, py):
        # pixel -> machine coordinates (mm)
        return (px / args.dpmm + off_x, py / args.dpmm + off_y)

    lines = [(to_mm(x0, y0), to_mm(x1, y1))
             for ((x0, y0), (x1, y1)) in bitmap_2_lines(img, threshold=args.threshold)]
    if args.random:
        random.shuffle(lines)
    widgets = [
        ' [', progressbar.Timer(), '] ',
        progressbar.Bar(),
        ' (', progressbar.AdaptiveETA(), ') ',
    ]
    bar = progressbar.ProgressBar(max_value=len(lines), widgets=widgets).start()
    draw_lines(api, lines, z_up=args.z_up, z_down=args.z_down, cback=bar.update)
    bar.finish()
def bitmap_2_lines(img, *, threshold=128):
    """Yield each horizontal dark run of ``img`` as an ((x0, r), (x1, r)) segment.

    A "dark" pixel is one with value < threshold; segment endpoints are
    inclusive pixel column indices.

    Fixes two bugs in the previous version: a run extending to the right
    edge of a row was never emitted, and the first dark pixel after a
    completed run was silently dropped.
    """
    assert len(img.shape) == 2
    h, w = img.shape
    for r in range(h):
        begin = None  # column where the current dark run started
        for c in range(w):
            if img[r, c] < threshold:
                if begin is None:
                    begin = c
            elif begin is not None:
                # Run ended at the previous column.
                yield ((begin, r), (c - 1, r))
                begin = None
        if begin is not None:
            # Run reaches the right edge of the row.
            yield ((begin, r), (w - 1, r))
def draw_lines(api, lines, *, z_up, z_down, cback=None):
    """Draw each ((x0, y0), (x1, y1)) segment: travel at z_up, stroke at z_down."""
    set_z(api, z_up)
    for idx, (start, end) in enumerate(lines):
        api.position({'mov': {'x': start[0], 'y': start[1]}})
        set_z(api, z_down)
        api.position({'mov': {'x': end[0], 'y': end[1]}})
        set_z(api, z_up)
        if cback is not None:
            cback(idx)
# Command-line interface: global options plus `bitmap` and `chess` sub-commands.
parser = argparse.ArgumentParser(description='Tool for drawing via firenodejs', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
subparsers = parser.add_subparsers()
parser.add_argument('--url', default='http://10.0.0.10:8080', help='URL of firenodejs')
parser.add_argument('--fakemove', action='store_true', help='Fake movement')
parser.add_argument('--z-up', type=float, default=15, help='Retract tip height')
parser.add_argument('--z-down', type=float, default=-10, help='Draw tip height')
parser.add_argument('--tv', type=float, default=None, help='Set seconds to reach maximum velocity')
parser.add_argument('--mv', type=float, default=None, help='Set maximum velocity (pulses/second)')
# `bitmap` sub-command: render a raster image with dots and/or lines.
parser_bitmap = subparsers.add_parser('bitmap', help='Bitmap drawing')
parser_bitmap.add_argument('--image', help='Path to image', required=True)
parser_bitmap.add_argument('--dots', help='Draw using dots', action='store_true')
parser_bitmap.add_argument('--lines', help='Draw using lines', action='store_true')
parser_bitmap.add_argument('--hflip', help='Flip the image horizontally (around vertical axis)', action='store_true')
parser_bitmap.add_argument('--threshold', help='Threshold for binarizing an image', default=128, type=int)
parser_bitmap.add_argument('--dpmm', type=float, help='Dots per mm', default=1)
parser_bitmap.add_argument('--offset', help='Offset the image [mm]', default='0:0')
parser_bitmap.add_argument('--center', help='Center image around 0:0', action='store_true')
parser_bitmap.add_argument('--random', help='Draw the dots in a random order', action='store_true')
parser_bitmap.set_defaults(func=draw_bitmap)
# `chess` sub-command: draw an 8x8 chessboard.
parser_chess = subparsers.add_parser('chess', help='Chessboard drawing')
parser_chess.add_argument('--size', help='Width of the chessboard', default=80)
parser_chess.set_defaults(func=draw_chessboard)
# Parse the arguments and dispatch to the selected sub-command (if any).
args = parser.parse_args()
api = FirenodejsAPI(args)
if hasattr(args, 'func'):
    args.func(api, args)
#feed = 'video0'
#print('Press ESC to end')
#while True:
#    image = api.camera(src=feed)
#    cv2.imshow(feed, image)
#    k = cv2.waitKey(1)
#    if k == 27:
#        break
| [
"argparse.ArgumentParser",
"random.shuffle",
"numpy.frombuffer",
"progressbar.Bar",
"progressbar.ETA",
"time.sleep",
"cv2.imread",
"progressbar.AdaptiveETA",
"pprint.PrettyPrinter",
"requests.get",
"progressbar.Timer",
"requests.post"
] | [((7304, 7434), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Tool for drawing via firenodejs"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Tool for drawing via firenodejs',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (7327, 7434), False, 'import argparse\n'), ((3276, 3320), 'cv2.imread', 'cv2.imread', (['args.image', 'cv2.IMREAD_GRAYSCALE'], {}), '(args.image, cv2.IMREAD_GRAYSCALE)\n', (3286, 3320), False, 'import cv2\n'), ((4064, 4077), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4074, 4077), False, 'import time\n'), ((5673, 5686), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5683, 5686), False, 'import time\n'), ((901, 958), 'requests.get', 'requests.get', (["(self._url + '/camera/' + src + '/image.jpg')"], {}), "(self._url + '/camera/' + src + '/image.jpg')\n", (913, 958), False, 'import requests\n'), ((4107, 4127), 'random.shuffle', 'random.shuffle', (['dots'], {}), '(dots)\n', (4121, 4127), False, 'import random\n'), ((4157, 4176), 'progressbar.Timer', 'progressbar.Timer', ([], {}), '()\n', (4174, 4176), False, 'import progressbar\n'), ((4192, 4209), 'progressbar.Bar', 'progressbar.Bar', ([], {}), '()\n', (4207, 4209), False, 'import progressbar\n'), ((4225, 4242), 'progressbar.ETA', 'progressbar.ETA', ([], {}), '()\n', (4240, 4242), False, 'import progressbar\n'), ((5923, 5944), 'random.shuffle', 'random.shuffle', (['lines'], {}), '(lines)\n', (5937, 5944), False, 'import random\n'), ((5974, 5993), 'progressbar.Timer', 'progressbar.Timer', ([], {}), '()\n', (5991, 5993), False, 'import progressbar\n'), ((6009, 6026), 'progressbar.Bar', 'progressbar.Bar', ([], {}), '()\n', (6024, 6026), False, 'import progressbar\n'), ((6042, 6067), 'progressbar.AdaptiveETA', 'progressbar.AdaptiveETA', ([], {}), '()\n', (6065, 6067), False, 'import progressbar\n'), ((731, 761), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', 
(751, 761), False, 'import pprint\n'), ((1111, 1148), 'numpy.frombuffer', 'np.frombuffer', (['resp.content', 'np.uint8'], {}), '(resp.content, np.uint8)\n', (1124, 1148), True, 'import numpy as np\n'), ((1317, 1369), 'requests.post', 'requests.post', (["(self._url + '/firestep')"], {'json': 'request'}), "(self._url + '/firestep', json=request)\n", (1330, 1369), False, 'import requests\n')] |
import numpy as np
import torch
import os
import pickle
from numba import jit
import torch.nn as nn
from .ucb import UCB
from .utils import Model
@jit(nopython=True)
def calc_confidence_multiplier(confidence_scaling_factor, approximator_dim, iteration, bound_features,
                               reg_factor, delta):
    """Closed-form multiplier (beta_t) for the NeuralUCB confidence width.

    Compiled with numba so the arithmetic runs at native speed when called
    every round from the `confidence_multiplier` property.
    """
    # Information term: grows with the (approximate) log-determinant of the design matrix.
    information_term = approximator_dim * np.log(
        1 + iteration * bound_features ** 2 / (reg_factor * approximator_dim))
    # Tail term: controls the failure probability delta.
    tail_term = 2 * np.log(1 / delta)
    return confidence_scaling_factor * np.sqrt(information_term + tail_term)
class NeuralUCB(UCB):
    """Neural UCB.

    Contextual bandit that scores arms with a small feed-forward network
    (``Model``) and derives UCB-style confidence widths from the gradient of
    the network output w.r.t. its trainable weights.
    NOTE(review): presumably implements Zhou et al., "Neural Contextual
    Bandits with UCB-based Exploration" -- confirm against the source paper.
    """
    def __init__(self,
                 bandit,
                 hidden_size=20,
                 n_layers=2,
                 reg_factor=1.0,
                 delta=0.01,
                 confidence_scaling_factor=-1.0,
                 training_window=100,
                 p=0.0,
                 learning_rate=0.01,
                 epochs=1,
                 train_every=1,
                 save_path=None,
                 load_from=None,
                 guide_for=0
                 ):
        # Global step counter; features/rewards are indexed modulo bandit.T.
        self.iteration = 0
        self.save_path = save_path
        # Scale factor applied to every flattened gradient feature
        # (grad / sqrt(hidden_size) in update_output_gradient below).
        self.rhs = np.sqrt(hidden_size)
        if load_from is None:
            # hidden size of the NN layers
            self.hidden_size = hidden_size
            # number of layers
            self.n_layers = n_layers
            # number of rewards in the training buffer
            self.training_window = training_window
            # NN parameters
            self.learning_rate = learning_rate
            self.epochs = epochs
            self.device = 'cpu'#torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            # dropout rate
            self.p = p
            # neural network
            self.model = Model(input_size=bandit.n_features,
                              hidden_size=self.hidden_size,
                              n_layers=self.n_layers,
                              p=self.p
                              ).to(self.device)
            self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
            # maximum L2 norm for the features across all arms and all rounds
            self.bound_features = np.max(np.linalg.norm(bandit.features, ord=2, axis=-1))
            super().__init__(bandit,
                             reg_factor=reg_factor,
                             confidence_scaling_factor=confidence_scaling_factor,
                             delta=delta,
                             train_every=train_every,
                             guide_for=guide_for
                             )
        else:
            # Restoring a serialized model is not implemented yet.
            raise NotImplementedError
    def save(self, postfix=''):
        # Persistence is intentionally a no-op (save_path is kept for future use).
        return
    @property
    def approximator_dim(self):
        """Sum of the dimensions of all trainable layers in the network.
        """
        return sum(w.numel() for w in self.model.parameters() if w.requires_grad)
    def update_output_gradient(self):
        """Get gradient of network prediction w.r.t network weights.
        Gradient for each arm.
        """
        for a in self.bandit.arms:
            x = torch.FloatTensor(
                self.bandit.features[self.iteration % self.bandit.T, a].reshape(1, -1)
            ).to(self.device)
            self.model.zero_grad()
            y = self.model(x)
            y.backward()
            # Concatenate all trainable-weight gradients into one flat
            # feature vector, scaled down by sqrt(hidden_size).
            self.grad_approx[a] = torch.cat(
                [w.grad.detach().cpu().flatten() / self.rhs for w in self.model.parameters() if w.requires_grad]
            ).numpy()
    def evaluate_output_gradient(self, features):
        """Compute gradient features from a caller-supplied feature array.

        Same computation as update_output_gradient(), but reads each arm's
        features from ``features[0, a]`` instead of the bandit's stored
        features for the current iteration.
        """
        for a in self.bandit.arms:
            x = torch.FloatTensor(
                features[0, a].reshape(1, -1)
            ).to(self.device)
            self.model.zero_grad()
            y = self.model(x)
            y.backward()
            self.grad_approx[a] = torch.cat(
                [w.grad.detach().cpu().flatten() / self.rhs for w in self.model.parameters() if w.requires_grad]
            ).numpy()
    def reset(self):
        """Return the internal estimates.
        Initialize SINGLE PREDICTOR NN.
        REMEMBER TO USE ONLY THE FIRST A_INV
        """
        self.reset_upper_confidence_bounds()
        self.reset_actions()
        self.reset_A_inv()
        self.reset_grad_approx()
    @property
    def confidence_multiplier(self):
        """Confidence interval multiplier.
        """
        # Delegates to the module-level numba-jitted helper.
        return calc_confidence_multiplier(self.confidence_scaling_factor, self.approximator_dim, self.iteration,
                                          self.bound_features, self.reg_factor, self.delta)
    def train(self):
        """
        Train neural approximator.
        """
        # Fit only on the most recent `training_window` (iteration, action) pairs.
        iterations_so_far = range(np.max([0, (self.iteration % self.bandit.T) - self.training_window]), (self.iteration % self.bandit.T)+1)
        actions_so_far = self.actions[np.max([0, (self.iteration % self.bandit.T) - self.training_window]):(self.iteration % self.bandit.T)+1]
        x_train = torch.FloatTensor(self.bandit.features[iterations_so_far, actions_so_far]).to(self.device)
        y_train = torch.FloatTensor(self.bandit.rewards[iterations_so_far, actions_so_far]).squeeze().to(self.device)
        # train mode
        self.model.train()
        for _ in range(self.epochs):
            y_pred = self.model.forward(x_train).squeeze()
            loss = nn.MSELoss()(y_train, y_pred)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
    def predict(self):
        """Predict reward.
        """
        # eval mode
        self.model.eval()
        self.mu_hat = self.model.forward(
            torch.FloatTensor(self.bandit.features[self.iteration % self.bandit.T]).to(self.device)
        ).detach().squeeze().cpu().numpy()
    def evaluate(self, features):
        # Same as predict(), but scores caller-supplied features[0]
        # instead of the bandit's stored features for this iteration.
        # eval mode
        self.model.eval()
        self.mu_hat = self.model.forward(
            torch.FloatTensor(features[0]).to(self.device)
        ).detach().squeeze().cpu().numpy() | [
"torch.nn.MSELoss",
"numpy.log",
"torch.FloatTensor",
"numpy.max",
"numpy.linalg.norm",
"numba.jit",
"numpy.sqrt"
] | [((148, 166), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (151, 166), False, 'from numba import jit\n'), ((1126, 1146), 'numpy.sqrt', 'np.sqrt', (['hidden_size'], {}), '(hidden_size)\n', (1133, 1146), True, 'import numpy as np\n'), ((4820, 4886), 'numpy.max', 'np.max', (['[0, self.iteration % self.bandit.T - self.training_window]'], {}), '([0, self.iteration % self.bandit.T - self.training_window])\n', (4826, 4886), True, 'import numpy as np\n'), ((2204, 2251), 'numpy.linalg.norm', 'np.linalg.norm', (['bandit.features'], {'ord': '(2)', 'axis': '(-1)'}), '(bandit.features, ord=2, axis=-1)\n', (2218, 2251), True, 'import numpy as np\n'), ((4964, 5030), 'numpy.max', 'np.max', (['[0, self.iteration % self.bandit.T - self.training_window]'], {}), '([0, self.iteration % self.bandit.T - self.training_window])\n', (4970, 5030), True, 'import numpy as np\n'), ((5088, 5162), 'torch.FloatTensor', 'torch.FloatTensor', (['self.bandit.features[iterations_so_far, actions_so_far]'], {}), '(self.bandit.features[iterations_so_far, actions_so_far])\n', (5105, 5162), False, 'import torch\n'), ((5461, 5473), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (5471, 5473), True, 'import torch.nn as nn\n'), ((387, 464), 'numpy.log', 'np.log', (['(1 + iteration * bound_features ** 2 / (reg_factor * approximator_dim))'], {}), '(1 + iteration * bound_features ** 2 / (reg_factor * approximator_dim))\n', (393, 464), True, 'import numpy as np\n'), ((480, 497), 'numpy.log', 'np.log', (['(1 / delta)'], {}), '(1 / delta)\n', (486, 497), True, 'import numpy as np\n'), ((5197, 5270), 'torch.FloatTensor', 'torch.FloatTensor', (['self.bandit.rewards[iterations_so_far, actions_so_far]'], {}), '(self.bandit.rewards[iterations_so_far, actions_so_far])\n', (5214, 5270), False, 'import torch\n'), ((5755, 5826), 'torch.FloatTensor', 'torch.FloatTensor', (['self.bandit.features[self.iteration % self.bandit.T]'], {}), '(self.bandit.features[self.iteration % self.bandit.T])\n', 
(5772, 5826), False, 'import torch\n'), ((6022, 6052), 'torch.FloatTensor', 'torch.FloatTensor', (['features[0]'], {}), '(features[0])\n', (6039, 6052), False, 'import torch\n')] |
"""Train a small CNN on the Kaggle digit-recognizer (MNIST) CSV files."""
import keras
import numpy as np
import pandas as pd
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPool2D
# Kaggle digit-recognizer format: a 'label' column plus 784 pixel columns.
train_data = pd.read_csv(r"train.csv")
X_test = pd.read_csv(r"test.csv")
train_data.shape, X_test.shape
y_train = train_data['label'].values
X_train = train_data.drop(labels=['label'], axis=1)
X_train.shape, X_test.shape
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
num_classes = 10
# One-hot encode the digit labels for categorical cross-entropy.
y_train = keras.utils.to_categorical(y_train, num_classes)
# Scale pixel intensities from [0, 255] down to [0, 1].
X_train /= 255
X_test /= 255
# Reshape flat 784-pixel rows into 28x28 single-channel images.
X_train = X_train.values.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.values.reshape(X_test.shape[0], 28, 28, 1)
input_shape = (28, 28, 1)
# Three conv/pool stages followed by a dense classifier head.
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=3, activation="relu", padding='same', input_shape=input_shape))
model.add(MaxPool2D())
model.add(Conv2D(filters=64, kernel_size=3, activation="relu", padding='same'))
model.add(MaxPool2D())
model.add(Conv2D(filters=128, kernel_size=3, activation="relu", padding='same'))
model.add(MaxPool2D())
model.add(Flatten())
model.add(Dense(units=256, activation="relu"))
model.add(Dropout(0.3))
model.add(Dense(units=num_classes, activation="softmax"))
model.compile(
    loss=keras.losses.categorical_crossentropy,
    optimizer=keras.optimizers.Adam(),
    metrics=['accuracy'])
history = model.fit(
    X_train, y_train,
    batch_size=128,
    epochs=20,
    verbose=1,
)
import cv2
# Load a hand-drawn digit and prepare it the same way as the training data.
img = cv2.imread(r'img.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_arr = np.array(gray)
#print(img_arr)
flattened_array = img_arr.flatten()
flattened_array.shape
# BUG FIX: np.reshape(img_arr[0], 28, 28, 1) passed the shape as separate
# positional arguments (np.reshape's third positional is `order`) and only
# reshaped a single pixel row. Reshape the whole image into a batch of one
# 28x28x1 sample, matching the model's input shape.
# NOTE(review): assumes img.png is already 28x28 grayscale -- confirm, and
# consider dividing by 255 to match the training preprocessing.
flattened_array = np.reshape(img_arr, (1, 28, 28, 1))
| [
"pandas.read_csv",
"cv2.cvtColor",
"keras.layers.Dropout",
"keras.layers.MaxPool2D",
"keras.layers.Flatten",
"keras.optimizers.Adam",
"cv2.imread",
"keras.layers.Dense",
"numpy.array",
"numpy.reshape",
"keras.layers.Conv2D",
"keras.models.Sequential",
"keras.utils.to_categorical"
] | [((228, 252), 'pandas.read_csv', 'pd.read_csv', (['"""train.csv"""'], {}), "('train.csv')\n", (239, 252), True, 'import pandas as pd\n'), ((262, 285), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {}), "('test.csv')\n", (273, 285), True, 'import pandas as pd\n'), ((534, 582), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (560, 582), False, 'import keras\n'), ((770, 782), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (780, 782), False, 'from keras.models import Sequential\n'), ((1512, 1533), 'cv2.imread', 'cv2.imread', (['"""img.png"""'], {}), "('img.png')\n", (1522, 1533), False, 'import cv2\n'), ((1542, 1579), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1554, 1579), False, 'import cv2\n'), ((1590, 1604), 'numpy.array', 'np.array', (['gray'], {}), '(gray)\n', (1598, 1604), True, 'import numpy as np\n'), ((1697, 1730), 'numpy.reshape', 'np.reshape', (['img_arr[0]', '(28)', '(28)', '(1)'], {}), '(img_arr[0], 28, 28, 1)\n', (1707, 1730), True, 'import numpy as np\n'), ((793, 890), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'activation': '"""relu"""', 'padding': '"""same"""', 'input_shape': 'input_shape'}), "(filters=32, kernel_size=3, activation='relu', padding='same',\n input_shape=input_shape)\n", (799, 890), False, 'from keras.layers import Conv2D, MaxPool2D\n'), ((897, 908), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (906, 908), False, 'from keras.layers import Conv2D, MaxPool2D\n'), ((920, 988), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'activation': '"""relu"""', 'padding': '"""same"""'}), "(filters=64, kernel_size=3, activation='relu', padding='same')\n", (926, 988), False, 'from keras.layers import Conv2D, MaxPool2D\n'), ((1000, 1011), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (1009, 1011), False, 
'from keras.layers import Conv2D, MaxPool2D\n'), ((1023, 1092), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3)', 'activation': '"""relu"""', 'padding': '"""same"""'}), "(filters=128, kernel_size=3, activation='relu', padding='same')\n", (1029, 1092), False, 'from keras.layers import Conv2D, MaxPool2D\n'), ((1104, 1115), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (1113, 1115), False, 'from keras.layers import Conv2D, MaxPool2D\n'), ((1127, 1136), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1134, 1136), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1148, 1183), 'keras.layers.Dense', 'Dense', ([], {'units': '(256)', 'activation': '"""relu"""'}), "(units=256, activation='relu')\n", (1153, 1183), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1195, 1207), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (1202, 1207), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1219, 1265), 'keras.layers.Dense', 'Dense', ([], {'units': 'num_classes', 'activation': '"""softmax"""'}), "(units=num_classes, activation='softmax')\n", (1224, 1265), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1345, 1368), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {}), '()\n', (1366, 1368), False, 'import keras\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2018 by ShabaniPy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the MIT license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Routines used in the analysis of shapiro steps experiments.
"""
import numpy as np
def shapiro_step(frequency):
    """Compute the amplitude of a Shapiro step at a given frequency.

    The step height is h*f / (2*e): Planck's constant times the drive
    frequency divided by twice the electron charge.
    """
    planck_constant = 6.626e-34
    electron_charge = 1.6e-19
    return planck_constant * frequency / (2 * electron_charge)
def normalize_db_power(power, norm_power):
    """Normalize a power in dB by a reference power in dB.

    dB values are logarithmic, so both quantities are converted back to
    linear power before taking their ratio.
    """
    power_ratio = np.power(10, power / 10) / np.power(10, norm_power / 10)
    return 10 * np.log10(power_ratio)
| [
"numpy.power",
"numpy.log10"
] | [((838, 862), 'numpy.power', 'np.power', (['(10)', '(power / 10)'], {}), '(10, power / 10)\n', (846, 862), True, 'import numpy as np\n'), ((882, 911), 'numpy.power', 'np.power', (['(10)', '(norm_power / 10)'], {}), '(10, norm_power / 10)\n', (890, 911), True, 'import numpy as np\n'), ((924, 960), 'numpy.log10', 'np.log10', (['(lin_power / lin_norm_power)'], {}), '(lin_power / lin_norm_power)\n', (932, 960), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import os
import numpy as np
import yaml
import pickle
import argparse
def init_color_space(color_path):
    # type: (str) -> np.ndarray
    """
    Initialization of color space from yaml or pickle.txt file
    :param str color_path: path to file containing the accepted colors
    :return: (256, 256, 256) uint8 lookup cube, 1 where a color is accepted;
        indexed as [blue, green, red]
    :raises ValueError: if the file cannot be parsed, has an unsupported
        extension, or its channel lists have mismatched lengths
    """
    color_space = np.zeros((256, 256, 256), dtype=np.uint8)
    if color_path.endswith('.yaml'):
        with open(color_path, 'r') as stream:
            try:
                color_values = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                # Previously this was silently swallowed, which caused a
                # confusing NameError further down. Fail loudly instead.
                raise ValueError("Could not parse YAML color file '{}': {}".format(color_path, exc))
    # pickle-file is stored as '.txt'
    elif color_path.endswith('.txt'):
        try:
            with open(color_path, 'rb') as f:
                color_values = pickle.load(f)
        except pickle.PickleError as exc:
            raise ValueError("Could not unpickle color file '{}': {}".format(color_path, exc))
    else:
        # Previously an unknown extension fell through to a NameError.
        raise ValueError("Unsupported color file extension: '{}'".format(color_path))
    # compatibility with colorpicker
    if 'color_values' in color_values.keys():
        color_values = color_values['color_values']['greenField']
    length = len(color_values['red'])
    if length != len(color_values['green']) or length != len(color_values['blue']):
        # Previously a mismatch silently returned an all-zero color space.
        raise ValueError("Mismatched channel list lengths in '{}'".format(color_path))
    # setting colors from the file to True in color space (index order: blue, green, red)
    for x in range(length):
        color_space[color_values['blue'][x],
                    color_values['green'][x],
                    color_values['red'][x]] = 1
    print("Imported color space")
    return color_space
def compare(positive_color_space, negative_color_space):
    """Subtract the negative color space from the positive one.

    :param positive_color_space: binary array, 1 marks accepted colors
    :param negative_color_space: binary array, 1 marks colors to remove
    :return: uint8 array with 1 only where positive is set and negative is not
    """
    # BUG FIX: `np.bool` was removed in NumPy 1.24; use the builtin bool dtype.
    mask = np.invert(np.array(negative_color_space, dtype=bool))
    binary_color_space = np.logical_and(mask, positive_color_space)
    return np.array(binary_color_space, dtype=np.uint8)
def generate_color_lists(color_space):
    """Convert a binary color-space cube into three parallel index lists.

    :param color_space: array where accepted colors are marked with 1
    :return: tuple of (axis-0, axis-1, axis-2) index lists of every set entry
    """
    axis0, axis1, axis2 = np.where(color_space == 1)
    return (axis0.tolist(), axis1.tolist(), axis2.tolist())
def save(filename, color_space):
    """Pickle the accepted colors of a color space to '<filename>_negative_filtered.txt'.

    :param filename: output path prefix (suffix and extension are appended)
    :param color_space: (256, 256, 256) binary cube indexed as [blue, green, red]
    """
    # BUG FIX: generate_color_lists() returns the axes in index order, which
    # is (blue, green, red) -- see init_color_space's [blue, green, red]
    # indexing. The previous unpacking order swapped the red and blue
    # channels in the saved file, so it would not round-trip.
    blue, green, red = generate_color_lists(color_space)
    output_type = "negative_filtered"
    data = dict(
        red = red,
        green = green,
        blue = blue
    )
    filename = '{}_{}.txt'.format(filename, output_type)
    with open(filename, 'wb') as outfile:
        pickle.dump(data, outfile, protocol=2)
    # stores data of colorspace in file as pickle for efficient loading (yaml is too slow)
    print("Output saved to '{}'.".format(filename))
def run(positive_color_space_path, negative_color_space_path, output_path):
    """Filter one color space by another and store the result.

    Loads both color spaces, removes every color of the negative space from
    the positive one, and saves the filtered cube under ``output_path``.
    """
    print("Load positive color space '{}'".format(positive_color_space_path))
    positive = init_color_space(positive_color_space_path)
    print(np.count_nonzero(positive))
    print("Load negative color space '{}'".format(negative_color_space_path))
    negative = init_color_space(negative_color_space_path)
    print(np.count_nonzero(negative))
    print("Filter color spaces")
    filtered = compare(positive, negative)
    print(np.count_nonzero(filtered))
    print("Finished filtering")
    print("Save color space")
    save(output_path, filtered)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--positive", help="Color space where the negative color space is subtracted from.")
parser.add_argument("-n", "--negative", help="Color space which is subtracted from the positive color space.")
parser.add_argument("-o", "--output", help="Saves the output in a Pickle file.")
args = parser.parse_args()
np.warnings.filterwarnings('ignore')
if args.positive and os.path.exists(args.positive):
if args.negative and os.path.exists(args.negative):
if args.output and os.path.exists(args.output):
run(args.positive, args.negative, args.output)
else:
print("Output path incorrect!")
else:
print("Negative color space path incorrect!")
else:
print("Positive color space path incorrect!")
| [
"pickle.dump",
"numpy.count_nonzero",
"argparse.ArgumentParser",
"numpy.logical_and",
"numpy.zeros",
"os.path.exists",
"numpy.where",
"numpy.array",
"yaml.safe_load",
"pickle.load",
"numpy.warnings.filterwarnings"
] | [((343, 384), 'numpy.zeros', 'np.zeros', (['(256, 256, 256)'], {'dtype': 'np.uint8'}), '((256, 256, 256), dtype=np.uint8)\n', (351, 384), True, 'import numpy as np\n'), ((1655, 1697), 'numpy.logical_and', 'np.logical_and', (['mask', 'positive_color_space'], {}), '(mask, positive_color_space)\n', (1669, 1697), True, 'import numpy as np\n'), ((1709, 1753), 'numpy.array', 'np.array', (['binary_color_space'], {'dtype': 'np.uint8'}), '(binary_color_space, dtype=np.uint8)\n', (1717, 1753), True, 'import numpy as np\n'), ((1822, 1848), 'numpy.where', 'np.where', (['(color_space == 1)'], {}), '(color_space == 1)\n', (1830, 1848), True, 'import numpy as np\n'), ((3348, 3373), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3371, 3373), False, 'import argparse\n'), ((3724, 3760), 'numpy.warnings.filterwarnings', 'np.warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (3750, 3760), True, 'import numpy as np\n'), ((1583, 1628), 'numpy.array', 'np.array', (['negative_color_space'], {'dtype': 'np.bool'}), '(negative_color_space, dtype=np.bool)\n', (1591, 1628), True, 'import numpy as np\n'), ((2372, 2410), 'pickle.dump', 'pickle.dump', (['data', 'outfile'], {'protocol': '(2)'}), '(data, outfile, protocol=2)\n', (2383, 2410), False, 'import pickle\n'), ((2800, 2838), 'numpy.count_nonzero', 'np.count_nonzero', (['positive_color_space'], {}), '(positive_color_space)\n', (2816, 2838), True, 'import numpy as np\n'), ((2999, 3037), 'numpy.count_nonzero', 'np.count_nonzero', (['negative_color_space'], {}), '(negative_color_space)\n', (3015, 3037), True, 'import numpy as np\n'), ((3161, 3199), 'numpy.count_nonzero', 'np.count_nonzero', (['filtered_color_space'], {}), '(filtered_color_space)\n', (3177, 3199), True, 'import numpy as np\n'), ((3787, 3816), 'os.path.exists', 'os.path.exists', (['args.positive'], {}), '(args.positive)\n', (3801, 3816), False, 'import os\n'), ((3847, 3876), 'os.path.exists', 'os.path.exists', (['args.negative'], {}), 
'(args.negative)\n', (3861, 3876), False, 'import os\n'), ((516, 538), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (530, 538), False, 'import yaml\n'), ((3909, 3936), 'os.path.exists', 'os.path.exists', (['args.output'], {}), '(args.output)\n', (3923, 3936), False, 'import os\n'), ((822, 836), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (833, 836), False, 'import pickle\n')] |
import pandas as pd
import numpy as np
import itertools
import shutil
from inferelator_ng.default import DEFAULT_METADATA_FOR_BATCH_CORRECTION
from inferelator_ng.default import DEFAULT_RANDOM_SEED
from inferelator_ng import utils
"""
This file is all preprocessing functions. All functions must take positional arguments expression_matrix and meta_data.
All other arguments must be keyword. All functions must return expression_matrix and meta_data (modified or unmodified).
Normalization functions take batch_factor_column [str] as a kwarg
Imputation functions take random_seed [int] and output_file [str] as a kwarg
Please note that there are a bunch of packages in here that aren't installed as part of the project dependencies
This is intentional; if you don't have these packages installed, don't try to use them
TODO: Put together a set of tests for this
"""
def normalize_expression_to_one(expression_matrix, meta_data, **kwargs):
    """
    Rescale every cell so that its expression values sum to one.
    :param expression_matrix: pd.DataFrame [observations x genes]
    :param meta_data: pd.DataFrame
    :param batch_factor_column: str (kwarg; consumed for interface parity, unused here)
    :return expression_matrix, meta_data: pd.DataFrame, pd.DataFrame
    """
    kwargs, batch_factor_column = process_normalize_args(**kwargs)
    utils.Debug.vprint('Normalizing UMI counts per cell ... ')
    # Total UMI count per cell (row sums)
    total_counts = expression_matrix.sum(axis=1)
    # Divide each cell's raw counts by its total so each row sums to one
    normalized = expression_matrix.astype(float).divide(total_counts, axis=0)
    return normalized, meta_data
def normalize_medians_for_batch(expression_matrix, meta_data, **kwargs):
    """
    Calculate the median UMI count per cell for each batch. Transform all batches by dividing by a size correction
    factor, so that all batches have the same median UMI count (which is the median batch median UMI count)
    :param expression_matrix: pd.DataFrame
    :param meta_data: pd.DataFrame
    :param batch_factor_column: str
        Which meta data column should be used to determine batches
    :return expression_matrix, meta_data: pd.DataFrame, pd.DataFrame
    """
    kwargs, batch_factor_column = process_normalize_args(**kwargs)
    utils.Debug.vprint('Normalizing median counts between batches ... ')
    # Per-cell UMI totals tagged with the batch each cell belongs to
    cell_counts = pd.DataFrame({'umi': expression_matrix.sum(axis=1),
                                batch_factor_column: meta_data[batch_factor_column]})
    # Median UMI count within each batch, expressed relative to the
    # median of those batch medians (the common target median)
    batch_medians = cell_counts.groupby(batch_factor_column).agg('median')
    batch_medians = batch_medians / batch_medians['umi'].median()
    # Attach each cell's batch correction factor and rescale the data
    cell_counts = cell_counts.join(batch_medians, on=batch_factor_column, how="left", rsuffix="_mod")
    return expression_matrix.divide(cell_counts['umi_mod'], axis=0), meta_data
def normalize_sizes_within_batch(expression_matrix, meta_data, **kwargs):
    """
    Calculate the median UMI count within each batch and then resize each sample so that each sample has the same total
    UMI count
    :param expression_matrix: pd.DataFrame
    :param meta_data: pd.DataFrame
    :param batch_factor_column: str
        Which meta data column should be used to determine batches
    :return expression_matrix, meta_data: pd.DataFrame, pd.DataFrame
    """
    kwargs, batch_factor_column = process_normalize_args(**kwargs)
    utils.Debug.vprint('Normalizing to median counts within batches ... ')
    # Get UMI counts for each cell
    umi = expression_matrix.sum(axis=1)
    # Create a new dataframe with the UMI counts and the factor to batch correct on
    umi = pd.DataFrame({'umi': umi, batch_factor_column: meta_data[batch_factor_column]})
    # Group and take the median UMI count for each batch
    median_umi = umi.groupby(batch_factor_column).agg('median')
    # BUG FIX: join on the configured batch column. This was hard-coded to
    # "Condition", which broke any run that used a different
    # batch_factor_column (cf. normalize_medians_for_batch, which joins on
    # batch_factor_column).
    umi = umi.join(median_umi, on=batch_factor_column, how="left", rsuffix="_mod")
    # Size factor: each cell's UMI count relative to its batch median
    umi['umi_mod'] = umi['umi'] / umi['umi_mod']
    # Apply the correction factor to all the data
    return expression_matrix.divide(umi['umi_mod'], axis=0), meta_data
def normalize_multiBatchNorm(expression_matrix, meta_data, **kwargs):
    """
    Normalize as multiBatchNorm from the R package scran
    :param expression_matrix: pd.DataFrame
    :param meta_data: pd.DataFrame
    :param batch_factor_column: str
        Which meta data column should be used to determine batches
    :param minimum_mean: int
        Minimum mean expression of a gene when considering if it should be included in the correction factor calc
    :return expression_matrix, meta_data: pd.DataFrame, pd.DataFrame
    """
    utils.Debug.vprint('Normalizing by multiBatchNorm ... ')
    kwargs, batch_factor_column = process_normalize_args(**kwargs)
    minimum_mean = kwargs.pop('minimum_mean', 50)
    # Calculate size-corrected average gene expression for each batch
    size_corrected_avg = pd.DataFrame(columns=expression_matrix.columns)
    for batch in meta_data[batch_factor_column].unique().tolist():
        batch_df = expression_matrix.loc[meta_data[batch_factor_column] == batch, :]
        # Get UMI counts for each cell
        umi = batch_df.sum(axis=1)
        size_correction_factor = umi / umi.mean()
        # Get the mean size-corrected count values for this batch
        batch_df = batch_df.divide(size_correction_factor, axis=0).mean(axis=0).to_frame().transpose()
        batch_df.index = pd.Index([batch])
        # BUG FIX: DataFrame.append was removed in pandas 2.0; use pd.concat.
        size_corrected_avg = pd.concat([size_corrected_avg, batch_df])
    # Calculate median ratios between every pair of batches
    inter_batch_coefficients = []
    for b1, b2 in itertools.combinations_with_replacement(size_corrected_avg.index.tolist(), r=2):
        # Get the mean size-corrected count values for this batch pair
        b1_series, b2_series = size_corrected_avg.loc[b1, :], size_corrected_avg.loc[b2, :]
        b1_sum, b2_sum = b1_series.sum(), b2_series.sum()
        # calcAverage: keep only genes whose pooled average exceeds minimum_mean
        combined_keep_index = ((b1_series / b1_sum + b2_series / b2_sum) / 2 * (b1_sum + b2_sum) / 2) > minimum_mean
        coeff = (b2_series.loc[combined_keep_index] / b1_series.loc[combined_keep_index]).median()
        # Keep track of the median ratios (both directions)
        inter_batch_coefficients.append((b1, b2, coeff))
        inter_batch_coefficients.append((b2, b1, 1 / coeff))
    inter_batch_coefficients = pd.DataFrame(inter_batch_coefficients, columns=["batch1", "batch2", "coeff"])
    # Rescale everything towards the batch with the smallest coefficient
    inter_batch_minimum = inter_batch_coefficients.loc[inter_batch_coefficients["coeff"].idxmin(), :]
    min_batch = inter_batch_minimum["batch2"]
    # Apply the correction factor to all the data batch-wise. Do this with numpy because pandas is a glacier.
    normed_expression = np.ndarray((0, expression_matrix.shape[1]), dtype=np.dtype(float))
    normed_meta = pd.DataFrame(columns=meta_data.columns)
    for i, row in inter_batch_coefficients.loc[inter_batch_coefficients["batch2"] == min_batch, :].iterrows():
        select_rows = meta_data[batch_factor_column] == row["batch1"]
        umi = expression_matrix.loc[select_rows, :].sum(axis=1)
        size_correction_factor = umi / umi.mean() / row["coeff"]
        corrected_df = expression_matrix.loc[select_rows, :].divide(size_correction_factor, axis=0).values
        normed_expression = np.vstack((normed_expression, corrected_df))
        normed_meta = pd.concat([normed_meta, meta_data.loc[select_rows, :]])
    return pd.DataFrame(normed_expression, index=normed_meta.index, columns=expression_matrix.columns), normed_meta
def impute_magic_expression(expression_matrix, meta_data, **kwargs):
    """
    Use MAGIC (van Dijk et al Cell, 2018, 10.1016/j.cell.2018.05.061) to impute data
    :param expression_matrix: pd.DataFrame
    :param meta_data: pd.DataFrame
    :return imputed, meta_data: pd.DataFrame, pd.DataFrame
    """
    kwargs, random_seed, output_file = process_impute_args(**kwargs)
    # MAGIC is an optional dependency; import here so it only fails when used
    import magic
    utils.Debug.vprint('Imputing data with MAGIC ... ')
    magic_operator = magic.MAGIC(random_state=random_seed, **kwargs)
    imputed_values = magic_operator.fit_transform(expression_matrix.values)
    imputed = pd.DataFrame(imputed_values,
                           index=expression_matrix.index, columns=expression_matrix.columns)
    # Optionally persist the imputed matrix for inspection / reuse
    if output_file is not None:
        imputed.to_csv(output_file, sep="\t")
    return imputed, meta_data
def impute_on_batches(expression_matrix, meta_data, **kwargs):
    """
    Run imputation on separate batches
    :param expression_matrix: pd.DataFrame
    :param meta_data: pd.DataFrame
    :param impute_method: func
        An imputation function from inferelator_ng.single_cell
    :param random_seed: int
        Random seed to put into the imputation method
    :param batch_factor_column: str
        Which meta data column should be used to determine batches
    :return expression_matrix, meta_data: pd.DataFrame, pd.DataFrame
    """
    # Split off the batch column and the seed; discard any output_file setting
    kwargs, batch_factor_column = process_normalize_args(**kwargs)
    kwargs, random_seed, _ = process_impute_args(**kwargs)
    impute_method = kwargs.pop('impute_method', impute_magic_expression)
    # Accumulators for the batch-wise imputed data and matching metadata
    imputed_values = np.ndarray((0, expression_matrix.shape[1]), dtype=np.dtype(float))
    imputed_meta = pd.DataFrame(columns=meta_data.columns)
    for batch_id in meta_data[batch_factor_column].unique().tolist():
        selector = meta_data[batch_factor_column] == batch_id
        batch_imputed, _ = impute_method(expression_matrix.loc[selector, :], None, random_seed=random_seed, **kwargs)
        imputed_values = np.vstack((imputed_values, batch_imputed))
        imputed_meta = pd.concat([imputed_meta, meta_data.loc[selector, :]])
        # Bump the seed so successive batches draw from different random streams
        random_seed += 1
    return pd.DataFrame(imputed_values, index=imputed_meta.index, columns=expression_matrix.columns), imputed_meta
def log10_data(expression_matrix, meta_data, **kwargs):
    """
    Apply a log10(x + 1) transform to the expression data. Any kwargs are ignored.
    :param expression_matrix: pd.DataFrame
    :param meta_data: pd.DataFrame
    :return expression_matrix, meta_data: pd.DataFrame, pd.DataFrame
    """
    utils.Debug.vprint('Logging data [log10+1] ... ')
    transformed = np.log10(expression_matrix + 1)
    return transformed, meta_data
def log2_data(expression_matrix, meta_data, **kwargs):
    """
    Apply a log2(x + 1) transform to the expression data. Any kwargs are ignored.
    :param expression_matrix: pd.DataFrame
    :param meta_data: pd.DataFrame
    :return expression_matrix, meta_data: pd.DataFrame, pd.DataFrame
    """
    utils.Debug.vprint('Logging data [log2+1]... ')
    transformed = np.log2(expression_matrix + 1)
    return transformed, meta_data
def ln_data(expression_matrix, meta_data, **kwargs):
    """
    Apply a ln(x + 1) transform to the expression data. Any kwargs are ignored.
    :param expression_matrix: pd.DataFrame
    :param meta_data: pd.DataFrame
    :return expression_matrix, meta_data: pd.DataFrame, pd.DataFrame
    """
    utils.Debug.vprint('Logging data [ln+1]... ')
    # np.log1p(x) computes ln(1 + x) accurately for small x
    transformed = np.log1p(expression_matrix)
    return transformed, meta_data
def filter_genes_for_var(expression_matrix, meta_data, **kwargs):
    """
    Drop genes whose expression never varies (max equals min across all cells).
    :param expression_matrix: pd.DataFrame [observations x genes]
    :param meta_data: pd.DataFrame
    :return expression_matrix, meta_data: pd.DataFrame, pd.DataFrame
    """
    gene_range = expression_matrix.max(axis=0) - expression_matrix.min(axis=0)
    no_signal = gene_range == 0
    utils.Debug.vprint("Filtering {gn} genes [Var = 0]".format(gn=no_signal.sum()), level=1)
    return expression_matrix.loc[:, ~no_signal], meta_data
def filter_genes_for_count(expression_matrix, meta_data, count_minimum=None, check_for_scaling=False):
    """
    Remove zero-variance genes and, optionally, genes whose mean count is below a threshold.
    :param expression_matrix: pd.DataFrame [observations x genes]
    :param meta_data: pd.DataFrame
    :param count_minimum: float
        Minimum mean count per gene; None skips count filtering entirely
    :param check_for_scaling: bool
        If True, reject matrices containing negative values (scaled data)
    :return expression_matrix, meta_data: pd.DataFrame, pd.DataFrame
    """
    expression_matrix, meta_data = filter_genes_for_var(expression_matrix, meta_data)
    if count_minimum is None:
        return expression_matrix, meta_data
    # Count thresholds only make sense for raw (non-negative) count data
    if check_for_scaling and (expression_matrix < 0).sum().sum() > 0:
        raise ValueError("Negative values in the expression matrix. Count thresholding scaled data is unsupported.")
    count_threshold = count_minimum * expression_matrix.shape[0]
    keep_genes = expression_matrix.sum(axis=0) >= count_threshold
    utils.Debug.vprint("Filtering {gn} genes [Count]".format(gn=expression_matrix.shape[1] - keep_genes.sum()),
                       level=1)
    return expression_matrix.loc[:, keep_genes], meta_data
def process_impute_args(**kwargs):
    """Pop imputation housekeeping options out of kwargs.

    :return: (remaining kwargs, random_seed, output_file)
    """
    output_file = kwargs.pop('output_file', None)
    seed = kwargs.pop('random_seed', DEFAULT_RANDOM_SEED)
    return kwargs, seed, output_file
def process_normalize_args(**kwargs):
    """Pop the batch-column option out of kwargs.

    :return: (remaining kwargs, batch_factor_column)
    """
    batch_column = kwargs.pop('batch_factor_column', DEFAULT_METADATA_FOR_BATCH_CORRECTION)
    return kwargs, batch_column
| [
"pandas.DataFrame",
"magic.MAGIC",
"numpy.log2",
"numpy.dtype",
"pandas.Index",
"inferelator_ng.utils.Debug.vprint",
"numpy.log10",
"pandas.concat",
"numpy.vstack",
"numpy.log1p"
] | [((1189, 1247), 'inferelator_ng.utils.Debug.vprint', 'utils.Debug.vprint', (['"""Normalizing UMI counts per cell ... """'], {}), "('Normalizing UMI counts per cell ... ')\n", (1207, 1247), False, 'from inferelator_ng import utils\n'), ((2123, 2191), 'inferelator_ng.utils.Debug.vprint', 'utils.Debug.vprint', (['"""Normalizing median counts between batches ... """'], {}), "('Normalizing median counts between batches ... ')\n", (2141, 2191), False, 'from inferelator_ng import utils\n'), ((2363, 2442), 'pandas.DataFrame', 'pd.DataFrame', (["{'umi': umi, batch_factor_column: meta_data[batch_factor_column]}"], {}), "({'umi': umi, batch_factor_column: meta_data[batch_factor_column]})\n", (2375, 2442), True, 'import pandas as pd\n'), ((3450, 3520), 'inferelator_ng.utils.Debug.vprint', 'utils.Debug.vprint', (['"""Normalizing to median counts within batches ... """'], {}), "('Normalizing to median counts within batches ... ')\n", (3468, 3520), False, 'from inferelator_ng import utils\n'), ((3692, 3771), 'pandas.DataFrame', 'pd.DataFrame', (["{'umi': umi, batch_factor_column: meta_data[batch_factor_column]}"], {}), "({'umi': umi, batch_factor_column: meta_data[batch_factor_column]})\n", (3704, 3771), True, 'import pandas as pd\n'), ((4756, 4812), 'inferelator_ng.utils.Debug.vprint', 'utils.Debug.vprint', (['"""Normalizing by multiBatchNorm ... """'], {}), "('Normalizing by multiBatchNorm ... 
')\n", (4774, 4812), False, 'from inferelator_ng import utils\n'), ((5026, 5073), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'expression_matrix.columns'}), '(columns=expression_matrix.columns)\n', (5038, 5073), True, 'import pandas as pd\n'), ((6481, 6558), 'pandas.DataFrame', 'pd.DataFrame', (['inter_batch_coefficients'], {'columns': "['batch1', 'batch2', 'coeff']"}), "(inter_batch_coefficients, columns=['batch1', 'batch2', 'coeff'])\n", (6493, 6558), True, 'import pandas as pd\n'), ((6928, 6967), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'meta_data.columns'}), '(columns=meta_data.columns)\n', (6940, 6967), True, 'import pandas as pd\n'), ((8055, 8106), 'inferelator_ng.utils.Debug.vprint', 'utils.Debug.vprint', (['"""Imputing data with MAGIC ... """'], {}), "('Imputing data with MAGIC ... ')\n", (8073, 8106), False, 'from inferelator_ng import utils\n'), ((9441, 9480), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'meta_data.columns'}), '(columns=meta_data.columns)\n', (9453, 9480), True, 'import pandas as pd\n'), ((10253, 10302), 'inferelator_ng.utils.Debug.vprint', 'utils.Debug.vprint', (['"""Logging data [log10+1] ... """'], {}), "('Logging data [log10+1] ... ')\n", (10271, 10302), False, 'from inferelator_ng import utils\n'), ((10670, 10717), 'inferelator_ng.utils.Debug.vprint', 'utils.Debug.vprint', (['"""Logging data [log2+1]... """'], {}), "('Logging data [log2+1]... ')\n", (10688, 10717), False, 'from inferelator_ng import utils\n'), ((11080, 11125), 'inferelator_ng.utils.Debug.vprint', 'utils.Debug.vprint', (['"""Logging data [ln+1]... """'], {}), "('Logging data [ln+1]... 
')\n", (11098, 11125), False, 'from inferelator_ng import utils\n'), ((5546, 5563), 'pandas.Index', 'pd.Index', (['[batch]'], {}), '([batch])\n', (5554, 5563), True, 'import pandas as pd\n'), ((7414, 7458), 'numpy.vstack', 'np.vstack', (['(normed_expression, corrected_df)'], {}), '((normed_expression, corrected_df))\n', (7423, 7458), True, 'import numpy as np\n'), ((7481, 7536), 'pandas.concat', 'pd.concat', (['[normed_meta, meta_data.loc[select_rows, :]]'], {}), '([normed_meta, meta_data.loc[select_rows, :]])\n', (7490, 7536), True, 'import pandas as pd\n'), ((7549, 7645), 'pandas.DataFrame', 'pd.DataFrame', (['normed_expression'], {'index': 'normed_meta.index', 'columns': 'expression_matrix.columns'}), '(normed_expression, index=normed_meta.index, columns=\n expression_matrix.columns)\n', (7561, 7645), True, 'import pandas as pd\n'), ((9702, 9745), 'numpy.vstack', 'np.vstack', (['(bc_expression, batch_corrected)'], {}), '((bc_expression, batch_corrected))\n', (9711, 9745), True, 'import numpy as np\n'), ((9764, 9808), 'pandas.concat', 'pd.concat', (['[bc_meta, meta_data.loc[rows, :]]'], {}), '([bc_meta, meta_data.loc[rows, :]])\n', (9773, 9808), True, 'import pandas as pd\n'), ((9845, 9933), 'pandas.DataFrame', 'pd.DataFrame', (['bc_expression'], {'index': 'bc_meta.index', 'columns': 'expression_matrix.columns'}), '(bc_expression, index=bc_meta.index, columns=expression_matrix.\n columns)\n', (9857, 9933), True, 'import pandas as pd\n'), ((10314, 10345), 'numpy.log10', 'np.log10', (['(expression_matrix + 1)'], {}), '(expression_matrix + 1)\n', (10322, 10345), True, 'import numpy as np\n'), ((10729, 10759), 'numpy.log2', 'np.log2', (['(expression_matrix + 1)'], {}), '(expression_matrix + 1)\n', (10736, 10759), True, 'import numpy as np\n'), ((11137, 11164), 'numpy.log1p', 'np.log1p', (['expression_matrix'], {}), '(expression_matrix)\n', (11145, 11164), True, 'import numpy as np\n'), ((6893, 6908), 'numpy.dtype', 'np.dtype', (['float'], {}), '(float)\n', (6901, 
6908), True, 'import numpy as np\n'), ((9410, 9425), 'numpy.dtype', 'np.dtype', (['float'], {}), '(float)\n', (9418, 9425), True, 'import numpy as np\n'), ((8134, 8181), 'magic.MAGIC', 'magic.MAGIC', ([], {'random_state': 'random_seed'}), '(random_state=random_seed, **kwargs)\n', (8145, 8181), False, 'import magic\n')] |
import numpy as np
import pandas as pd
from scipy.stats import norm
import unittest
import networkx as nx
from context import grama as gr
from context import models
## FD stepsize
# Step used by the forward-difference Jacobian checks in the tests below
h = 1e-8
## Core function tests
##################################################
class TestModel(unittest.TestCase):
    """Test implementation of model
    """
    def setUp(self):
        # Default model
        # DataFrame whose columns match no model input; used to exercise
        # input validation below
        self.df_wrong = pd.DataFrame(data={"z": [0.0, 1.0]})
        # 2D identity model with permuted df inputs
        domain_2d = gr.Domain(bounds={"x0": [-1.0, +1.0], "x1": [0.0, 1.0]})
        marginals = {}
        marginals["x0"] = gr.MarginalNamed(
            d_name="uniform", d_param={"loc": -1, "scale": 2}
        )
        # NOTE(review): sign=-1 flips the marginal's orientation — confirm
        # against gr.MarginalNamed's documentation
        marginals["x1"] = gr.MarginalNamed(
            sign=-1, d_name="uniform", d_param={"loc": 0, "scale": 1},
        )
        # Identity map (x0, x1) -> (y0, y1), declared runtime 0
        self.model_2d = gr.Model(
            functions=[
                gr.Function(
                    lambda x: [x[0], x[1]], ["x0", "x1"], ["y0", "y1"], "test", 0
                ),
            ],
            domain=domain_2d,
            density=gr.Density(marginals=marginals),
        )
        # Input frame deliberately lists columns in permuted order
        self.df_2d = pd.DataFrame(data={"x1": [0.0], "x0": [+1.0]})
        self.res_2d = self.model_2d.evaluate_df(self.df_2d)
        # Median probability levels and their expected sample-space image
        self.df_median_in = pd.DataFrame({"x0": [0.5], "x1": [0.5]})
        self.df_median_out = pd.DataFrame({"x0": [0.0], "x1": [0.5]})
        # 3D summation model (reuses the marginals dict above)
        self.model_3d = gr.Model(
            functions=[
                gr.Function(
                    lambda x: x[0] + x[1] + x[2], ["x", "y", "z"], ["f"], "test", 0
                )
            ],
            density=gr.Density(marginals=marginals),
        )
        ## Timing check
        # Two functions with declared runtime 1 each; total should be 2
        self.model_slow = gr.Model(
            functions=[
                gr.Function(lambda x: x, ["x0"], ["y0"], "f0", 1),
                gr.Function(lambda x: x, ["x0"], ["y1"], "f1", 1),
            ]
        )
    def test_prints(self):
        """printpretty() should run without raising."""
        ## Invoke printpretty
        self.model_3d.printpretty()
    def test_timings(self):
        """Runtime estimation and runtime messages."""
        ## Default is zero
        self.assertTrue(self.model_2d.runtime(1) == 0)
        ## Estimation accounts for both functions
        self.assertTrue(np.allclose(self.model_slow.runtime(1), 2))
        ## Fast function has empty message
        self.assertTrue(self.model_2d.runtime_message(self.df_2d) is None)
        ## Slow function returns string message
        msg = self.model_slow.runtime_message(pd.DataFrame({"x0": [0]}))
        self.assertTrue(isinstance(msg, str))
    ## Basic functionality with default arguments
    def test_catch_input_mismatch(self):
        """Checks that proper exception is thrown if evaluate(df) passed a
        DataFrame without the proper columns.
        """
        self.assertRaises(ValueError, self.model_2d.evaluate_df, self.df_wrong)
    def test_var_outer(self):
        """var_outer() pass-throughs and input validation."""
        ## Test pass-throughs
        df_test = pd.DataFrame(dict(x0=[0]))
        # Model with no random variables: only deterministic input
        md_no_rand = gr.Model() >> gr.cp_function(fun=lambda x: x, var=1, out=1)
        md_no_rand.var_outer(pd.DataFrame(), df_det="nom")
        # Model with no deterministic variables: only random input
        md_no_det = md_no_rand >> gr.cp_marginals(
            x0={"dist": "uniform", "loc": 0, "scale": 1}
        )
        md_no_det.var_outer(df_test, df_det="nom")
        ## Test assertions
        # df_det omitted entirely
        with self.assertRaises(ValueError):
            self.model_3d.var_outer(self.df_2d)
        # df_det given an unrecognized string flag
        with self.assertRaises(ValueError):
            self.model_3d.var_outer(self.df_2d, df_det="foo")
        # df_det given a frame with the wrong columns
        with self.assertRaises(ValueError):
            self.model_3d.var_outer(self.df_2d, df_det=self.df_2d)
    def test_drop_out(self):
        """Checks that output column names are properly dropped"""
        md = gr.Model() >> gr.cp_function(lambda x: x[0] + 1, var=1, out=1)
        # Input frame already carries a stale y0 column; evaluation should
        # replace it with the computed values
        df_in = gr.df_make(x0=[0, 1, 2], y0=[0, 1, 2])
        df_true = gr.df_make(x0=[0, 1, 2], y0=[1, 2, 3])
        df_res = md >> gr.ev_df(df=df_in)
        self.assertTrue(gr.df_equal(df_res, df_true, close=True))
    ## Test re-ordering issues
    def test_2d_output_names(self):
        """Checks that proper output names are assigned to resulting DataFrame
        """
        self.assertEqual(
            set(self.model_2d.evaluate_df(self.df_2d).columns), set(self.model_2d.out)
        )
    def test_quantile(self):
        """Checks that model.sample_quantile() evaluates correctly.
        """
        df_res = self.model_2d.density.pr2sample(self.df_median_in)
        self.assertTrue(gr.df_equal(df_res, self.df_median_out))
    def test_empty_functions(self):
        """A model with bounds but no functions cannot be evaluated."""
        md = gr.Model() >> gr.cp_bounds(x=[-1, +1])
        with self.assertRaises(ValueError):
            gr.eval_nominal(md)
    def test_nominal(self):
        """Checks the implementation of nominal values"""
        # Bounds include one-sided infinite intervals
        md = gr.Model() >> gr.cp_bounds(
            x0=[-1, +1], x1=[0.1, np.Inf], x2=[-np.Inf, -0.1],
        )
        # Expected: midpoint for the finite bound, finite endpoint otherwise
        df_true = gr.df_make(x0=0.0, x1=+0.1, x2=-0.1)
        df_res = gr.eval_nominal(md, df_det="nom", skip=True)
        self.assertTrue(gr.df_equal(df_res, df_true))
    ## Test sample transforms
    def test_transforms(self):
        """Round-trip and Jacobian checks for the model's z/x transforms."""
        ## Setup
        df_corr = pd.DataFrame(dict(var1=["x"], var2=["y"], corr=[0.5]))
        # NOTE(review): Sigma_h is computed but unused in this test (it
        # mirrors the CopulaGaussian test); consider removing
        Sigma_h = np.linalg.cholesky(np.array([[1.0, 0.5], [0.5, 1.0]]))
        md = (
            gr.Model()
            >> gr.cp_marginals(
                x=dict(dist="norm", loc=0, scale=1), y=dict(dist="norm", loc=0, scale=1)
            )
            >> gr.cp_copula_gaussian(df_corr=df_corr)
        )
        ## Copula and marginals have same var_rand order
        self.assertTrue(list(md.density.marginals) == md.density.copula.var_rand)
        ## Transforms invariant
        z = np.array([0, 0])
        x = md.z2x(z)
        zp = md.x2z(x)
        self.assertTrue(np.all(z == zp))
        df_z = gr.df_make(x=0.0, y=0.0)
        df_x = md.norm2rand(df_z)
        df_zp = md.rand2norm(df_x)
        self.assertTrue(gr.df_equal(df_z, df_zp))
        ## Jacobian accurate
        # Forward-difference approximation with module-level stepsize h
        dxdz_fd = np.zeros((2, 2))
        dxdz_fd[0, :] = (md.z2x(z + np.array([h, 0])) - md.z2x(z)) / h
        dxdz_fd[1, :] = (md.z2x(z + np.array([0, h])) - md.z2x(z)) / h
        dxdz_p = md.dxdz(z)
        self.assertTrue(np.allclose(dxdz_fd, dxdz_p))
    ## Test DAG construction
    def test_dag(self):
        """The model DAG must be isomorphic to a hand-built reference."""
        md = (
            gr.Model("model")
            >> gr.cp_function(lambda x: x, var=1, out=1)
            >> gr.cp_function(lambda x: x[0] + x[1], var=["x0", "y0"], out=1)
        )
        # Reference graph: (var) feeds f0 and f1; f0's output also feeds f1
        G_true = nx.DiGraph()
        G_true.add_edge("(var)", "f0", label="{}".format({"x0"}))
        G_true.add_edge("f0", "(out)", label="{}".format({"y0"}))
        G_true.add_edge("(var)", "f1", label="{}".format({"x0"}))
        G_true.add_edge("f0", "f1", label="{}".format({"y0"}))
        G_true.add_edge("f1", "(out)", label="{}".format({"y1"}))
        nx.set_node_attributes(G_true, "model", "parent")
        # Compare with exact node/edge attribute matching
        self.assertTrue(
            nx.is_isomorphic(
                md.make_dag(),
                G_true,
                node_match=lambda u, v: u == v,
                edge_match=lambda u, v: u == v,
            )
        )
class TestEvalDf(unittest.TestCase):
    """Tests for gr.eval_df()."""
    def setUp(self):
        # Shared fixture: the package's built-in test model
        self.model = models.make_test()
    def test_catch_no_df(self):
        """eval_df() must raise ValueError when no input df is given."""
        with self.assertRaises(ValueError):
            gr.eval_df(self.model)
class TestMarginal(unittest.TestCase):
    """Tests for gr.MarginalNamed against scipy's normal distribution."""
    def setUp(self):
        # Standard-normal marginal fixture
        self.marginal_named = gr.MarginalNamed(
            d_name="norm", d_param={"loc": 0, "scale": 1}
        )
    def test_fcn(self):
        """Density, CDF, and quantile must match scipy.stats.norm."""
        # summary() should run without raising
        self.marginal_named.summary()
        # Each functional evaluated at 0.5 against the scipy reference
        self.assertEqual(self.marginal_named.l(0.5), norm.pdf(0.5))
        self.assertEqual(self.marginal_named.p(0.5), norm.cdf(0.5))
        self.assertEqual(self.marginal_named.q(0.5), norm.ppf(0.5))
# --------------------------------------------------
class TestDomain(unittest.TestCase):
    """Tests for gr.Domain construction and bound summaries."""
    def setUp(self):
        # Domain with a single bounded variable
        self.domain = gr.Domain(bounds={"x": (0, 1)})
    def test_blank(self):
        """Blank domains are valid; summaries behave for (un)bounded vars."""
        # Constructing an empty domain must not raise
        gr.Domain()
        # Summary of a declared bound runs without raising
        self.domain.bound_summary("x")
        # An undeclared variable reports itself as unbounded
        self.assertNotEqual(self.domain.bound_summary("y").find("unbounded"), -1)
# --------------------------------------------------
class TestDensity(unittest.TestCase):
    """Tests for gr.Density and its copula implementations."""
    def setUp(self):
        # Two uniform marginals on [-1, 1] tied by a Gaussian copula
        # with correlation 0.5
        self.density = gr.Density(
            marginals=dict(
                x=gr.MarginalNamed(d_name="uniform", d_param={"loc": -1, "scale": 2}),
                y=gr.MarginalNamed(d_name="uniform", d_param={"loc": -1, "scale": 2}),
            ),
            copula=gr.CopulaGaussian(
                ["x", "y"], pd.DataFrame(dict(var1=["x"], var2=["y"], corr=[0.5]))
            ),
        )
    def test_copula_warning(self):
        """Sampling a density without a copula must raise ValueError."""
        md = gr.Model()
        with self.assertRaises(ValueError):
            md.density.sample()
    def test_CopulaIndependence(self):
        """Sampling, round-trip, and Jacobian of the independence copula."""
        copula = gr.CopulaIndependence(var_rand=["x", "y"])
        df_res = copula.sample(seed=101)
        self.assertTrue(set(df_res.columns) == set(["x", "y"]))
        ## Transforms invariant
        z = np.array([0, 0])
        u = copula.z2u(z)
        zp = copula.u2z(u)
        self.assertTrue(np.all(z == zp))
        ## Jacobian accurate
        # Forward-difference approximation with module-level stepsize h
        dudz_fd = np.zeros((2, 2))
        dudz_fd[0, :] = (copula.z2u(z + np.array([h, 0])) - copula.z2u(z)) / h
        dudz_fd[1, :] = (copula.z2u(z + np.array([0, h])) - copula.z2u(z)) / h
        dudz_p = copula.dudz(z)
        self.assertTrue(np.allclose(dudz_fd, dudz_p))
    def test_CopulaGaussian(self):
        """Construction, validation, round-trip, and Jacobian of the
        Gaussian copula."""
        df_corr = pd.DataFrame(dict(var1=["x"], var2=["y"], corr=[0.5]))
        # Reference Cholesky factor of the 2x2 correlation matrix
        Sigma_h = np.linalg.cholesky(np.array([[1.0, 0.5], [0.5, 1.0]]))
        copula = gr.CopulaGaussian(["x", "y"], df_corr=df_corr)
        df_res = copula.sample(seed=101)
        # BUGFIX: was `.all` (a bound method, always truthy, so the assert
        # was vacuous); call it so the comparison is actually checked
        self.assertTrue(np.isclose(copula.Sigma_h, Sigma_h).all())
        self.assertTrue(set(df_res.columns) == set(["x", "y"]))
        ## Test raises
        # Duplicate var1 entries make the correlation spec invalid
        df_corr_invalid = pd.DataFrame(
            dict(var1=["x", "x"], var2=["y", "z"], corr=[0, 0])
        )
        with self.assertRaises(ValueError):
            gr.CopulaGaussian(["x", "y"], df_corr=df_corr_invalid)
        ## Transforms invariant
        z = np.array([0, 0])
        u = copula.z2u(z)
        zp = copula.u2z(u)
        self.assertTrue(np.all(z == zp))
        ## Jacobian accurate
        # Forward-difference approximation with module-level stepsize h
        dudz_fd = np.zeros((2, 2))
        dudz_fd[0, :] = (copula.z2u(z + np.array([h, 0])) - copula.z2u(z)) / h
        dudz_fd[1, :] = (copula.z2u(z + np.array([0, h])) - copula.z2u(z)) / h
        dudz_p = copula.dudz(z)
        self.assertTrue(np.allclose(dudz_fd, dudz_p))
    def test_conversion(self):
        """pr2sample and sample2pr must be mutually inverse at the median."""
        df_pr_true = pd.DataFrame(dict(x=[0.5], y=[0.5]))
        df_sp_true = pd.DataFrame(dict(x=[0.0], y=[0.0]))
        df_pr_res = self.density.sample2pr(df_sp_true)
        df_sp_res = self.density.pr2sample(df_pr_true)
        self.assertTrue(gr.df_equal(df_pr_true, df_pr_res))
        self.assertTrue(gr.df_equal(df_sp_true, df_sp_res))
    def test_sampling(self):
        """Samples carry one column per marginal."""
        df_sample = self.density.sample(n=1, seed=101)
        self.assertTrue(set(df_sample.columns) == set(["x", "y"]))
# --------------------------------------------------
class TestFunction(unittest.TestCase):
    """Tests for gr.Function, gr.FunctionVectorized, and gr.FunctionModel."""
    def setUp(self):
        # Identity functions plus matching / mismatching input frames
        self.fcn = gr.Function(lambda x: x, ["x"], ["x"], "test", 0)
        self.fcn_vec = gr.FunctionVectorized(lambda df: df, ["x"], ["x"], "test", 0)
        self.df = pd.DataFrame({"x": [0]})
        self.df_wrong = pd.DataFrame({"z": [0]})
    def _check_copy_metadata(self, original, duplicate):
        # A copy must carry over var/out/name unchanged
        self.assertEqual(original.var, duplicate.var)
        self.assertEqual(original.out, duplicate.out)
        self.assertEqual(original.name, duplicate.name)
    def test_function(self):
        """Copying preserves metadata; eval round-trips; bad input raises."""
        self._check_copy_metadata(self.fcn, self.fcn.copy())
        pd.testing.assert_frame_equal(
            self.df, self.fcn.eval(self.df), check_dtype=False
        )
        with self.assertRaises(ValueError):
            self.fcn.eval(self.df_wrong)
        # summary() should run without raising
        self.fcn.summary()
    def test_function_vectorized(self):
        """Vectorized copy preserves metadata; eval is the identity."""
        self._check_copy_metadata(self.fcn_vec, self.fcn_vec.copy())
        pd.testing.assert_frame_equal(
            self.df, self.fcn_vec.eval(self.df), check_dtype=False
        )
    def test_function_model(self):
        """FunctionModel mirrors the wrapped model's metadata and runtime."""
        md_base = gr.Model() >> gr.cp_function(
            fun=lambda x: x, var=1, out=1, name="name", runtime=1
        )
        ## Base constructor lifts metadata from the model
        func = gr.FunctionModel(md_base)
        self.assertEqual(md_base.var, func.var)
        self.assertEqual(md_base.out, func.out)
        self.assertEqual(md_base.name, func.name)
        self.assertEqual(md_base.runtime(1), func.runtime)
        ## Copies keep the same metadata and runtime
        func_copy = func.copy()
        self.assertEqual(func_copy.var, func.var)
        self.assertEqual(func_copy.out, func.out)
        self.assertEqual(func_copy.name, func.name)
        self.assertEqual(func_copy.runtime, func.runtime)
## Run tests
# Execute the unittest runner when invoked as a script
if __name__ == "__main__":
    unittest.main()
| [
"context.grama.df_equal",
"context.grama.Domain",
"numpy.allclose",
"context.grama.eval_nominal",
"numpy.isclose",
"context.grama.MarginalNamed",
"unittest.main",
"pandas.DataFrame",
"context.grama.cp_copula_gaussian",
"context.grama.FunctionModel",
"context.grama.Model",
"scipy.stats.norm.cdf... | [((13386, 13401), 'unittest.main', 'unittest.main', ([], {}), '()\n', (13399, 13401), False, 'import unittest\n'), ((416, 452), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'z': [0.0, 1.0]}"}), "(data={'z': [0.0, 1.0]})\n", (428, 452), True, 'import pandas as pd\n'), ((526, 582), 'context.grama.Domain', 'gr.Domain', ([], {'bounds': "{'x0': [-1.0, +1.0], 'x1': [0.0, 1.0]}"}), "(bounds={'x0': [-1.0, +1.0], 'x1': [0.0, 1.0]})\n", (535, 582), True, 'from context import grama as gr\n'), ((632, 699), 'context.grama.MarginalNamed', 'gr.MarginalNamed', ([], {'d_name': '"""uniform"""', 'd_param': "{'loc': -1, 'scale': 2}"}), "(d_name='uniform', d_param={'loc': -1, 'scale': 2})\n", (648, 699), True, 'from context import grama as gr\n'), ((748, 823), 'context.grama.MarginalNamed', 'gr.MarginalNamed', ([], {'sign': '(-1)', 'd_name': '"""uniform"""', 'd_param': "{'loc': 0, 'scale': 1}"}), "(sign=-1, d_name='uniform', d_param={'loc': 0, 'scale': 1})\n", (764, 823), True, 'from context import grama as gr\n'), ((1165, 1211), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'x1': [0.0], 'x0': [+1.0]}"}), "(data={'x1': [0.0], 'x0': [+1.0]})\n", (1177, 1211), True, 'import pandas as pd\n'), ((1301, 1341), 'pandas.DataFrame', 'pd.DataFrame', (["{'x0': [0.5], 'x1': [0.5]}"], {}), "({'x0': [0.5], 'x1': [0.5]})\n", (1313, 1341), True, 'import pandas as pd\n'), ((1371, 1411), 'pandas.DataFrame', 'pd.DataFrame', (["{'x0': [0.0], 'x1': [0.5]}"], {}), "({'x0': [0.0], 'x1': [0.5]})\n", (1383, 1411), True, 'import pandas as pd\n'), ((3784, 3822), 'context.grama.df_make', 'gr.df_make', ([], {'x0': '[0, 1, 2]', 'y0': '[0, 1, 2]'}), '(x0=[0, 1, 2], y0=[0, 1, 2])\n', (3794, 3822), True, 'from context import grama as gr\n'), ((3841, 3879), 'context.grama.df_make', 'gr.df_make', ([], {'x0': '[0, 1, 2]', 'y0': '[1, 2, 3]'}), '(x0=[0, 1, 2], y0=[1, 2, 3])\n', (3851, 3879), True, 'from context import grama as gr\n'), ((4901, 4937), 'context.grama.df_make', 
'gr.df_make', ([], {'x0': '(0.0)', 'x1': '(+0.1)', 'x2': '(-0.1)'}), '(x0=0.0, x1=+0.1, x2=-0.1)\n', (4911, 4937), True, 'from context import grama as gr\n'), ((4955, 4999), 'context.grama.eval_nominal', 'gr.eval_nominal', (['md'], {'df_det': '"""nom"""', 'skip': '(True)'}), "(md, df_det='nom', skip=True)\n", (4970, 4999), True, 'from context import grama as gr\n'), ((5703, 5719), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (5711, 5719), True, 'import numpy as np\n'), ((5823, 5847), 'context.grama.df_make', 'gr.df_make', ([], {'x': '(0.0)', 'y': '(0.0)'}), '(x=0.0, y=0.0)\n', (5833, 5847), True, 'from context import grama as gr\n'), ((6016, 6032), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (6024, 6032), True, 'import numpy as np\n'), ((6521, 6533), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (6531, 6533), True, 'import networkx as nx\n'), ((6869, 6918), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['G_true', '"""model"""', '"""parent"""'], {}), "(G_true, 'model', 'parent')\n", (6891, 6918), True, 'import networkx as nx\n'), ((7280, 7298), 'context.models.make_test', 'models.make_test', ([], {}), '()\n', (7296, 7298), False, 'from context import models\n'), ((7565, 7628), 'context.grama.MarginalNamed', 'gr.MarginalNamed', ([], {'d_name': '"""norm"""', 'd_param': "{'loc': 0, 'scale': 1}"}), "(d_name='norm', d_param={'loc': 0, 'scale': 1})\n", (7581, 7628), True, 'from context import grama as gr\n'), ((8134, 8165), 'context.grama.Domain', 'gr.Domain', ([], {'bounds': "{'x': (0, 1)}"}), "(bounds={'x': (0, 1)})\n", (8143, 8165), True, 'from context import grama as gr\n'), ((8236, 8247), 'context.grama.Domain', 'gr.Domain', ([], {}), '()\n', (8245, 8247), True, 'from context import grama as gr\n'), ((8982, 8992), 'context.grama.Model', 'gr.Model', ([], {}), '()\n', (8990, 8992), True, 'from context import grama as gr\n'), ((9127, 9169), 'context.grama.CopulaIndependence', 'gr.CopulaIndependence', ([], {'var_rand': 
"['x', 'y']"}), "(var_rand=['x', 'y'])\n", (9148, 9169), True, 'from context import grama as gr\n'), ((9321, 9337), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (9329, 9337), True, 'import numpy as np\n'), ((9481, 9497), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (9489, 9497), True, 'import numpy as np\n'), ((9942, 9988), 'context.grama.CopulaGaussian', 'gr.CopulaGaussian', (["['x', 'y']"], {'df_corr': 'df_corr'}), "(['x', 'y'], df_corr=df_corr)\n", (9959, 9988), True, 'from context import grama as gr\n'), ((10455, 10471), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (10463, 10471), True, 'import numpy as np\n'), ((10615, 10631), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (10623, 10631), True, 'import numpy as np\n'), ((11544, 11593), 'context.grama.Function', 'gr.Function', (['(lambda x: x)', "['x']", "['x']", '"""test"""', '(0)'], {}), "(lambda x: x, ['x'], ['x'], 'test', 0)\n", (11555, 11593), True, 'from context import grama as gr\n'), ((11618, 11679), 'context.grama.FunctionVectorized', 'gr.FunctionVectorized', (['(lambda df: df)', "['x']", "['x']", '"""test"""', '(0)'], {}), "(lambda df: df, ['x'], ['x'], 'test', 0)\n", (11639, 11679), True, 'from context import grama as gr\n'), ((11699, 11723), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': [0]}"], {}), "({'x': [0]})\n", (11711, 11723), True, 'import pandas as pd\n'), ((11749, 11773), 'pandas.DataFrame', 'pd.DataFrame', (["{'z': [0]}"], {}), "({'z': [0]})\n", (11761, 11773), True, 'import pandas as pd\n'), ((12835, 12860), 'context.grama.FunctionModel', 'gr.FunctionModel', (['md_base'], {}), '(md_base)\n', (12851, 12860), True, 'from context import grama as gr\n'), ((2461, 2486), 'pandas.DataFrame', 'pd.DataFrame', (["{'x0': [0]}"], {}), "({'x0': [0]})\n", (2473, 2486), True, 'import pandas as pd\n'), ((2967, 2977), 'context.grama.Model', 'gr.Model', ([], {}), '()\n', (2975, 2977), True, 'from context import grama as gr\n'), ((2981, 3026), 
'context.grama.cp_function', 'gr.cp_function', ([], {'fun': '(lambda x: x)', 'var': '(1)', 'out': '(1)'}), '(fun=lambda x: x, var=1, out=1)\n', (2995, 3026), True, 'from context import grama as gr\n'), ((3056, 3070), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3068, 3070), True, 'import pandas as pd\n'), ((3121, 3182), 'context.grama.cp_marginals', 'gr.cp_marginals', ([], {'x0': "{'dist': 'uniform', 'loc': 0, 'scale': 1}"}), "(x0={'dist': 'uniform', 'loc': 0, 'scale': 1})\n", (3136, 3182), True, 'from context import grama as gr\n'), ((3705, 3715), 'context.grama.Model', 'gr.Model', ([], {}), '()\n', (3713, 3715), True, 'from context import grama as gr\n'), ((3719, 3767), 'context.grama.cp_function', 'gr.cp_function', (['(lambda x: x[0] + 1)'], {'var': '(1)', 'out': '(1)'}), '(lambda x: x[0] + 1, var=1, out=1)\n', (3733, 3767), True, 'from context import grama as gr\n'), ((3904, 3922), 'context.grama.ev_df', 'gr.ev_df', ([], {'df': 'df_in'}), '(df=df_in)\n', (3912, 3922), True, 'from context import grama as gr\n'), ((3948, 3988), 'context.grama.df_equal', 'gr.df_equal', (['df_res', 'df_true'], {'close': '(True)'}), '(df_res, df_true, close=True)\n', (3959, 3988), True, 'from context import grama as gr\n'), ((4476, 4515), 'context.grama.df_equal', 'gr.df_equal', (['df_res', 'self.df_median_out'], {}), '(df_res, self.df_median_out)\n', (4487, 4515), True, 'from context import grama as gr\n'), ((4567, 4577), 'context.grama.Model', 'gr.Model', ([], {}), '()\n', (4575, 4577), True, 'from context import grama as gr\n'), ((4581, 4605), 'context.grama.cp_bounds', 'gr.cp_bounds', ([], {'x': '[-1, +1]'}), '(x=[-1, +1])\n', (4593, 4605), True, 'from context import grama as gr\n'), ((4662, 4681), 'context.grama.eval_nominal', 'gr.eval_nominal', (['md'], {}), '(md)\n', (4677, 4681), True, 'from context import grama as gr\n'), ((4782, 4792), 'context.grama.Model', 'gr.Model', ([], {}), '()\n', (4790, 4792), True, 'from context import grama as gr\n'), ((4796, 4859), 
'context.grama.cp_bounds', 'gr.cp_bounds', ([], {'x0': '[-1, +1]', 'x1': '[0.1, np.Inf]', 'x2': '[-np.Inf, -0.1]'}), '(x0=[-1, +1], x1=[0.1, np.Inf], x2=[-np.Inf, -0.1])\n', (4808, 4859), True, 'from context import grama as gr\n'), ((5025, 5053), 'context.grama.df_equal', 'gr.df_equal', (['df_res', 'df_true'], {}), '(df_res, df_true)\n', (5036, 5053), True, 'from context import grama as gr\n'), ((5244, 5278), 'numpy.array', 'np.array', (['[[1.0, 0.5], [0.5, 1.0]]'], {}), '([[1.0, 0.5], [0.5, 1.0]])\n', (5252, 5278), True, 'import numpy as np\n'), ((5469, 5507), 'context.grama.cp_copula_gaussian', 'gr.cp_copula_gaussian', ([], {'df_corr': 'df_corr'}), '(df_corr=df_corr)\n', (5490, 5507), True, 'from context import grama as gr\n'), ((5790, 5805), 'numpy.all', 'np.all', (['(z == zp)'], {}), '(z == zp)\n', (5796, 5805), True, 'import numpy as np\n'), ((5942, 5966), 'context.grama.df_equal', 'gr.df_equal', (['df_z', 'df_zp'], {}), '(df_z, df_zp)\n', (5953, 5966), True, 'from context import grama as gr\n'), ((6228, 6256), 'numpy.allclose', 'np.allclose', (['dxdz_fd', 'dxdz_p'], {}), '(dxdz_fd, dxdz_p)\n', (6239, 6256), True, 'import numpy as np\n'), ((6430, 6492), 'context.grama.cp_function', 'gr.cp_function', (['(lambda x: x[0] + x[1])'], {'var': "['x0', 'y0']", 'out': '(1)'}), "(lambda x: x[0] + x[1], var=['x0', 'y0'], out=1)\n", (6444, 6492), True, 'from context import grama as gr\n'), ((9416, 9431), 'numpy.all', 'np.all', (['(z == zp)'], {}), '(z == zp)\n', (9422, 9431), True, 'import numpy as np\n'), ((9713, 9741), 'numpy.allclose', 'np.allclose', (['dudz_fd', 'dudz_p'], {}), '(dudz_fd, dudz_p)\n', (9724, 9741), True, 'import numpy as np\n'), ((9889, 9923), 'numpy.array', 'np.array', (['[[1.0, 0.5], [0.5, 1.0]]'], {}), '([[1.0, 0.5], [0.5, 1.0]])\n', (9897, 9923), True, 'import numpy as np\n'), ((10355, 10409), 'context.grama.CopulaGaussian', 'gr.CopulaGaussian', (["['x', 'y']"], {'df_corr': 'df_corr_invalid'}), "(['x', 'y'], df_corr=df_corr_invalid)\n", (10372, 
10409), True, 'from context import grama as gr\n'), ((10550, 10565), 'numpy.all', 'np.all', (['(z == zp)'], {}), '(z == zp)\n', (10556, 10565), True, 'import numpy as np\n'), ((10847, 10875), 'numpy.allclose', 'np.allclose', (['dudz_fd', 'dudz_p'], {}), '(dudz_fd, dudz_p)\n', (10858, 10875), True, 'import numpy as np\n'), ((11161, 11195), 'context.grama.df_equal', 'gr.df_equal', (['df_pr_true', 'df_pr_res'], {}), '(df_pr_true, df_pr_res)\n', (11172, 11195), True, 'from context import grama as gr\n'), ((11221, 11255), 'context.grama.df_equal', 'gr.df_equal', (['df_sp_true', 'df_sp_res'], {}), '(df_sp_true, df_sp_res)\n', (11232, 11255), True, 'from context import grama as gr\n'), ((12685, 12695), 'context.grama.Model', 'gr.Model', ([], {}), '()\n', (12693, 12695), True, 'from context import grama as gr\n'), ((12699, 12768), 'context.grama.cp_function', 'gr.cp_function', ([], {'fun': '(lambda x: x)', 'var': '(1)', 'out': '(1)', 'name': '"""name"""', 'runtime': '(1)'}), "(fun=lambda x: x, var=1, out=1, name='name', runtime=1)\n", (12713, 12768), True, 'from context import grama as gr\n'), ((1101, 1132), 'context.grama.Density', 'gr.Density', ([], {'marginals': 'marginals'}), '(marginals=marginals)\n', (1111, 1132), True, 'from context import grama as gr\n'), ((1637, 1668), 'context.grama.Density', 'gr.Density', ([], {'marginals': 'marginals'}), '(marginals=marginals)\n', (1647, 1668), True, 'from context import grama as gr\n'), ((5308, 5318), 'context.grama.Model', 'gr.Model', ([], {}), '()\n', (5316, 5318), True, 'from context import grama as gr\n'), ((6340, 6357), 'context.grama.Model', 'gr.Model', (['"""model"""'], {}), "('model')\n", (6348, 6357), True, 'from context import grama as gr\n'), ((6373, 6414), 'context.grama.cp_function', 'gr.cp_function', (['(lambda x: x)'], {'var': '(1)', 'out': '(1)'}), '(lambda x: x, var=1, out=1)\n', (6387, 6414), True, 'from context import grama as gr\n'), ((7846, 7859), 'scipy.stats.norm.pdf', 'norm.pdf', (['(0.5)'], {}), 
'(0.5)\n', (7854, 7859), False, 'from scipy.stats import norm\n'), ((7915, 7928), 'scipy.stats.norm.cdf', 'norm.cdf', (['(0.5)'], {}), '(0.5)\n', (7923, 7928), False, 'from scipy.stats import norm\n'), ((7984, 7997), 'scipy.stats.norm.ppf', 'norm.ppf', (['(0.5)'], {}), '(0.5)\n', (7992, 7997), False, 'from scipy.stats import norm\n'), ((10055, 10090), 'numpy.isclose', 'np.isclose', (['copula.Sigma_h', 'Sigma_h'], {}), '(copula.Sigma_h, Sigma_h)\n', (10065, 10090), True, 'import numpy as np\n'), ((922, 996), 'context.grama.Function', 'gr.Function', (['(lambda x: [x[0], x[1]])', "['x0', 'x1']", "['y0', 'y1']", '"""test"""', '(0)'], {}), "(lambda x: [x[0], x[1]], ['x0', 'x1'], ['y0', 'y1'], 'test', 0)\n", (933, 996), True, 'from context import grama as gr\n'), ((1487, 1563), 'context.grama.Function', 'gr.Function', (['(lambda x: x[0] + x[1] + x[2])', "['x', 'y', 'z']", "['f']", '"""test"""', '(0)'], {}), "(lambda x: x[0] + x[1] + x[2], ['x', 'y', 'z'], ['f'], 'test', 0)\n", (1498, 1563), True, 'from context import grama as gr\n'), ((1781, 1830), 'context.grama.Function', 'gr.Function', (['(lambda x: x)', "['x0']", "['y0']", '"""f0"""', '(1)'], {}), "(lambda x: x, ['x0'], ['y0'], 'f0', 1)\n", (1792, 1830), True, 'from context import grama as gr\n'), ((1848, 1897), 'context.grama.Function', 'gr.Function', (['(lambda x: x)', "['x0']", "['y1']", '"""f1"""', '(1)'], {}), "(lambda x: x, ['x0'], ['y1'], 'f1', 1)\n", (1859, 1897), True, 'from context import grama as gr\n'), ((6069, 6085), 'numpy.array', 'np.array', (['[h, 0]'], {}), '([h, 0])\n', (6077, 6085), True, 'import numpy as np\n'), ((6140, 6156), 'numpy.array', 'np.array', (['[0, h]'], {}), '([0, h])\n', (6148, 6156), True, 'import numpy as np\n'), ((8616, 8683), 'context.grama.MarginalNamed', 'gr.MarginalNamed', ([], {'d_name': '"""uniform"""', 'd_param': "{'loc': -1, 'scale': 2}"}), "(d_name='uniform', d_param={'loc': -1, 'scale': 2})\n", (8632, 8683), True, 'from context import grama as gr\n'), ((8703, 8770), 
'context.grama.MarginalNamed', 'gr.MarginalNamed', ([], {'d_name': '"""uniform"""', 'd_param': "{'loc': -1, 'scale': 2}"}), "(d_name='uniform', d_param={'loc': -1, 'scale': 2})\n", (8719, 8770), True, 'from context import grama as gr\n'), ((9538, 9554), 'numpy.array', 'np.array', (['[h, 0]'], {}), '([h, 0])\n', (9546, 9554), True, 'import numpy as np\n'), ((9617, 9633), 'numpy.array', 'np.array', (['[0, h]'], {}), '([0, h])\n', (9625, 9633), True, 'import numpy as np\n'), ((10672, 10688), 'numpy.array', 'np.array', (['[h, 0]'], {}), '([h, 0])\n', (10680, 10688), True, 'import numpy as np\n'), ((10751, 10767), 'numpy.array', 'np.array', (['[0, h]'], {}), '([0, h])\n', (10759, 10767), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Experiment of Changing Priors
<NAME>
2020-1-14
"""
import pickle
import datetime
import numpy as np
import torch
import stability_testers as st
import sinkhorn_torch as sk
class PriorVariation:
    """
    Changing priors.
    Basic requirements on parameters:
    ---------------------------------
    dim : can be only 3 or 4
    method : for dim 3, "hex" or "circ"
             for dim 4, ""
    correct hypothesis: always 0
    change matrix columns: second row with minimal scbi_roc?
    """
    # dim 3 cases: For some historical simplicity reasons,
    # we chose to use row vectors as coordinates, so transformation
    # matrices are written in the row-format
    # Orthonormal in-simplex basis vectors for the 2-simplex in R^3
    x_unit_3 = np.array((2, -1, -1), dtype=np.float64) / np.sqrt(6)
    y_unit_3 = np.array((-1, 2, -1), dtype=np.float64) / np.sqrt(6)
    # Rows map simplex coordinates; applied as row-vector @ matrix
    trans_simplex_3 = np.array([x_unit_3, y_unit_3])
    # 2D basis used for plotting (lattice -> visual coordinates)
    x_vis_3 = np.array((0, 1), dtype=np.float64)
    y_vis_3 = np.array((np.sqrt(3)/2, -1/2), dtype=np.float64)
    trans_visual_3 = np.array([x_vis_3, y_vis_3])
    # dim 4 cases:
    # Analogous bases for the 3-simplex in R^4
    trans_simplex_4 = np.array([[3, -1, -1, -1],
                                [-1, 3, -1, -1],
                                [-1, -1, -1, 3]],
                               dtype=np.float64) / (np.sqrt(12))
    trans_visual_4 = np.array([[np.sqrt(8) / 3, 0, -1 / 3],
                               [-np.sqrt(2) / 3, np.sqrt(2 / 3), -1 / 3],
                               [0, 0, 1]], dtype=np.float64)
    # Dimension-keyed lookup tables for the transforms above
    trans_simplex = {3: trans_simplex_3,
                     4: trans_simplex_4}
    trans_visual = {3: trans_visual_3,
                    4: trans_visual_4}
@staticmethod
def angle_to_std(phi, theta):
"""
Latitude and Longitude to standard coordinates
Parameters
----------
phi, theta: must be numpy 1d-arrays.
"""
return np.array([np.sin(phi)*np.cos(theta),
np.sin(phi)*np.sin(theta),
np.cos(phi)], dtype=np.float64).T
@staticmethod
def gen_hex_3(r_inner=1, r_outer=5, **args):
"""
Generate hex-lattice
Parameters
----------
r_inner : inner radius, integer
r_outer : outer radius, integer, included
Return
------
Dict of numpy 2-d arrays of shape (points_per_layer, 2),
with num_of_layers elements
coordinated in the last dimension (2 elements) are in
lattice basis, should multiply trans_visual_3 and trans_simplex_3 to
change variables to make them visualized or in simplex coordinate.
"""
unit_sequence = np.array(((0., 1.),
(-1., 0.),
(-1., -1.),
(0., -1.),
(1., 0.),
(1., 1.),
(1., 0.), ),
dtype=np.float64)
layers = dict()
cur = unit_sequence[-1] * r_inner
for r in range(r_inner, r_outer+1):
ret = []
for j in range(6):
for i in range(r):
cur += unit_sequence[j]
ret += [cur.copy(),]
layers[r] = np.array(ret)
cur += unit_sequence[-1]
return layers
@staticmethod
def gen_circ_3(r_inner=1, r_outer=5, **args):
"""
Generate circle-testpoints
Parameters
----------
r_inner : inner radius, integer
r_outer : outer radius, integer, included
Return
------
Dict of numpy 2-d arrays of shape (points_per_layer, 2),
with num_of_layers elements
coordinated in the last dimension (2 elements) are in
lattice basis, should multiply trans_visual_3 and trans_simplex_3 to
change variables to make them visualized or in simplex coordinate.
"""
if "density" in args.keys():
density = args["density"]
else:
density = 6
if "phase_start" in args.keys():
p_start = args["phase_start"]
else:
p_start = 0
if "phase_end" in args.keys():
p_end = args["phase_end"]
else:
p_end = 1
if "endpoint" in args.keys():
endpoint = args["endpoint"]
else:
endpoint = False
layers = dict()
for r in range(r_inner, r_outer+1):
angle = 2 * np.pi / density / r
ret = []
for i in np.linspace(r * density * p_start,
r * density * p_end,
density * r, endpoint=endpoint):
ret += [np.array((r*np.sin(angle*i), r*np.cos(angle*i)), dtype=np.float64)]
layers[r] = np.matmul(np.array(ret), np.linalg.inv(PriorVariation.trans_visual_3))
return layers
config = {3:{"hex" : gen_hex_3.__func__,
"circ": gen_circ_3.__func__,},
4:{}}
    def __init__(self,
                 dim=3,
                 method='hex',
                 matrix=torch.tensor([[0.1, 0.2, 0.7],
                                      [0.3, 0.4, 0.4],
                                      [0.6, 0.3, 0.1]],
                                      dtype=torch.float64),
                 prior=torch.ones(3, dtype=torch.float64)/3,
                 resolution=0.02,
                 r_inner=1,
                 r_outer=2,
                 density=6):
        """
        Initialization

        dim        : simplex dimension (3 or 4; see PriorVariation.config)
        method     : point-set generator name ('hex' or 'circ' for dim 3)
        matrix     : joint distribution used for both learner and teacher
        prior      : teacher's prior (defaults to uniform)
        resolution, r_inner, r_outer, density : perturbation-grid settings

        NOTE(review): the tensor defaults are mutable objects shared across
        calls; safe only as long as callers never modify them in place.
        """
        self.m = 0
        self.n = 0
        self.dim = 0
        self.method = None
        # Setters validate / derive state (e.g. set_matrix fills self.n, self.m).
        self.set_dimension(dim)
        self.set_method(method)
        self.set_matrix(matrix)
        self.set_prior(prior)
        self.set_perturbations(resolution, r_inner, r_outer, density)
        # self.gen_pts =
def set_perturbations(self, resolution=0.02, r_inner=1, r_outer=5, density=6):
"""
Set resolution / r_inner / r_outer
"""
self.resolution = resolution
self.r_inner = r_inner
self.r_outer = r_outer
self.density = density
def set_prior(self, prior):
"""
Set the prior of teacher
"""
self.prior = prior
def get_prior(self):
"""
Get prior of teacher
"""
return self.prior
def set_matrix(self, matrix):
"""
Set T=L the matrix (joint distribution)
and the size of n, m
"""
self.n, self.m = matrix.shape
self.matrix = matrix
def get_matrix(self):
"""
Get matrix of joint distribution
"""
return self.matrix
def set_dimension(self, dim):
"""
Set dimension of prior study.
"""
if dim not in PriorVariation.config.keys():
print("Dimension is not set. We can only do dim-3 and 4 cases.")
return
self.dim = dim
def get_dimension(self):
"""
Get Dimension
"""
return self.dim
def set_method(self, method):
"""
Set method of generating points.
"""
if self.dim not in PriorVariation.config.keys():
print("Please set the dim first")
return
if method not in PriorVariation.config[self.dim].keys():
print("Method is not supported, please choose from",
PriorVariation.config[self.dim].keys())
return
self.method = method
def get_method(self):
"""
Get method
"""
return self.method
    def simulate(self, repeats=100, block_size=100, threads=30, **args):
        """
        Main method: run stability inference for every perturbed learner prior.

        For each layer of perturbation points, offsets the teacher prior by
        `point * resolution`, runs `inference_fixed_initial`, and collects the
        teacher/learner results. Results are also pickled to a timestamped
        "Dim<dim>_<timestamp>.log" file (two pickle records: results, then setup).

        The `**args` is sent to the method generating sample sets.
        """
        self.tester = st.StabilityTester("cpu")
        self.tester.set_correct_hypo(0)
        # Learner and teacher share the same joint-distribution matrix.
        self.tester.set_mat_learn(self.matrix)
        self.tester.set_mat_teach(self.matrix)
        self.tester.set_prior_teach(self.prior)
        # Lattice/circle points around the teacher prior, keyed by radius.
        self.perturb_base = PriorVariation.config[self.dim][self.method](self.r_inner,
                                                                         self.r_outer,
                                                                         density = self.density,
                                                                         **args)
        # posterior = dict()
        self.learn_result = dict()
        self.teach_result = dict()
        for key, value in self.perturb_base.items():
            # posterior[key] = torch.from_numpy(np.matmul(value,
            #                                   PriorVariation.trans_simplex[self.dim]))
            # Map lattice coordinates into simplex tangent directions.
            posterior = torch.from_numpy(np.matmul(value,
                                         PriorVariation.trans_simplex[self.dim]))
            print("Layer:",key, datetime.datetime.today())
            self.learn_result[key] = []
            self.teach_result[key] = []
            # for p_learn in posterior[key]:
            for p_learn in posterior:
                # Perturb the learner prior by this direction, scaled by resolution.
                prior_learn = self.prior + p_learn * self.resolution
                self.tester.set_prior_learn(prior_learn)
                inference_result = self.tester.inference_fixed_initial(repeats,
                                                                       block_size,
                                                                       threads,timer=False).numpy()
                # Index 0 holds the teacher result, index 1 the learner result.
                self.teach_result[key] += [inference_result[0],]
                self.learn_result[key] += [inference_result[1],]
        # Persist results (first record) and the run configuration (second record).
        with open("Dim"+str(self.dim)+"_"+str(datetime.datetime.today())+".log", "wb") as fp:
            pickle.dump({"base" : self.perturb_base,
                         "teach": self.teach_result,
                         "learn": self.learn_result}, fp)
            pickle.dump({"matrix"  : self.matrix,
                         "prior_t" : self.prior,
                         "method"  : self.method,
                         "density" : self.density,
                         "resolution": self.resolution}, fp)
        del self.tester
        return self.teach_result, self.learn_result
    def fastest_path_bin(self, repeats=100, block_size=100, threads=30, delta_angle=0.01):
        """
        In the dim=3 case, use binary search on each level to find out
        worst / best position on each circle.

        Returns a list of (phase, learner_result) pairs, one per radius,
        where `phase` is the fractional circle position found by bisection.
        `delta_angle` sets the angular stopping tolerance per radius.
        """
        assert self.dim == 3
        self.tester = st.StabilityTester("cpu")
        self.tester.set_correct_hypo(0)
        self.tester.set_mat_learn(self.matrix)
        self.tester.set_mat_teach(self.matrix)
        self.tester.set_prior_teach(self.prior)
        self.learn_result = dict()
        self.teach_result = dict()
        fastest_path = []
        ave = lambda x, y: (x + y) / 2
        for radius in range(self.r_inner, self.r_outer+1):
            # Larger circles need a smaller phase tolerance for the same angle.
            threshold = delta_angle / radius
            # left and right nodes
            data = dict()
            left = 0.
            right = 1.
            mid = ave(left, right)
            # Three seed points: both ends and the midpoint of the full circle.
            coords = PriorVariation.gen_circ_3(1, 1, density=3,
                                              phase_start=left,
                                              phase_end=right,
                                              endpoint=True)
            prior_learn = torch.from_numpy(np.matmul(coords[1],
                                           PriorVariation.trans_simplex_3))
            prior_learn = prior_learn * radius * self.resolution + self.prior
            tmp = []
            for prior in prior_learn:
                self.tester.set_prior_learn(prior)
                inference_result = self.tester.inference_fixed_initial(repeats,
                                                                       block_size,
                                                                       threads)
                # Keep only the first learner result for each probe point.
                tmp += [inference_result[1][0],]
            data[left] = tmp[0]
            data[mid] = tmp[1]
            data[right] = tmp[2]
            # Very typical bisection method. Looks like a bad implementation.
            while right - left > threshold:
                # Probe the quarter points between the current bracket bounds.
                q1 = ave(left, mid)
                q3 = ave(mid, right)
                coords = PriorVariation.gen_circ_3(1, 1, density=2,
                                                  phase_start=q1,
                                                  phase_end=q3,
                                                  endpoint=True)
                prior_learn = torch.from_numpy(np.matmul(coords[1],
                                               PriorVariation.trans_simplex_3))
                prior_learn = prior_learn * radius *self.resolution + self.prior
                self.tester.set_prior_learn(prior_learn[0])
                data[q1] = self.tester.inference_fixed_initial(repeats,
                                                               block_size,
                                                               threads)[1][0]
                self.tester.set_prior_learn(prior_learn[1])
                data[q3] = self.tester.inference_fixed_initial(repeats,
                                                               block_size,
                                                               threads)[1][0]
                # Shrink the bracket toward the side with the smaller endpoint,
                # twice per iteration (halves the interval each pass).
                for i in range(2):
                    if data[left] > data[right]:
                        left = q1
                        q1 = mid
                    else:
                        right = q3
                        q3 = mid
                    mid = ave(left, right)
            # Now data[mid] can reflects the steepest point we have on the circle.
            print(data)
            fastest_path += [(mid, data[mid]),]
            del data
        return fastest_path
@staticmethod
def read_data(filename):
"""
Basic method of reading files.
"""
setup = None
with open(filename, "rb") as fp:
data = pickle.load(fp)
try:
setup = pickle.load(fp)
print("Loaded a new version log file.")
except EOFError:
print("Loaded an old version log file.")
return data, setup
# Import-only module: no command-line entry point is provided.
if __name__ == '__main__':
    pass
| [
"torch.ones",
"pickle.dump",
"datetime.datetime.today",
"stability_testers.StabilityTester",
"pickle.load",
"numpy.array",
"numpy.linalg.inv",
"numpy.linspace",
"numpy.matmul",
"numpy.cos",
"numpy.sin",
"torch.tensor",
"numpy.sqrt"
] | [((863, 893), 'numpy.array', 'np.array', (['[x_unit_3, y_unit_3]'], {}), '([x_unit_3, y_unit_3])\n', (871, 893), True, 'import numpy as np\n'), ((908, 942), 'numpy.array', 'np.array', (['(0, 1)'], {'dtype': 'np.float64'}), '((0, 1), dtype=np.float64)\n', (916, 942), True, 'import numpy as np\n'), ((1027, 1055), 'numpy.array', 'np.array', (['[x_vis_3, y_vis_3]'], {}), '([x_vis_3, y_vis_3])\n', (1035, 1055), True, 'import numpy as np\n'), ((720, 759), 'numpy.array', 'np.array', (['(2, -1, -1)'], {'dtype': 'np.float64'}), '((2, -1, -1), dtype=np.float64)\n', (728, 759), True, 'import numpy as np\n'), ((762, 772), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (769, 772), True, 'import numpy as np\n'), ((788, 827), 'numpy.array', 'np.array', (['(-1, 2, -1)'], {'dtype': 'np.float64'}), '((-1, 2, -1), dtype=np.float64)\n', (796, 827), True, 'import numpy as np\n'), ((830, 840), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (837, 840), True, 'import numpy as np\n'), ((1100, 1179), 'numpy.array', 'np.array', (['[[3, -1, -1, -1], [-1, 3, -1, -1], [-1, -1, -1, 3]]'], {'dtype': 'np.float64'}), '([[3, -1, -1, -1], [-1, 3, -1, -1], [-1, -1, -1, 3]], dtype=np.float64)\n', (1108, 1179), True, 'import numpy as np\n'), ((1278, 1289), 'numpy.sqrt', 'np.sqrt', (['(12)'], {}), '(12)\n', (1285, 1289), True, 'import numpy as np\n'), ((2650, 2771), 'numpy.array', 'np.array', (['((0.0, 1.0), (-1.0, 0.0), (-1.0, -1.0), (0.0, -1.0), (1.0, 0.0), (1.0, 1.0),\n (1.0, 0.0))'], {'dtype': 'np.float64'}), '(((0.0, 1.0), (-1.0, 0.0), (-1.0, -1.0), (0.0, -1.0), (1.0, 0.0), (\n 1.0, 1.0), (1.0, 0.0)), dtype=np.float64)\n', (2658, 2771), True, 'import numpy as np\n'), ((5184, 5275), 'torch.tensor', 'torch.tensor', (['[[0.1, 0.2, 0.7], [0.3, 0.4, 0.4], [0.6, 0.3, 0.1]]'], {'dtype': 'torch.float64'}), '([[0.1, 0.2, 0.7], [0.3, 0.4, 0.4], [0.6, 0.3, 0.1]], dtype=\n torch.float64)\n', (5196, 5275), False, 'import torch\n'), ((7879, 7904), 'stability_testers.StabilityTester', 'st.StabilityTester', 
(['"""cpu"""'], {}), "('cpu')\n", (7897, 7904), True, 'import stability_testers as st\n'), ((10536, 10561), 'stability_testers.StabilityTester', 'st.StabilityTester', (['"""cpu"""'], {}), "('cpu')\n", (10554, 10561), True, 'import stability_testers as st\n'), ((3299, 3312), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (3307, 3312), True, 'import numpy as np\n'), ((4601, 4692), 'numpy.linspace', 'np.linspace', (['(r * density * p_start)', '(r * density * p_end)', '(density * r)'], {'endpoint': 'endpoint'}), '(r * density * p_start, r * density * p_end, density * r,\n endpoint=endpoint)\n', (4612, 4692), True, 'import numpy as np\n'), ((5408, 5442), 'torch.ones', 'torch.ones', (['(3)'], {'dtype': 'torch.float64'}), '(3, dtype=torch.float64)\n', (5418, 5442), False, 'import torch\n'), ((9754, 9858), 'pickle.dump', 'pickle.dump', (["{'base': self.perturb_base, 'teach': self.teach_result, 'learn': self.\n learn_result}", 'fp'], {}), "({'base': self.perturb_base, 'teach': self.teach_result, 'learn':\n self.learn_result}, fp)\n", (9765, 9858), False, 'import pickle\n'), ((9918, 10065), 'pickle.dump', 'pickle.dump', (["{'matrix': self.matrix, 'prior_t': self.prior, 'method': self.method,\n 'density': self.density, 'resolution': self.resolution}", 'fp'], {}), "({'matrix': self.matrix, 'prior_t': self.prior, 'method': self.\n method, 'density': self.density, 'resolution': self.resolution}, fp)\n", (9929, 10065), False, 'import pickle\n'), ((14066, 14081), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (14077, 14081), False, 'import pickle\n'), ((967, 977), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (974, 977), True, 'import numpy as np\n'), ((1401, 1415), 'numpy.sqrt', 'np.sqrt', (['(2 / 3)'], {}), '(2 / 3)\n', (1408, 1415), True, 'import numpy as np\n'), ((4882, 4895), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (4890, 4895), True, 'import numpy as np\n'), ((4897, 4941), 'numpy.linalg.inv', 'np.linalg.inv', (['PriorVariation.trans_visual_3'], 
{}), '(PriorVariation.trans_visual_3)\n', (4910, 4941), True, 'import numpy as np\n'), ((8796, 8852), 'numpy.matmul', 'np.matmul', (['value', 'PriorVariation.trans_simplex[self.dim]'], {}), '(value, PriorVariation.trans_simplex[self.dim])\n', (8805, 8852), True, 'import numpy as np\n'), ((8937, 8962), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (8960, 8962), False, 'import datetime\n'), ((11425, 11477), 'numpy.matmul', 'np.matmul', (['coords[1]', 'PriorVariation.trans_simplex_3'], {}), '(coords[1], PriorVariation.trans_simplex_3)\n', (11434, 11477), True, 'import numpy as np\n'), ((14123, 14138), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (14134, 14138), False, 'import pickle\n'), ((1324, 1334), 'numpy.sqrt', 'np.sqrt', (['(8)'], {}), '(8)\n', (1331, 1334), True, 'import numpy as np\n'), ((1995, 2006), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (2001, 2006), True, 'import numpy as np\n'), ((12622, 12674), 'numpy.matmul', 'np.matmul', (['coords[1]', 'PriorVariation.trans_simplex_3'], {}), '(coords[1], PriorVariation.trans_simplex_3)\n', (12631, 12674), True, 'import numpy as np\n'), ((1385, 1395), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1392, 1395), True, 'import numpy as np\n'), ((1891, 1902), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1897, 1902), True, 'import numpy as np\n'), ((1903, 1916), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1909, 1916), True, 'import numpy as np\n'), ((1943, 1954), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1949, 1954), True, 'import numpy as np\n'), ((1955, 1968), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1961, 1968), True, 'import numpy as np\n'), ((9694, 9719), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (9717, 9719), False, 'import datetime\n'), ((4792, 4809), 'numpy.sin', 'np.sin', (['(angle * i)'], {}), '(angle * i)\n', (4798, 4809), True, 'import numpy as np\n'), ((4811, 4828), 'numpy.cos', 'np.cos', (['(angle * 
i)'], {}), '(angle * i)\n', (4817, 4828), True, 'import numpy as np\n')] |
"""Unit test for analysis.py module"""
import datetime
import numpy as np
from matplotlib.dates import date2num
from floodsystem.analysis import polyfit, forecast
def test_polyfit():
    """polyfit must recover the coefficients of a known cubic."""
    start = datetime.datetime(2016, 12, 30)
    dates = [start + datetime.timedelta(days=k) for k in range(7)]
    t = date2num(dates)
    # Sample a known cubic (shifted so the first date maps to x=0) and check
    # that polyfit returns the same polynomial and the same shift.
    poly = np.poly1d([1, -2, 10, 4])
    y = [poly(ti - t[0]) for ti in t]
    fitted, x0 = polyfit(dates, y, 3)
    for got, want in zip(fitted.coefficients, (1, -2, 10, 4)):
        assert round(got) == want
    assert x0 == t[0]
def test_forecast():
    """forecast of a cubic whose gradient at x=0 is 10, over half a day, is ~5."""
    poly = np.poly1d([1, -2, 10, 4])
    now = date2num(datetime.datetime.now())
    # gradient at x=0 is 10, so the change over 0.5 days should round to 5
    assert round(forecast(poly, now)) == 5
| [
"numpy.poly1d",
"floodsystem.analysis.forecast",
"datetime.datetime",
"floodsystem.analysis.polyfit",
"matplotlib.dates.date2num",
"datetime.datetime.now"
] | [((439, 454), 'matplotlib.dates.date2num', 'date2num', (['dates'], {}), '(dates)\n', (447, 454), False, 'from matplotlib.dates import date2num\n'), ((464, 489), 'numpy.poly1d', 'np.poly1d', (['[1, -2, 10, 4]'], {}), '([1, -2, 10, 4])\n', (473, 489), True, 'import numpy as np\n'), ((611, 631), 'floodsystem.analysis.polyfit', 'polyfit', (['dates', 'y', '(3)'], {}), '(dates, y, 3)\n', (618, 631), False, 'from floodsystem.analysis import polyfit, forecast\n'), ((853, 878), 'numpy.poly1d', 'np.poly1d', (['[1, -2, 10, 4]'], {}), '([1, -2, 10, 4])\n', (862, 878), True, 'import numpy as np\n'), ((937, 953), 'floodsystem.analysis.forecast', 'forecast', (['f', 'now'], {}), '(f, now)\n', (945, 953), False, 'from floodsystem.analysis import polyfit, forecast\n'), ((199, 230), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(12)', '(30)'], {}), '(2016, 12, 30)\n', (216, 230), False, 'import datetime\n'), ((232, 263), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(12)', '(31)'], {}), '(2016, 12, 31)\n', (249, 263), False, 'import datetime\n'), ((265, 294), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (282, 294), False, 'import datetime\n'), ((301, 330), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(2)'], {}), '(2017, 1, 2)\n', (318, 330), False, 'import datetime\n'), ((332, 361), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(3)'], {}), '(2017, 1, 3)\n', (349, 361), False, 'import datetime\n'), ((363, 392), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(4)'], {}), '(2017, 1, 4)\n', (380, 392), False, 'import datetime\n'), ((399, 428), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(1)', '(5)'], {}), '(2017, 1, 5)\n', (416, 428), False, 'import datetime\n'), ((898, 921), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (919, 921), False, 'import datetime\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .spectral_normalization import SpectralNorm
from torch.autograd import Variable
from torchvision import models
import sys
# Py2/Py3 compatibility shim: ensure the name ``xrange`` exists.
try:
    xrange  # Python 2: the builtin already exists
except NameError:
    xrange = range  # Python 3: alias to the built-in range
class Vgg19(torch.nn.Module):
    """Frozen VGG-19 feature extractor split into five relu-level slices.

    A MaxPool2d inside a slice is kept only while the requested final feature
    size still requires that much downsampling; larger ``final_feat_size``
    values progressively drop the later pooling layers.
    """
    def __init__(self, requires_grad=False, only_last=False, final_feat_size=8):
        super(Vgg19, self).__init__()
        pretrained = models.vgg19(pretrained=True).features
        self.only_last = only_last
        # (start, stop, pool_threshold): layers [start, stop) form one slice;
        # a MaxPool2d in the slice is kept only when final_feat_size <= threshold
        # (None = always keep every layer).
        spec = ((0, 2, None), (2, 7, 64), (7, 12, 32), (12, 21, 16), (21, 30, 8))
        slices = []
        for start, stop, threshold in spec:
            seq = torch.nn.Sequential()
            for idx in range(start, stop):
                layer = pretrained[idx]
                if (threshold is not None and final_feat_size > threshold
                        and type(layer) is nn.MaxPool2d):
                    continue
                seq.add_module(str(idx), layer)
            slices.append(seq)
        self.slice1, self.slice2, self.slice3, self.slice4, self.slice5 = slices
        if not requires_grad:
            # Freeze the backbone: used purely as a fixed feature extractor.
            for param in self.parameters():
                param.requires_grad = False
    def forward(self, X):
        """Return the last slice's activations, or all five when only_last=False."""
        h1 = self.slice1(X)
        h2 = self.slice2(h1)
        h3 = self.slice3(h2)
        h4 = self.slice4(h3)
        h5 = self.slice5(h4)
        return h5 if self.only_last else [h1, h2, h3, h4, h5]
class AdaptiveScaleTconv(nn.Module):
    """Upsample-then-convolve block with an optional learned deformation grid.

    The input is bilinearly upsampled by ``scale``, filtered by one or more
    3x3 convolutions, and (when ``use_deform``) resampled along a learned
    per-pixel offset field that is zero-initialised, i.e. the warp starts as
    the identity transform.
    """
    def __init__(self, dim_in, dim_out, scale=2, use_deform=True, n_filters=1):
        super(AdaptiveScaleTconv, self).__init__()
        # ``align_corners`` only exists from torch 0.4 on.
        # NOTE(review): this major.minor parse assumes a plain numeric minor
        # version segment — confirm against the supported torch range.
        if int(torch.__version__.split('.')[1]) < 4:
            self.upsampLayer = nn.Upsample(scale_factor=scale, mode='bilinear')
        else:
            self.upsampLayer = nn.Upsample(scale_factor=scale, mode='bilinear', align_corners=False)
        if n_filters > 1:
            # Stack of 3x3 convs; only the first maps dim_in -> dim_out.
            # (range replaces the module-level Py2 ``xrange`` shim; identical here.)
            self.convFilter = nn.Sequential(
                *[nn.Conv2d(dim_in if i == 0 else dim_out, dim_out,
                            kernel_size=3, stride=1, padding=1, bias=False)
                  for i in range(n_filters)])
        else:
            self.convFilter = nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=False)
        self.use_deform = use_deform
        if use_deform:
            # Predicts a 2-channel (x, y) offset field; zero-initialised so the
            # block starts out sampling on the regular grid.
            self.coordfilter = nn.Conv2d(dim_in, 2, kernel_size=3, stride=1, padding=1, dilation=1, bias=False)
            self.coordfilter.weight.data.zero_()
            # Identity transform used to create a regular grid!
    def forward(self, x, extra_inp=None):
        """Upsample ``x`` (optionally concatenated with ``extra_inp``), filter, warp.

        Returns
        -------
        (deformed_out, feat_out): ``feat_out`` is the triple
        ``(deform_grid, reg_grid, cord_offset)`` when ``use_deform`` else ``[]``.
        """
        up_out = self.upsampLayer(x)
        filt_out = self.convFilter(up_out if extra_inp is None else torch.cat([up_out, extra_inp], dim=1))
        if self.use_deform:
            cord_offset = self.coordfilter(up_out)
            # Regular [-1, 1] sampling grid at the upsampled spatial size.
            # NOTE(review): the grid is built on CUDA unconditionally, so the
            # deform path cannot run on CPU — confirm this is intended.
            reg_grid = Variable(torch.FloatTensor(np.stack(np.meshgrid(
                np.linspace(-1, 1, up_out.size(2)),
                np.linspace(-1, 1, up_out.size(3))))).cuda(), requires_grad=False)
            # torch.tanh replaces the deprecated F.tanh; identical math.
            deform_grid = reg_grid.detach() + torch.tanh(cord_offset)
            deformed_out = F.grid_sample(filt_out, deform_grid.transpose(1, 3).transpose(1, 2),
                                          mode='bilinear', padding_mode='zeros')
            feat_out = (deform_grid, reg_grid, cord_offset)
        else:
            deformed_out = filt_out
            feat_out = []
        # Deformed output plus the grids used to produce it.
        return deformed_out, feat_out
class ResidualBlock(nn.Module):
    """Residual Block: two (pad, 3x3 conv, InstanceNorm, LeakyReLU) stages
    with an additive skip connection.

    ``padtype`` selects zero ('zero', via conv padding), 'reflection', or
    'replication' padding; ``dilation`` sets both the conv dilation and the
    matching pad amount so spatial size is preserved.
    """
    def __init__(self, dim_in, dilation=1, padtype='zero'):
        super(ResidualBlock, self).__init__()
        layers = []
        # The block is two identical stages; build both in one loop.
        for _ in range(2):
            pad = dilation
            if padtype == 'reflection':
                layers.append(nn.ReflectionPad2d(pad))
                pad = 0  # padding handled by the explicit pad layer
            elif padtype == 'replication':
                # BUG FIX: was nn.ReplicationPad2d(p) — ``p`` is undefined and
                # raised NameError whenever padtype='replication'.
                layers.append(nn.ReplicationPad2d(pad))
                pad = 0
            layers.extend([
                nn.Conv2d(dim_in, dim_in, kernel_size=3, stride=1, padding=pad, dilation=dilation, bias=False),
                nn.InstanceNorm2d(dim_in, affine=True),
                nn.LeakyReLU(0.1, inplace=True)])
        self.main = nn.Sequential(*layers)
    def forward(self, x):
        """Return x + F(x) (skip connection around the two conv stages)."""
        return x + self.main(x)
class ResidualBlockBnorm(nn.Module):
    """Residual Block (BatchNorm variant): two (pad, 3x3 conv, BatchNorm,
    LeakyReLU) stages with an additive skip connection.

    ``padtype`` selects zero ('zero', via conv padding), 'reflection', or
    'replication' padding; ``dilation`` sets both the conv dilation and the
    matching pad amount so spatial size is preserved.
    """
    def __init__(self, dim_in, dilation=1, padtype='zero'):
        super(ResidualBlockBnorm, self).__init__()
        layers = []
        # Two identical stages, built in one loop.
        for _ in range(2):
            pad = dilation
            if padtype == 'reflection':
                layers.append(nn.ReflectionPad2d(pad))
                pad = 0  # padding handled by the explicit pad layer
            elif padtype == 'replication':
                # BUG FIX: was nn.ReplicationPad2d(p) — ``p`` is undefined and
                # raised NameError whenever padtype='replication'.
                layers.append(nn.ReplicationPad2d(pad))
                pad = 0
            layers.extend([
                nn.Conv2d(dim_in, dim_in, kernel_size=3, stride=1, padding=pad, dilation=dilation, bias=False),
                nn.BatchNorm2d(dim_in, affine=True),
                nn.LeakyReLU(0.1, inplace=True)])
        self.main = nn.Sequential(*layers)
    def forward(self, x):
        """Return x + F(x) (skip connection around the two conv stages)."""
        return x + self.main(x)
class ResidualBlockNoNorm(nn.Module):
    """Residual Block (no normalisation): two (pad, 3x3 conv, LeakyReLU)
    stages with an additive skip connection.

    ``padtype`` selects zero ('zero', via conv padding), 'reflection', or
    'replication' padding; ``dilation`` sets both the conv dilation and the
    matching pad amount so spatial size is preserved.
    """
    def __init__(self, dim_in, dilation=1, padtype='zero'):
        super(ResidualBlockNoNorm, self).__init__()
        layers = []
        # Two identical stages, built in one loop.
        for _ in range(2):
            pad = dilation
            if padtype == 'reflection':
                layers.append(nn.ReflectionPad2d(pad))
                pad = 0  # padding handled by the explicit pad layer
            elif padtype == 'replication':
                # BUG FIX: was nn.ReplicationPad2d(p) — ``p`` is undefined and
                # raised NameError whenever padtype='replication'.
                layers.append(nn.ReplicationPad2d(pad))
                pad = 0
            layers.extend([
                nn.Conv2d(dim_in, dim_in, kernel_size=3, stride=1, padding=pad, dilation=dilation, bias=False),
                nn.LeakyReLU(0.1, inplace=True)])
        self.main = nn.Sequential(*layers)
    def forward(self, x):
        """Return x + F(x) (skip connection around the two conv stages)."""
        return x + self.main(x)
class Generator(nn.Module):
    """Encoder-decoder generator conditioned on a target-domain label.

    The attribute vector ``c`` is tiled over the spatial grid and concatenated
    to the RGB input; the network encodes with two stride-2 convs, applies a
    residual bottleneck, decodes with two transposed convs, and maps back to
    RGB with a final Tanh.
    """
    def __init__(self, conv_dim=64, c_dim=5, repeat_num=6, g_smooth_layers=0, binary_mask=0):
        super(Generator, self).__init__()
        modules = [
            nn.Conv2d(3 + c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False),
            nn.InstanceNorm2d(conv_dim, affine=True),
            nn.ReLU(inplace=True),
        ]
        # Encoder: two stride-2 downsampling stages, doubling channels each time.
        dim = conv_dim
        for _ in range(2):
            modules += [
                nn.Conv2d(dim, dim * 2, kernel_size=4, stride=2, padding=1, bias=False),
                nn.InstanceNorm2d(dim * 2, affine=True),
                nn.ReLU(inplace=True),
            ]
            dim *= 2
        # Bottleneck of residual blocks at the lowest resolution.
        modules += [ResidualBlock(dim_in=dim) for _ in range(repeat_num)]
        # Decoder: two stride-2 transposed convolutions, halving channels.
        for _ in range(2):
            modules += [
                nn.ConvTranspose2d(dim, dim // 2, kernel_size=4, stride=2, padding=1, bias=False),
                nn.InstanceNorm2d(dim // 2, affine=True),
                nn.ReLU(inplace=True),
            ]
            dim //= 2
        modules += [
            nn.Conv2d(dim, 3, kernel_size=7, stride=1, padding=3, bias=False),
            nn.Tanh(),
        ]
        self.main = nn.Sequential(*modules)
    def forward(self, x, c):
        """Tile ``c`` spatially, concatenate with ``x``, and translate."""
        c = c.unsqueeze(2).unsqueeze(3)
        c = c.expand(c.size(0), c.size(1), x.size(2), x.size(3))
        return self.main(torch.cat([x, c], dim=1))
class GeneratorDiff(nn.Module):
    """Encoder-decoder generator that predicts an additive residual image.

    Same backbone as ``Generator``, but the Tanh output is scaled by 2 and
    added to the input image, so the network learns a difference image
    rather than a full reconstruction.
    """
    def __init__(self, conv_dim=64, c_dim=5, repeat_num=6, g_smooth_layers=0, binary_mask=0):
        super(GeneratorDiff, self).__init__()
        modules = [
            nn.Conv2d(3 + c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False),
            nn.InstanceNorm2d(conv_dim, affine=True),
            nn.ReLU(inplace=True),
        ]
        # Encoder: two stride-2 downsampling stages, doubling channels.
        dim = conv_dim
        for _ in range(2):
            modules += [
                nn.Conv2d(dim, dim * 2, kernel_size=4, stride=2, padding=1, bias=False),
                nn.InstanceNorm2d(dim * 2, affine=True),
                nn.ReLU(inplace=True),
            ]
            dim *= 2
        # Residual bottleneck.
        modules += [ResidualBlock(dim_in=dim) for _ in range(repeat_num)]
        # Decoder: two transposed-conv upsampling stages, halving channels.
        for _ in range(2):
            modules += [
                nn.ConvTranspose2d(dim, dim // 2, kernel_size=4, stride=2, padding=1, bias=False),
                nn.InstanceNorm2d(dim // 2, affine=True),
                nn.ReLU(inplace=True),
            ]
            dim //= 2
        modules += [
            nn.Conv2d(dim, 3, kernel_size=7, stride=1, padding=3, bias=False),
            nn.Tanh(),
        ]
        self.hardtanh = nn.Hardtanh(min_val=-1, max_val=1)
        self.main = nn.Sequential(*modules)
    def forward(self, x, c, out_diff=False):
        """Translate ``x``; when ``out_diff`` also return the raw residual."""
        c = c.unsqueeze(2).unsqueeze(3)
        c = c.expand(c.size(0), c.size(1), x.size(2), x.size(3))
        residual = self.main(torch.cat([x, c], dim=1))
        result = x + 2.0 * residual
        if out_diff:
            return result, residual
        return result
class GeneratorDiffWithInp(nn.Module):
    """Generator. Encoder-Decoder Architecture.

    Residual ("diff") generator whose decoder re-injects the input image at
    every scale: each up-sampling stage receives the previous features
    concatenated with an average-pooled copy of the original input.
    """
    def __init__(self, conv_dim=64, c_dim=5, repeat_num=3, g_smooth_layers=0, binary_mask=0):
        super(GeneratorDiffWithInp, self).__init__()
        layers = []
        layers.append(nn.Conv2d(3+c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False))
        layers.append(nn.InstanceNorm2d(conv_dim, affine=True))
        layers.append(nn.ReLU(inplace=True))
        # Down-Sampling
        curr_dim = conv_dim
        for i in range(2):
            layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1, bias=False))
            layers.append(nn.InstanceNorm2d(curr_dim*2, affine=True))
            layers.append(nn.ReLU(inplace=True))
            curr_dim = curr_dim * 2
        # Bottleneck
        for i in range(repeat_num):
            layers.append(ResidualBlock(dim_in=curr_dim))
        # Up-Sampling
        # Kept as parallel ModuleLists (not Sequential) because each stage's
        # input is concatenated with a pooled copy of the raw input (+3 ch).
        self.up_sampling_convlayers = nn.ModuleList()
        self.up_sampling_inorm= nn.ModuleList()
        self.up_sampling_ReLU= nn.ModuleList()
        for i in range(2):
            self.up_sampling_convlayers.append(nn.ConvTranspose2d(curr_dim+3, curr_dim//2, kernel_size=4, stride=2, padding=1, bias=False))
            self.up_sampling_inorm.append(nn.InstanceNorm2d(curr_dim//2, affine=True))
            self.up_sampling_ReLU.append(nn.ReLU(inplace=False))
            curr_dim = curr_dim // 2
        # Final conv also sees the full-resolution input (+3 channels).
        self.final_Layer = nn.Conv2d(curr_dim+3, 3, kernel_size=7, stride=1, padding=3, bias=False)
        #layers.append(nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False))
        # Remove this non-linearity or use 2.0*tanh ?
        self.finalNonLin = nn.Tanh()
        self.hardtanh = nn.Hardtanh(min_val=-1, max_val=1)
        self.main = nn.Sequential(*layers)
    def forward(self, x, c, out_diff = False):
        """Translate ``x`` toward attributes ``c``.

        Returns ``x + 2*residual``; with ``out_diff`` also returns the residual.
        """
        # replicate spatially and concatenate domain information
        c = c.unsqueeze(2).unsqueeze(3)
        c = c.expand(c.size(0), c.size(1), x.size(2), x.size(3))
        xcat = torch.cat([x, c], dim=1)
        bottle_out = self.main(xcat)
        # Decoder: at each scale, concatenate the features with the input image
        # average-pooled to the matching resolution (4x, then 2x, then 1x).
        curr_downscale = 4
        up_inp = [torch.cat([bottle_out,nn.functional.avg_pool2d(x,curr_downscale)], dim=1)]
        curr_downscale = curr_downscale//2
        up_out = []
        for i in range(len(self.up_sampling_convlayers)):
            #self.up_sampling_convlayers(x
            up_out.append(self.up_sampling_ReLU[i](self.up_sampling_inorm[i](self.up_sampling_convlayers[i](up_inp[i]))))
            up_inp.append(torch.cat([up_out[i],nn.functional.avg_pool2d(x,curr_downscale)], dim=1))
            curr_downscale = curr_downscale//2
        net_out = self.finalNonLin(self.final_Layer(up_inp[-1]))
        if out_diff:
            return (x+2.0*net_out), net_out
        else:
            return (x+2.0*net_out)
class GeneratorDiffAndMask(nn.Module):
    """Generator. Encoder-Decoder Architecture.

    Variant of ``GeneratorDiffWithInp`` with two parallel decoders sharing
    one encoder: one predicts a replacement image, the other a soft (sigmoid)
    blending mask; the output is ``(1-mask)*x + mask*net_out``.
    """
    def __init__(self, conv_dim=64, c_dim=5, repeat_num=3, g_smooth_layers=0, binary_mask=0):
        super(GeneratorDiffAndMask, self).__init__()
        layers = []
        layers.append(nn.Conv2d(3+c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False))
        layers.append(nn.InstanceNorm2d(conv_dim, affine=True))
        layers.append(nn.ReLU(inplace=True))
        # Down-Sampling
        curr_dim = conv_dim
        for i in range(2):
            layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1, bias=False))
            layers.append(nn.InstanceNorm2d(curr_dim*2, affine=True))
            layers.append(nn.ReLU(inplace=True))
            curr_dim = curr_dim * 2
        # Bottleneck
        for i in range(repeat_num):
            layers.append(ResidualBlock(dim_in=curr_dim))
        # Up-Sampling Differential layers
        # ModuleLists rather than Sequential because each decoder stage input
        # is concatenated with a pooled copy of the raw input (+3 channels).
        self.up_sampling_convlayers = nn.ModuleList()
        self.up_sampling_inorm= nn.ModuleList()
        self.up_sampling_ReLU= nn.ModuleList()
        # Up-Sampling Mask layers
        self.up_sampling_convlayers_mask = nn.ModuleList()
        self.up_sampling_inorm_mask= nn.ModuleList()
        self.up_sampling_ReLU_mask = nn.ModuleList()
        for i in range(2):
            self.up_sampling_convlayers.append(nn.ConvTranspose2d(curr_dim+3, curr_dim//2, kernel_size=4, stride=2, padding=1, bias=False))
            self.up_sampling_inorm.append(nn.InstanceNorm2d(curr_dim//2, affine=True))
            self.up_sampling_ReLU.append(nn.ReLU(inplace=False))
            ## Add the mask path
            self.up_sampling_convlayers_mask.append(nn.ConvTranspose2d(curr_dim+3, curr_dim//2, kernel_size=4, stride=2, padding=1, bias=False))
            self.up_sampling_inorm_mask.append(nn.InstanceNorm2d(curr_dim//2, affine=True))
            self.up_sampling_ReLU_mask.append(nn.ReLU(inplace=False))
            curr_dim = curr_dim // 2
        # Final layers also see the full-resolution input (+3 channels).
        self.final_Layer = nn.Conv2d(curr_dim+3, 3, kernel_size=7, stride=1, padding=3, bias=False)
        #layers.append(nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False))
        # Remove this non-linearity or use 2.0*tanh ?
        self.finalNonLin = nn.Tanh()
        # Single-channel blending mask in [0, 1].
        self.final_Layer_mask = nn.Conv2d(curr_dim+3, 1, kernel_size=7, stride=1, padding=3, bias=False)
        self.finalNonLin_mask = nn.Sigmoid()
        self.hardtanh = nn.Hardtanh(min_val=-1, max_val=1)
        self.main = nn.Sequential(*layers)
    def forward(self, x, c, out_diff = False):
        """Translate ``x`` toward attributes ``c`` via masked blending.

        Returns ``(1-mask)*x + mask*net_out``; with ``out_diff`` also returns
        the tuple ``(net_out, mask)``.
        """
        # replicate spatially and concatenate domain information
        c = c.unsqueeze(2).unsqueeze(3)
        c = c.expand(c.size(0), c.size(1), x.size(2), x.size(3))
        xcat = torch.cat([x, c], dim=1)
        bottle_out = self.main(xcat)
        # Both decoder paths start from the same bottleneck + pooled input.
        curr_downscale = 4
        up_inp = [torch.cat([bottle_out,nn.functional.avg_pool2d(x,curr_downscale)], dim=1)]
        up_inp_mask = [None]
        up_inp_mask[0] = up_inp[0]
        curr_downscale = curr_downscale//2
        up_out = []
        up_out_mask = []
        for i in range(len(self.up_sampling_convlayers)):
            #self.up_sampling_convlayers(x
            up_out.append(self.up_sampling_ReLU[i](self.up_sampling_inorm[i](self.up_sampling_convlayers[i](up_inp[i]))))
            up_inp.append(torch.cat([up_out[i],nn.functional.avg_pool2d(x,curr_downscale)], dim=1))
            # Compute the maks output
            up_out_mask.append(self.up_sampling_ReLU_mask[i](self.up_sampling_inorm_mask[i](self.up_sampling_convlayers_mask[i](up_inp_mask[i]))))
            up_inp_mask.append(torch.cat([up_out_mask[i],nn.functional.avg_pool2d(x,curr_downscale)], dim=1))
            curr_downscale = curr_downscale//2
        net_out = self.finalNonLin(self.final_Layer(up_inp[-1]))
        mask = self.finalNonLin_mask(self.final_Layer_mask(up_inp_mask[-1]))
        if out_diff:
            return ((1-mask)*x+mask*net_out), (net_out, mask)
        else:
            return ((1-mask)*x+mask*net_out)
class GeneratorDiffAndMask_V2(nn.Module):
    """Encoder-decoder generator producing an image delta and a blending mask.

    The decoder shares one track; the mask head reads the same final feature
    map as the image head. forward returns the composited image.
    """
    def __init__(self, conv_dim=64, c_dim=5, repeat_num=3, g_smooth_layers=0, binary_mask=0):
        super(GeneratorDiffAndMask_V2, self).__init__()
        layers = []
        # Stem: image channels plus the spatially-replicated label channels.
        layers.append(nn.Conv2d(3+c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False))
        layers.append(nn.InstanceNorm2d(conv_dim, affine=True))
        layers.append(nn.ReLU(inplace=True))
        # Down-Sampling (two stride-2 stages -> 4x total)
        curr_dim = conv_dim
        for i in range(2):
            layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1, bias=False))
            layers.append(nn.InstanceNorm2d(curr_dim*2, affine=True))
            layers.append(nn.ReLU(inplace=True))
            curr_dim = curr_dim * 2
        # Bottleneck
        for i in range(repeat_num):
            layers.append(ResidualBlock(dim_in=curr_dim))
        # Up-Sampling layers; each stage input gets a 3-channel pooled copy of
        # the input image appended (hence the +3 on in-channels below).
        self.up_sampling_convlayers = nn.ModuleList()
        self.up_sampling_inorm= nn.ModuleList()
        self.up_sampling_ReLU= nn.ModuleList()
        for i in range(2):
            self.up_sampling_convlayers.append(nn.ConvTranspose2d(curr_dim+3, curr_dim//2, kernel_size=4, stride=2, padding=1, bias=False))
            self.up_sampling_inorm.append(nn.InstanceNorm2d(curr_dim//2, affine=True))
            self.up_sampling_ReLU.append(nn.ReLU(inplace=False))
            curr_dim = curr_dim // 2
        self.final_Layer = nn.Conv2d(curr_dim+3, 3, kernel_size=7, stride=1, padding=3, bias=False)
        # Remove this non-linearity or use 2.0*tanh ?
        self.finalNonLin = nn.Tanh()
        self.final_Layer_mask = nn.Conv2d(curr_dim+3, 1, kernel_size=7, stride=1, padding=3, bias=False)
        self.finalNonLin_mask = nn.Sigmoid()
        self.g_smooth_layers = g_smooth_layers
        if g_smooth_layers > 0:
            # Optional post-composite smoothing convs (conv + tanh pairs).
            smooth_layers = []
            for i in range(g_smooth_layers):
                smooth_layers.append(nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1, bias=False))
                smooth_layers.append(nn.Tanh())
            self.smooth_layers= nn.Sequential(*smooth_layers)
        self.hardtanh = nn.Hardtanh(min_val=-1, max_val=1)
        self.binary_mask = binary_mask
        self.main = nn.Sequential(*layers)
    def forward(self, x, c, out_diff = False):
        """Composite (1-mask)*x + mask*(2*net_out); optionally return parts."""
        # Replicate the label vector spatially and concatenate to the image.
        c = c.unsqueeze(2).unsqueeze(3)
        c = c.expand(c.size(0), c.size(1), x.size(2), x.size(3))
        xcat = torch.cat([x, c], dim=1)
        bottle_out = self.main(xcat)
        curr_downscale = 4
        up_inp = [torch.cat([bottle_out,nn.functional.avg_pool2d(x,curr_downscale)], dim=1)]
        curr_downscale = curr_downscale//2
        up_out = []
        up_out_mask = []
        for i in range(len(self.up_sampling_convlayers)):
            # Upsample, then append a matching-scale pooled input image.
            up_out.append(self.up_sampling_ReLU[i](self.up_sampling_inorm[i](self.up_sampling_convlayers[i](up_inp[i]))))
            up_inp.append(torch.cat([up_out[i],nn.functional.avg_pool2d(x,curr_downscale)], dim=1))
            curr_downscale = curr_downscale//2
        net_out = self.finalNonLin(self.final_Layer(up_inp[-1]))
        # 2.0 sharpens the sigmoid; mask head shares the final feature map.
        mask = self.finalNonLin_mask(2.0*self.final_Layer_mask(up_inp[-1]))
        if self.binary_mask:
            # Straight-through threshold: hard 0/1 forward, identity gradient.
            mask = ((mask>0.5).float()- mask).detach() + mask
        masked_image = ((1-mask)*x+(mask)*(2.0*net_out))
        if self.g_smooth_layers > 0:
            out_image = self.smooth_layers(masked_image)
        else:
            out_image = masked_image
        if out_diff:
            return out_image, (net_out, mask)
        else:
            return out_image
def get_conv_inorm_relu_block(i, o, k, s, p, slope=0.1, padtype='zero', dilation=1):
    """Build a [pad?] -> Conv2d -> InstanceNorm2d -> LeakyReLU layer list.

    i/o: in/out channels; k/s/p: kernel, stride, padding.
    padtype 'reflection'/'replication' inserts an explicit padding module and
    zeroes the conv's own padding; anything else pads inside the conv.
    """
    pad_modules = {'reflection': nn.ReflectionPad2d, 'replication': nn.ReplicationPad2d}
    block = []
    if padtype in pad_modules:
        # Explicit padding layer takes over; the conv then pads nothing.
        block.append(pad_modules[padtype](p))
        p = 0
    block += [
        nn.Conv2d(i, o, kernel_size=k, stride=s, padding=p, dilation=dilation, bias=False),
        nn.InstanceNorm2d(o, affine=True),
        nn.LeakyReLU(slope, inplace=True),
    ]
    return block
class GeneratorOnlyMask(nn.Module):
    """Encoder-decoder generator that predicts only a soft spatial mask.

    forward returns (1 - mask) * x, i.e. the input with the masked region
    suppressed; with out_diff=True the mask is returned as well.
    """

    def __init__(self, conv_dim=64, c_dim=5, repeat_num=5, g_smooth_layers=0, binary_mask=0):
        super(GeneratorOnlyMask, self).__init__()
        # Encoder: stem conv + three stride-2 downsampling stages (8x total).
        enc = get_conv_inorm_relu_block(3 + c_dim, conv_dim, 7, 1, 3, padtype='zero')
        n_filt = conv_dim
        for _ in range(3):
            enc.extend(get_conv_inorm_relu_block(n_filt, n_filt * 2, 4, 2, 1, padtype='zero'))
            n_filt *= 2
        # Bottleneck: residual blocks whose dilation doubles after block 1.
        dil = 1
        for blk in range(repeat_num):
            enc.append(ResidualBlock(dim_in=n_filt, dilation=dil, padtype='zero'))
            if blk > 1:
                dil *= 2
        # Decoder: three transpose-conv upsampling stages; each stage input
        # also receives a 3-channel pooled copy of the image (hence the +3).
        self.up_sampling_convlayers = nn.ModuleList()
        self.up_sampling_inorm = nn.ModuleList()
        self.up_sampling_ReLU = nn.ModuleList()
        for _ in range(3):
            self.up_sampling_convlayers.append(
                nn.ConvTranspose2d(n_filt + 3, n_filt // 2, kernel_size=4, stride=2, padding=1, bias=False))
            self.up_sampling_inorm.append(nn.InstanceNorm2d(n_filt // 2, affine=True))
            self.up_sampling_ReLU.append(nn.ReLU(inplace=False))
            n_filt //= 2
        self.final_Layer_mask = nn.Conv2d(n_filt + 3, 1, kernel_size=7, stride=1, padding=3, bias=False)
        self.finalNonLin_mask = nn.Sigmoid()
        self.g_smooth_layers = g_smooth_layers
        if g_smooth_layers > 0:
            # Optional smoothing stack (unused by forward's mask-only path).
            smooth = []
            for _ in range(g_smooth_layers):
                smooth.append(nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1, bias=False))
                smooth.append(nn.Tanh())
            self.smooth_layers = nn.Sequential(*smooth)
        self.binary_mask = binary_mask
        self.main = nn.Sequential(*enc)

    def forward(self, x, c, out_diff=False):
        """Return the masked image (1 - mask) * x; with out_diff also the mask."""
        # Broadcast the label vector over the spatial grid, stack on the image.
        lab = c.unsqueeze(2).unsqueeze(3)
        lab = lab.expand(lab.size(0), lab.size(1), x.size(2), x.size(3))
        feat = self.main(torch.cat([x, lab], dim=1))
        scale = 8
        dec_inp = [torch.cat([feat, nn.functional.avg_pool2d(x, scale)], dim=1)]
        scale //= 2
        for conv, inorm, relu in zip(self.up_sampling_convlayers, self.up_sampling_inorm, self.up_sampling_ReLU):
            stage_out = relu(inorm(conv(dec_inp[-1])))
            dec_inp.append(torch.cat([stage_out, nn.functional.avg_pool2d(x, scale)], dim=1))
            scale //= 2
        # 2.0 sharpens the sigmoid response of the mask head.
        mask = self.finalNonLin_mask(2.0 * self.final_Layer_mask(dec_inp[-1]))
        if self.binary_mask:
            # Straight-through threshold: hard 0/1 forward, identity gradient.
            mask = ((mask > 0.5).float() - mask).detach() + mask
        out_image = (1 - mask) * x
        if out_diff:
            return out_image, mask
        return out_image
class GeneratorMaskAndFeat(nn.Module):
    """Encoder-decoder generator producing a spatial mask (optionally one per
    class) and, when out_feat_dim > 0, a global feature vector from the
    bottleneck.

    forward returns (None, mask, out_feat, allmasks).

    Bug fix: the feature-head construction loop used Python 2 ``xrange``,
    which raises NameError on Python 3; changed to ``range`` for consistency
    with every other loop in this file.
    """
    def __init__(self, conv_dim=64, c_dim=5, repeat_num=5, g_smooth_layers=0, binary_mask=0, out_feat_dim=256, up_sampling_type='bilinear',
                 n_upsamp_filt=2, mask_size=0, additional_cond='image', per_classMask=0, noInpLabel=0, mask_normalize=False, nc=3,
                 use_bias=False, use_bnorm=0, cond_inp_pnet=0, cond_parallel_track=0):
        super(GeneratorMaskAndFeat, self).__init__()
        # Low-resolution mask mode (<= 32 px): one less down/up-sampling stage.
        self.lowres_mask = int(mask_size <= 32)
        self.per_classMask = per_classMask
        self.additional_cond = additional_cond
        self.noInpLabel = noInpLabel
        self.mask_normalize = mask_normalize
        layers = []
        # Stem: image (128 x 128 per original comment), optionally with the
        # label vector appended as constant channels.
        layers.extend(get_conv_inorm_relu_block(nc if noInpLabel else nc+c_dim, conv_dim, 7, 1, 3, padtype='zero'))
        # Down-Sampling
        curr_dim = conv_dim
        # Channels concatenated to each decoder stage input: pooled image or label map.
        extra_dim = 3 if self.additional_cond == 'image' else c_dim if self.additional_cond == 'label' else 0
        #-------------------------------------------
        # After downsampling spatial dim is 16 x 16, feat dim 512
        #-------------------------------------------
        for i in range(3 - self.lowres_mask):
            layers.extend(get_conv_inorm_relu_block(curr_dim, curr_dim*2, 4, 2, 1, padtype='zero'))
            curr_dim = curr_dim * 2
        dilation = 1
        #-------------------------------------------
        # Bottleneck residual blocks; spatial dim stays 16 x 16
        #-------------------------------------------
        for i in range(repeat_num):
            layers.append(ResidualBlock(dim_in=curr_dim, dilation=dilation, padtype='zero'))
            if i > 1:
                # dilations grow 1, 1, 1, 2, 4, ...
                dilation = dilation*2
        # Up-Sampling layers (norm/ReLU tracks only in full-resolution mode)
        self.up_sampling_convlayers = nn.ModuleList()
        if self.lowres_mask == 0:
            self.up_sampling_inorm = nn.ModuleList()
            self.up_sampling_ReLU = nn.ModuleList()
        self.out_feat_dim = out_feat_dim
        if out_feat_dim > 0:
            featGenLayers = []
            #-------------------------------------------
            # Feature head: three conv+pool stages reduce 16x16 -> 1x1.
            #-------------------------------------------
            for i in range(3):  # FIX: was Python 2 `xrange(3)`
                featGenLayers.extend(get_conv_inorm_relu_block(curr_dim, curr_dim, 3, 1, 1, padtype='zero'))
                featGenLayers.append(nn.MaxPool2d(2) if i < 2 else nn.MaxPool2d(4))
            self.featGenConv = nn.Sequential(*featGenLayers)
            self.featGenLin = nn.Linear(curr_dim, out_feat_dim)
        for i in range(3-self.lowres_mask):
            if self.lowres_mask == 0:
                if up_sampling_type == 't_conv':
                    self.up_sampling_convlayers.append(nn.ConvTranspose2d(curr_dim+extra_dim, curr_dim//2, kernel_size=4, stride=2, padding=1, bias=use_bias))
                elif up_sampling_type == 'nearest':
                    # NOTE(review): this appends TWO modules per stage, which
                    # desynchronizes forward's per-stage indexing — confirm
                    # 'nearest' mode is actually unused before relying on it.
                    self.up_sampling_convlayers.append(nn.Upsample(scale_factor=2, mode='nearest'))
                    self.up_sampling_convlayers.append(nn.Conv2d(curr_dim+extra_dim, curr_dim//2, kernel_size=3, stride=1, padding=1, bias=use_bias))
                elif up_sampling_type == 'deform':
                    self.up_sampling_convlayers.append(AdaptiveScaleTconv(curr_dim+extra_dim, curr_dim//2, scale=2, n_filters=n_upsamp_filt))
                elif up_sampling_type == 'bilinear':
                    self.up_sampling_convlayers.append(AdaptiveScaleTconv(curr_dim+extra_dim, curr_dim//2, scale=2, use_deform=False, n_filters=n_upsamp_filt))
                self.up_sampling_inorm.append(nn.InstanceNorm2d(curr_dim//2, affine=True))
                self.up_sampling_ReLU.append(nn.ReLU(inplace=False))
            else:
                # Low-res: stay at the same scale, just reduce channels.
                self.up_sampling_convlayers.append(nn.Sequential(*get_conv_inorm_relu_block(curr_dim+extra_dim, curr_dim//2, 3, 1, 1, padtype='zero')))
            curr_dim = curr_dim // 2
        # One mask channel, or c_dim+1 channels (one per class + background).
        self.final_Layer_mask = nn.Conv2d(curr_dim+extra_dim, c_dim+1 if per_classMask else 1, kernel_size=7, stride=1, padding=3, bias=True if mask_normalize else use_bias)
        self.finalNonLin_mask = nn.Sigmoid()
        self.g_smooth_layers = g_smooth_layers
        if g_smooth_layers > 0:
            smooth_layers = []
            for i in range(g_smooth_layers):
                smooth_layers.append(nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1, bias=False))
                smooth_layers.append(nn.Tanh())
            self.smooth_layers = nn.Sequential(*smooth_layers)
        self.binary_mask = binary_mask
        self.main = nn.Sequential(*layers)

    def prepInp(self, feat, x, c, curr_scale):
        """Append the configured conditioning (pooled image or label map) to feat."""
        if self.additional_cond == 'image':
            up_inp = torch.cat([feat, nn.functional.avg_pool2d(x, curr_scale)], dim=1)
        elif self.additional_cond == 'label':
            up_inp = torch.cat([feat, nn.functional.avg_pool2d(c, curr_scale)], dim=1)
        else:
            up_inp = feat
        return up_inp

    def forward(self, x, c, out_diff=False, binary_mask=False, mask_threshold=0.3):
        """Predict masks; returns (None, mask, out_feat, allmasks)."""
        bsz = x.size(0)
        if self.per_classMask:
            # Index of the active class per sample; falls back to the
            # background channel when no class is active.
            maxC, cIdx = c.max(dim=1)
            cIdx[maxC == 0] = c.size(1) + 1 if self.mask_normalize else c.size(1)
        if self.noInpLabel:
            xcat = x
        else:
            # Replicate the label vector spatially and append to the image.
            c = c.unsqueeze(2).unsqueeze(3)
            c = c.expand(c.size(0), c.size(1), x.size(2), x.size(3))
            xcat = torch.cat([x, c], dim=1)
        bottle_out = self.main(xcat)
        curr_downscale = 8 if self.lowres_mask == 0 else 4
        up_inp = [self.prepInp(bottle_out, x, c, curr_downscale)]
        curr_downscale = curr_downscale//2 if self.lowres_mask == 0 else curr_downscale
        up_out = []
        for i in range(len(self.up_sampling_convlayers)):
            if type(self.up_sampling_convlayers[i]) == AdaptiveScaleTconv:
                # AdaptiveScaleTconv also returns deform params; dropped here.
                upsampout, _ = self.up_sampling_convlayers[i](up_inp[i])
            else:
                upsampout = self.up_sampling_convlayers[i](up_inp[i])
            up_out.append(self.up_sampling_ReLU[i](self.up_sampling_inorm[i](upsampout)) if self.lowres_mask == 0 else upsampout)
            up_inp.append(self.prepInp(up_out[i], x, c, curr_downscale))
            curr_downscale = curr_downscale//2 if self.lowres_mask == 0 else curr_downscale
        allmasks = self.final_Layer_mask(up_inp[-1])
        if self.mask_normalize:
            # Softmax over class channels plus an all-zero background channel.
            allmasks = torch.cat([F.softmax(allmasks, dim=1), torch.zeros_like(allmasks[:, 0:1, ::]).detach()], dim=1)
        chosenMask = allmasks if (self.per_classMask == 0) else allmasks[torch.arange(cIdx.size(0)).long().cuda(), cIdx, ::].view(bsz, 1, allmasks.size(2), allmasks.size(3))
        if not self.mask_normalize:
            mask = self.finalNonLin_mask(2.0*chosenMask)
        else:
            mask = chosenMask
        if self.out_feat_dim > 0:
            out_feat = self.featGenLin(self.featGenConv(bottle_out).view(bsz, -1))
        else:
            out_feat = None
        if self.binary_mask or binary_mask:
            # Straight-through binarization: hard forward, identity gradient.
            if self.mask_normalize:
                maxV, _ = allmasks.max(dim=1)
                mask = (torch.ge(mask, maxV.view(mask.size())).float() - mask).detach() + mask
            else:
                mask = ((mask >= mask_threshold).float() - mask).detach() + mask
        return None, mask, out_feat, allmasks
class GeneratorMaskAndFeat_ImNetBackbone(nn.Module):
    """Mask generator built on a pretrained VGG-19 encoder with a three-block
    decoder that mixes VGG features at multiple scales.

    forward returns (None, mask, out_feat, allmasks).
    """
    def __init__(self, conv_dim=64, c_dim=5, repeat_num=5, g_smooth_layers=0, binary_mask=0, out_feat_dim=256, up_sampling_type='bilinear',
                 n_upsamp_filt=2, mask_size = 0, additional_cond='image', per_classMask=0, noInpLabel=0, mask_normalize = False, nc=3, use_bias = False,
                 net_type='vgg19', use_bnorm = 0, cond_inp_pnet=0):
        super(GeneratorMaskAndFeat_ImNetBackbone, self).__init__()
        # Pretrained perceptual network used as the frozen encoder backbone.
        self.pnet = Vgg19() if net_type == 'vgg19' else None
        self.per_classMask = per_classMask
        self.additional_cond = additional_cond
        self.noInpLabel = noInpLabel
        self.mask_normalize = mask_normalize
        self.nc = nc
        self.out_feat_dim = out_feat_dim
        self.binary_mask = binary_mask
        # Down-Sampling
        curr_dim = conv_dim
        # Channels concatenated to every decoder input (pooled image or label map).
        extra_dim = 3 if self.additional_cond == 'image' else c_dim if self.additional_cond == 'label'else 0
        layers = nn.ModuleList()
        if nc > 3:
            # Extra ground-truth mask channel is appended to each stage input.
            extra_dim = extra_dim + 1
            self.appendGtInp = True
        else:
            self.appendGtInp = False
        # Residual block flavor selected by use_bnorm (1: BN, 2: IN, else none).
        ResBlock = ResidualBlockBnorm if use_bnorm==1 else ResidualBlock if use_bnorm==2 else ResidualBlockNoNorm
        #===========================================================
        # Three blocks of layers:
        # Feature absorb layer --> Residual block --> Upsampling
        #===========================================================
        # First block This takes input features of 512x8x8 dims
        # Upsample to 16x16
        layers.append(nn.Conv2d(512+extra_dim, 512, kernel_size=3, stride=1, padding=1))
        layers.append(nn.LeakyReLU(0.1))
        layers.append(ResBlock(dim_in=512,dilation=1, padtype='zero'))
        layers.append(nn.Upsample(scale_factor=2, mode='nearest'))
        #-----------------------------------------------------------
        # Second Block - This takes input features of 512x16x16 from Layer 1 and 512x16x16 from VGG
        # Upsample to 32x32
        layers.append(nn.Conv2d(1024+extra_dim, 512, kernel_size=3, stride=1, padding=1))
        layers.append(nn.LeakyReLU(0.1))
        layers.append(ResBlock(dim_in=512,dilation=1, padtype='zero'))
        layers.append(nn.Upsample(scale_factor=2, mode='nearest'))
        #-----------------------------------------------------------
        # Third layer
        # This takes input features of 256x32x32 from Layer 1 and 256x32x32 from VGG
        layers.append(nn.Conv2d(512+256+extra_dim, 512, kernel_size=3, stride=1, padding=1))
        layers.append(nn.LeakyReLU(0.1))
        layers.append(ResBlock(dim_in=512,dilation=1, padtype='zero'))
        self.layers = layers
        self.final_Layer_mask = nn.Conv2d(512+extra_dim, c_dim+1 if per_classMask else 1, kernel_size=7, stride=1, padding=3, bias=True if mask_normalize else use_bias)
        self.finalNonLin_mask = nn.Sigmoid()
        # VGG input normalization constants (shift/scale per RGB channel).
        self.shift = torch.autograd.Variable(torch.Tensor([-.030, -.088, -.188]).view(1,3,1,1), requires_grad=False).cuda()
        self.scale = torch.autograd.Variable(torch.Tensor([.458, .448, .450]).view(1,3,1,1), requires_grad=False).cuda()
    def prepInp(self, feat, img, c, gtmask):
        """Append the configured conditioning (and optional GT mask) to feat."""
        if self.additional_cond == 'image':
            up_inp = torch.cat([feat,nn.functional.adaptive_avg_pool2d(img,feat.size(-1))], dim=1)
        elif self.additional_cond == 'label':
            up_inp = torch.cat([feat,c.expand(c.size(0), c.size(1), feat.size(2), feat.size(3))], dim=1)
        else:
            up_inp = feat
        if self.appendGtInp:
            up_inp = torch.cat([up_inp, nn.functional.adaptive_max_pool2d(gtmask,up_inp.size(-1))], dim=1)
        return up_inp
    def forward(self, x, c, out_diff = False, binary_mask=False, mask_threshold = 0.3):
        """Predict masks from VGG features; returns (None, mask, out_feat, allmasks)."""
        bsz = x.size(0)
        img = x[:,:3,::]
        # Channels beyond RGB (if any) carry the ground-truth mask.
        gtmask = x[:,3:,::] if self.appendGtInp else None
        if self.per_classMask:
            # Per-sample active-class index; background channel when none active.
            maxC,cIdx = c.max(dim=1)
            cIdx[maxC==0] = c.size(1) + 1 if self.mask_normalize else c.size(1)
        c = c.unsqueeze(2).unsqueeze(3)
        # Normalize into the VGG input distribution.
        img = (img - self.shift.expand_as(img))/self.scale.expand_as(img)
        vgg_out = self.pnet(img)
        up_inp = [self.prepInp(vgg_out[-1], img, c, gtmask)]
        for i in range(len(self.layers)):
            upsampout = self.layers[i](up_inp[-1])
            up_inp.append(upsampout)
            if i%4 == 3:
                # After each Upsample (every 4th module), mix in the VGG
                # feature map of the matching scale.
                up_inp.append(self.prepInp(torch.cat([up_inp[-1],vgg_out[-1-(i+1)//4]],dim=1), img, c, gtmask))
        up_inp.append(self.prepInp(up_inp[-1], img, c, gtmask))
        allmasks = self.final_Layer_mask(up_inp[-1])
        if self.mask_normalize:
            # Softmax over class channels plus an all-zero background channel.
            allmasks = torch.cat([F.softmax(allmasks, dim=1), torch.zeros_like(allmasks[:,0:1,::]).detach()], dim=1)
        chosenMask = allmasks if (self.per_classMask==0) else allmasks[torch.arange(cIdx.size(0)).long().cuda(),cIdx,::].view(bsz,1,allmasks.size(2), allmasks.size(3))
        if not self.mask_normalize:
            mask = self.finalNonLin_mask(2.0*chosenMask)
        else:
            mask = chosenMask
        if self.out_feat_dim > 0:
            # NOTE(review): `bottle_out` is never defined in this method and
            # `featGenConv`/`featGenLin` are never created in __init__, so
            # this branch raises NameError if out_feat_dim > 0 — confirm this
            # class is only used with out_feat_dim=0, or port the feature
            # head from GeneratorMaskAndFeat.
            out_feat = self.featGenLin(self.featGenConv(bottle_out).view(bsz,-1))
        else:
            out_feat = None
        if self.binary_mask or binary_mask:
            # Straight-through binarization: hard forward, identity gradient.
            if self.mask_normalize:
                maxV,_ = allmasks.max(dim=1)
                mask = (torch.ge(mask, maxV.view(mask.size())).float()- mask).detach() + mask
            else:
                mask = ((mask>=mask_threshold).float()- mask).detach() + mask
        return None, mask, out_feat, allmasks
class GeneratorMaskAndFeat_ImNetBackbone_V2(nn.Module):
    """V2 of the VGG-backbone mask generator: constant-resolution decoder with
    channel reduction (512 -> 256 -> 128) and an optional parallel track that
    embeds the ground-truth mask before fusion.

    forward returns (None, mask, out_feat, allmasks).
    """
    def __init__(self, conv_dim=64, c_dim=5, repeat_num=5, g_smooth_layers=0, binary_mask=0, out_feat_dim=256, up_sampling_type='bilinear',
                 n_upsamp_filt=2, mask_size = 0, additional_cond='image', per_classMask=0, noInpLabel=0, mask_normalize = False, nc=3, use_bias = False,
                 net_type='vgg19', use_bnorm = 0, cond_inp_pnet=0, cond_parallel_track= 0):
        super(GeneratorMaskAndFeat_ImNetBackbone_V2, self).__init__()
        # VGG encoder configured to emit features at the target mask size.
        self.pnet = Vgg19(final_feat_size=mask_size) if net_type == 'vgg19' else None
        self.mask_size = mask_size
        self.per_classMask = per_classMask
        self.additional_cond = additional_cond
        self.noInpLabel = noInpLabel
        self.mask_normalize = mask_normalize
        self.nc = nc
        self.out_feat_dim = out_feat_dim
        # cond_inp_pnet: multiply the GT mask into the VGG input instead of
        # concatenating it downstream.
        self.cond_inp_pnet = cond_inp_pnet
        # cond_parallel_track: embed the GT mask with small convs before fusion.
        self.cond_parallel_track = cond_parallel_track
        # Down-Sampling
        curr_dim = conv_dim
        # Channels concatenated to each decoder input (pooled image or label map).
        extra_dim = 3 if self.additional_cond == 'image' else c_dim if self.additional_cond == 'label'else 0
        layers = nn.ModuleList()
        if nc > 3:
            extra_dim = extra_dim# + 1
            self.appendGtInp = False #True
        else:
            self.appendGtInp = False
        # Residual block flavor selected by use_bnorm (1: BN, 2: IN, else none).
        ResBlock = ResidualBlockBnorm if use_bnorm==1 else ResidualBlock if use_bnorm==2 else ResidualBlockNoNorm
        #===========================================================
        # Three blocks of layers:
        # Feature absorb layer --> Residual block --> Upsampling
        #===========================================================
        # First block This takes input features of 512x32x32 dims
        # Upsample to 16x16
        start_dim = 512
        # Width of the GT-mask conditioning appended to the first block input.
        gt_cond_dim = 0 if cond_inp_pnet else int(nc>3)*self.cond_parallel_track if self.cond_parallel_track else int(nc>3)
        if self.cond_parallel_track:
            cond_parallel_layers = [] #nn.ModuleList()
            cond_parallel_layers.append(nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1))
            cond_parallel_layers.append(nn.LeakyReLU(0.1, inplace=True))
            cond_parallel_layers.append(nn.Conv2d(64, self.cond_parallel_track, kernel_size=3, stride=1, padding=1))
            cond_parallel_layers.append(nn.LeakyReLU(0.1, inplace=True))
            self.cond_parallel_layers = nn.Sequential(*cond_parallel_layers)
        layers.append(nn.Conv2d(512+extra_dim + gt_cond_dim, start_dim, kernel_size=3, stride=1, padding=1))
        layers.append(nn.LeakyReLU(0.1, inplace=True))
        layers.append(ResBlock(dim_in=start_dim,dilation=1, padtype='zero'))
        #layers.append(nn.Upsample(scale_factor=2, mode='nearest'))
        #-----------------------------------------------------------
        # Second Block - This takes input features of 512x16x16 from Layer 1 and 512x16x16 from VGG
        # Upsample to 32x32
        layers.append(nn.Conv2d(start_dim+extra_dim, start_dim//2, kernel_size=3, stride=1, padding=1))
        start_dim = start_dim // 2
        layers.append(nn.LeakyReLU(0.1, inplace=True))
        layers.append(ResBlock(dim_in=start_dim,dilation=1, padtype='zero'))
        #-----------------------------------------------------------
        # Third layer
        # This takes input features of 256x32x32 from Layer 1 and 256x32x32 from VGG
        layers.append(nn.Conv2d(start_dim+extra_dim, start_dim//2, kernel_size=3, stride=1, padding=1))
        start_dim = start_dim // 2
        layers.append(nn.LeakyReLU(0.1, inplace=True))
        layers.append(ResBlock(dim_in=start_dim,dilation=1, padtype='zero'))
        self.layers = layers
        self.final_Layer_mask = nn.Conv2d(start_dim+extra_dim, c_dim+1 if per_classMask else 1, kernel_size=7, stride=1, padding=3, bias=True if mask_normalize else use_bias)
        self.finalNonLin_mask = nn.Sigmoid()
        self.binary_mask = binary_mask
        # VGG input normalization constants (shift/scale per RGB channel).
        self.shift = torch.autograd.Variable(torch.Tensor([-.030, -.088, -.188]).view(1,3,1,1), requires_grad=False).cuda()
        self.scale = torch.autograd.Variable(torch.Tensor([.458, .448, .450]).view(1,3,1,1), requires_grad=False).cuda()
    def prepInp(self, feat, img, c, gtmask):
        """Append the configured conditioning (and optional GT mask) to feat."""
        if self.additional_cond == 'image':
            up_inp = torch.cat([feat,nn.functional.adaptive_avg_pool2d(img,feat.size(-1))], dim=1)
        elif self.additional_cond == 'label':
            up_inp = torch.cat([feat,c.expand(c.size(0), c.size(1), feat.size(2), feat.size(3))], dim=1)
        else:
            up_inp = feat
        if self.appendGtInp:
            up_inp = torch.cat([up_inp, nn.functional.adaptive_max_pool2d(gtmask,up_inp.size(-1))], dim=1)
        return up_inp
    def forward(self, x, c, out_diff = False, binary_mask=False, mask_threshold = 0.3):
        """Predict masks from VGG features; returns (None, mask, out_feat, allmasks)."""
        bsz = x.size(0)
        # Channels beyond RGB (if any) carry the ground-truth mask.
        gtmask = x[:,3:,::] if x.size(1) > 3 else None
        img = x[:,:3,::]
        # Normalize into the VGG input distribution.
        img = (img - self.shift.expand_as(img))/self.scale.expand_as(img)
        if self.cond_inp_pnet:
            img = img*gtmask
        if self.cond_parallel_track and gtmask is not None:
            gtfeat = self.cond_parallel_layers(gtmask)
        else:
            gtfeat = gtmask
        if self.per_classMask:
            # Per-sample active-class index; background channel when none active.
            maxC,cIdx = c.max(dim=1)
            cIdx[maxC==0] = c.size(1) + 1 if self.mask_normalize else c.size(1)
        c = c.unsqueeze(2).unsqueeze(3)
        vgg_out = self.pnet(img)
        # Fuse the GT-mask conditioning with the deepest VGG feature map.
        if (gtfeat is not None) and (not self.cond_inp_pnet):
            up_inp = [torch.cat([self.prepInp(vgg_out[-1], img, c, gtfeat), nn.functional.adaptive_max_pool2d(gtfeat,vgg_out[-1].size(-1))],dim=1)]
        else:
            up_inp = [self.prepInp(vgg_out[-1], img, c, gtfeat)]
        for i in range(len(self.layers)):
            upsampout = self.layers[i](up_inp[-1])
            up_inp.append(upsampout)
            if i%3 == 2:
                # Re-append conditioning after each 3-module decoder block.
                up_inp.append(self.prepInp(upsampout, img, c, gtfeat))
        allmasks = self.final_Layer_mask(up_inp[-1])
        if self.mask_normalize:
            # Softmax over class channels plus an all-zero background channel.
            allmasks = torch.cat([F.softmax(allmasks, dim=1), torch.zeros_like(allmasks[:,0:1,::]).detach()], dim=1)
        chosenMask = allmasks if (self.per_classMask==0) else allmasks[torch.arange(cIdx.size(0)).long().cuda(),cIdx,::].view(bsz,1,allmasks.size(2), allmasks.size(3))
        if not self.mask_normalize:
            mask = self.finalNonLin_mask(2.0*chosenMask)
        else:
            mask = chosenMask
        if self.out_feat_dim > 0:
            # NOTE(review): `bottle_out` is never defined in this method and
            # `featGenConv`/`featGenLin` are never created in __init__, so
            # this branch raises NameError if out_feat_dim > 0 — confirm the
            # class is only instantiated with out_feat_dim=0.
            out_feat = self.featGenLin(self.featGenConv(bottle_out).view(bsz,-1))
        else:
            out_feat = None
        if self.binary_mask or binary_mask:
            # Straight-through binarization: hard forward, identity gradient.
            if self.mask_normalize:
                maxV,_ = allmasks.max(dim=1)
                mask = (torch.ge(mask, maxV.view(mask.size())).float()- mask).detach() + mask
            else:
                mask = ((mask>=mask_threshold).float()- mask).detach() + mask
        return None, mask, out_feat, allmasks
class GeneratorBoxReconst(nn.Module):
    """Encoder-decoder generator that reconstructs an image conditioned on a
    target feature vector injected at the bottleneck.

    Bug fix: the encoder-stage loop in ``forward`` used Python 2 ``xrange``
    (NameError on Python 3); changed to ``range`` like the rest of the file.
    """
    def __init__(self, conv_dim=64, feat_dim=128, repeat_num=6, g_downsamp_layers=2, dil_start=0,
                 up_sampling_type='t_conv', padtype='zero', nc=3, n_upsamp_filt=1, gen_full_image=0):
        super(GeneratorBoxReconst, self).__init__()
        downsamp_layers = []
        layers = []
        # Stem takes the image plus one extra channel (e.g. a box/mask channel).
        downsamp_layers.extend(get_conv_inorm_relu_block(nc+1, conv_dim, 7, 1, 3, padtype=padtype))
        self.g_downsamp_layers = g_downsamp_layers
        self.gen_full_image = gen_full_image
        # Down-Sampling
        curr_dim = conv_dim
        for i in range(g_downsamp_layers):
            downsamp_layers.extend(get_conv_inorm_relu_block(curr_dim, curr_dim*2, 4, 2, 1, padtype=padtype))
            curr_dim = curr_dim * 2
        # Bottleneck: the target feature vector is concatenated here.
        dilation = 1
        if feat_dim > 0:
            layers.extend(get_conv_inorm_relu_block(curr_dim+feat_dim, curr_dim, 3, 1, 1, padtype=padtype, dilation=dilation))
        for i in range(repeat_num):
            layers.append(ResidualBlock(dim_in=curr_dim, dilation=dilation, padtype=padtype))
            if i > dil_start:
                # dilation doubles after dil_start
                dilation = dilation*2
        # Up-Sampling
        for i in range(g_downsamp_layers):
            if up_sampling_type == 't_conv':
                layers.append(nn.ConvTranspose2d(curr_dim, curr_dim//2, kernel_size=4, stride=2, padding=1, bias=False))
            elif up_sampling_type == 'nearest':
                layers.append(nn.Upsample(scale_factor=2, mode='nearest'))
                layers.append(nn.Conv2d(curr_dim, curr_dim//2, kernel_size=3, stride=1, padding=1, bias=False))
            elif up_sampling_type == 'deform':
                # When gen_full_image, the matching encoder stage output is fed
                # in as extra channels (skip connection).
                layers.append(AdaptiveScaleTconv(curr_dim+(self.gen_full_image * curr_dim//2), curr_dim//2, scale=2, n_filters=n_upsamp_filt))
            elif up_sampling_type == 'bilinear':
                layers.append(AdaptiveScaleTconv(curr_dim+(self.gen_full_image * curr_dim//2), curr_dim//2, scale=2, use_deform=False))
            layers.append(nn.InstanceNorm2d(curr_dim//2, affine=True))
            layers.append(nn.LeakyReLU(0.1, inplace=True))
            curr_dim = curr_dim // 2
        pad = 3
        if padtype == 'reflection':
            layers.append(nn.ReflectionPad2d(pad))
            pad = 0
        layers.append(nn.Conv2d(curr_dim, nc, kernel_size=7, stride=1, padding=pad, bias=False))
        # Remove this non-linearity or use 2.0*tanh ? (kept from original)
        layers.append(nn.Tanh())
        self.hardtanh = nn.Hardtanh(min_val=-1, max_val=1)
        self.downsample = nn.Sequential(*downsamp_layers)
        # ModuleList (not Sequential) so AdaptiveScaleTconv's two-value return
        # can be handled per layer in forward.
        self.generate = nn.ModuleList(layers)

    def forward(self, x, feat, out_diff=False):
        """Reconstruct the image; with out_diff also return deform params."""
        w, h = x.size(2), x.size(3)
        # Pad so width/height are multiples of 8 before downsampling, so no
        # pixels are lost through the stride-2 stages; cropped back at the end.
        xI = F.pad(x, (0, (8-h % 8) % 8, 0, (8 - w % 8) % 8), mode='replicate')
        if self.gen_full_image:
            # Keep every encoder stage output for skip connections; each stage
            # is 3 modules (conv/inorm/relu) in self.downsample.
            dowOut = [xI]
            for i in range(self.g_downsamp_layers+1):  # FIX: was Python 2 `xrange`
                dowOut.append(self.downsample[3*i+2](self.downsample[3*i+1](self.downsample[3*i](dowOut[-1]))))
            downsamp_out = dowOut[-1]
        else:
            downsamp_out = self.downsample(xI)
        # Replicate the target feature spatially and concatenate.
        if feat is not None:
            feat = feat.unsqueeze(2).unsqueeze(3)
            feat = feat.expand(feat.size(0), feat.size(1), downsamp_out.size(2), downsamp_out.size(3))
            genInp = torch.cat([downsamp_out, feat], dim=1)
        else:
            genInp = downsamp_out
        outs = [genInp]
        feat_out = []
        d_count = -2
        for i, l in enumerate(self.generate):
            if type(l) is not AdaptiveScaleTconv:
                outs.append(l(outs[i]))
            else:
                # Deformable upsampling consumes the matching encoder output
                # (walking backwards through dowOut) and reports its params.
                deform_out, deform_params = l(outs[i], extra_inp=None if not self.gen_full_image else dowOut[d_count])
                d_count = d_count-1
                outs.append(deform_out)
                feat_out.append(deform_params)
        # Crop the replicate-padding back off.
        outImg = outs[-1][:, :, :w, :h]
        if not out_diff:
            return outImg
        else:
            return outImg, feat_out
class Discriminator(nn.Module):
    """PatchGAN discriminator with an optional auxiliary classifier branch.

    classify_branch == 1: (patch scores, class logits);
    classify_branch == 2: projection-discriminator score, (score, None);
    otherwise: (patch scores, None).
    """

    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6, init_stride=2, classify_branch=1, max_filters=None, nc=3, use_bnorm=False, d_kernel_size = 4, patch_size = 2, use_tv_inp = 0):
        super(Discriminator, self).__init__()
        self.use_tv_inp = use_tv_inp
        if self.use_tv_inp:
            # Fixed difference kernel used to append a total-variation-like
            # channel to the input; constant, not learned.
            tv = torch.zeros((1, 3, 3, 3))
            tv[0, :, 1, 1] = -2.0
            tv[0, :, 1, 2] = 1.0
            tv[0, :, 2, 1] = 1.0
            self.tvWeight = Variable(tv, requires_grad=False).cuda()
        # Start training
        self.nc = nc + use_tv_inp
        # UGLY HACK kept from original: kernel size <= 1 means "use 4".
        dkz = 4 if d_kernel_size <= 1 else d_kernel_size
        stack = []
        if dkz == 3:
            stack += [nn.Conv2d(self.nc, conv_dim, kernel_size=3, stride=1, padding=1),
                      nn.LeakyReLU(0.1, inplace=True),
                      nn.Conv2d(conv_dim, conv_dim, kernel_size=dkz, stride=init_stride, padding=1)]
        else:
            stack.append(nn.Conv2d(self.nc, conv_dim, kernel_size=dkz, stride=init_stride, padding=1))
        if use_bnorm:
            stack.append(nn.BatchNorm2d(conv_dim))
        stack.append(nn.LeakyReLU(0.1, inplace=True))
        assert patch_size <= 64
        # Stop striding once the feature map reaches the requested patch size.
        n_down = int(np.log2(image_size // patch_size))
        width = conv_dim
        for i in range(1, repeat_num):
            nxt = width * 2 if max_filters is None else min(width * 2, max_filters)
            stack.append(nn.Conv2d(width, nxt, kernel_size=dkz, stride=(1 if i >= n_down else 2), padding=1))
            if use_bnorm:
                stack.append(nn.BatchNorm2d(nxt))
            stack.append(nn.LeakyReLU(0.1, inplace=True))
            width = nxt
        self.main = nn.Sequential(*stack)
        # Per-patch real/fake head.
        self.conv1 = nn.Conv2d(width, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.classify_branch = classify_branch
        k_size = int(image_size / np.power(2, repeat_num)) + 2 - init_stride
        if classify_branch == 1:
            # Auxiliary classifier covering the whole remaining feature map.
            self.conv2 = nn.Conv2d(width, c_dim, kernel_size=k_size, bias=False)
        elif classify_branch == 2:
            # Projection-discriminator label embedding.
            self.embLayer = nn.Linear(c_dim, width, bias=False)

    def forward(self, x, label=None):
        """Score x; `label` is only used by the projection branch (==2)."""
        if self.use_tv_inp:
            tv_map = torch.abs(F.conv2d(F.pad(x, (1, 1, 1, 1), mode='replicate'), self.tvWeight))
            x = torch.cat([x, tv_map], dim=1)
        bsz = x.size(0)
        h = self.main(x)
        out_real = self.conv1(h)
        if self.classify_branch == 1:
            return out_real.view(bsz, -1), self.conv2(h).squeeze()
        if self.classify_branch == 2:
            lab_emb = self.embLayer(label)
            pooled = F.normalize(F.avg_pool2d(h, 2).view(bsz, -1), dim=1)
            out_aux = (lab_emb * pooled).sum(dim=1)
            return (F.avg_pool2d(out_real, 2).view(bsz) + out_aux).view(-1, 1), None
        return out_real.squeeze(), None
class Discriminator_SN(nn.Module):
    """PatchGAN discriminator with spectral normalization on every conv."""

    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6, init_stride=2):
        super(Discriminator_SN, self).__init__()
        body = [SpectralNorm(nn.Conv2d(3, conv_dim, kernel_size=4, stride=init_stride, padding=1)),
                nn.LeakyReLU(0.01, inplace=True)]
        width = conv_dim
        for _ in range(1, repeat_num):
            nxt = min(width * 2, 1024)  # filter count capped at 1024
            body.append(SpectralNorm(nn.Conv2d(width, nxt, kernel_size=4, stride=2, padding=1)))
            body.append(nn.LeakyReLU(0.01, inplace=True))
            width = nxt
        self.main = nn.Sequential(*body)
        # Per-patch real/fake head plus an auxiliary classifier head whose
        # kernel covers the remaining spatial extent.
        self.conv1 = SpectralNorm(nn.Conv2d(width, 1, kernel_size=3, stride=1, padding=1, bias=False))
        k_size = int(image_size / np.power(2, repeat_num)) + 2 - init_stride
        self.conv2 = SpectralNorm(nn.Conv2d(width, c_dim, kernel_size=k_size, bias=False))

    def forward(self, x):
        """Return (patch real/fake scores, auxiliary class scores)."""
        h = self.main(x)
        return self.conv1(h).squeeze(), self.conv2(h).squeeze()
class DiscriminatorSmallPatch(nn.Module):
    """PatchGAN discriminator with a stride-1 stem (smaller receptive field).

    Bug fix: the original called ``super(Discriminator, self).__init__()``,
    which raises TypeError at construction time because an instance of this
    class is not an instance of ``Discriminator``; the correct class is now
    passed to ``super``.
    """
    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6):
        super(DiscriminatorSmallPatch, self).__init__()
        layers = []
        # Stem keeps full resolution (stride 1) -> smaller effective patches.
        layers.append(nn.Conv2d(3, conv_dim, kernel_size=4, stride=1, padding=1))
        layers.append(nn.LeakyReLU(0.01, inplace=True))
        curr_dim = conv_dim
        for i in range(1, repeat_num):
            layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1))
            layers.append(nn.LeakyReLU(0.01, inplace=True))
            curr_dim = curr_dim * 2
        # Classifier kernel sized to the nominal remaining spatial extent.
        k_size = int(image_size / np.power(2, repeat_num))
        self.main = nn.Sequential(*layers)
        # Per-patch real/fake head and auxiliary class head.
        self.conv1 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv2 = nn.Conv2d(curr_dim, c_dim, kernel_size=k_size, bias=False)
    def forward(self, x):
        """Return (patch real/fake scores, auxiliary class scores)."""
        h = self.main(x)
        out_real = self.conv1(h)
        out_aux = self.conv2(h)
        return out_real.squeeze(), out_aux.squeeze()
class DiscriminatorGAP(nn.Module):
    """Classifier-only discriminator: conv stages + global average pooling.

    forward returns (None, class_scores) — there is no real/fake head.
    """

    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=3, init_stride=1, max_filters=None, nc=3, use_bnorm=False):
        super(DiscriminatorGAP, self).__init__()
        self.nc = nc
        self.c_dim = c_dim
        # Stem: two 3x3 convs with a 2x max-pool in between/after.
        stem = [nn.Conv2d(nc, conv_dim, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm2d(conv_dim),
                nn.LeakyReLU(0.1, inplace=True),
                nn.Conv2d(conv_dim, conv_dim, kernel_size=3, stride=1, padding=1),
                nn.MaxPool2d(2)]
        if use_bnorm:
            stem.append(nn.BatchNorm2d(conv_dim))
        stem.append(nn.LeakyReLU(0.1, inplace=True))
        width = conv_dim
        for stage in range(1, repeat_num):
            grown = width * 2 if max_filters is None else min(width * 2, max_filters)
            stem += [nn.Conv2d(width, grown, kernel_size=3, stride=1, padding=1),
                     nn.BatchNorm2d(grown),
                     nn.LeakyReLU(0.1, inplace=True),
                     ResidualBlockBnorm(dim_in=grown, dilation=1, padtype='zero')]
            if stage < 4:
                # Stop pooling after stage 3 so the GAP input keeps ~8x8 resolution.
                stem.append(nn.MaxPool2d(2))
            width = grown
        self.main = nn.Sequential(*stem)
        self.globalPool = nn.AdaptiveAvgPool2d(1)
        self.classifyFC = nn.Linear(width, c_dim, bias=False)

    def forward(self, x, label=None):
        """Classify x; returns (None, scores of shape (batch, c_dim))."""
        bsz = x.size(0)
        pooled = self.globalPool(self.main(x)).view(bsz, -1)
        return None, self.classifyFC(pooled).view(bsz, self.c_dim)
class DiscriminatorGAP_ImageNet(nn.Module):
    """Classifier head on top of pretrained ImageNet (VGG19) features.

    A small conv head refines the backbone's 512-channel features, then a
    global pool and linear layer produce c_dim logits. forward() returns
    (None, logits[, features]) to stay call-compatible with the patch
    discriminators.
    """
    def __init__(self, image_size=128, c_dim = 5, net_type='vgg19', max_filters=None, global_pool='mean',use_bias=False, class_ftune = 0):
        super(DiscriminatorGAP_ImageNet, self).__init__()
        layers = []
        nFilt = 512 if max_filters is None else max_filters
        # pretrained feature backbone; only VGG19 is wired up here
        self.pnet = Vgg19(only_last=True) if net_type == 'vgg19' else None
        if class_ftune > 0.:
            # Unfreeze the last `class_ftune` layers of the backbone.
            pAll = list(self.pnet.named_parameters())
            # Multiply by two for weight and bias
            for pn in pAll[::-1][:2*class_ftune]:
                pn[1].requires_grad = True
        layers.append(nn.LeakyReLU(0.1, inplace=True))
        layers.append(nn.Conv2d(512, nFilt, kernel_size=3, stride=1, padding=1))
        layers.append(nn.BatchNorm2d(nFilt))
        layers.append(nn.LeakyReLU(0.1, inplace=True))
        layers.append(nn.Conv2d(nFilt, nFilt, kernel_size=3, stride=1, padding=1))
        layers.append(nn.BatchNorm2d(nFilt))
        layers.append(nn.LeakyReLU(0.1, inplace=True))
        self.layers = nn.Sequential(*layers)
        self.globalPool = nn.AdaptiveAvgPool2d(1) if global_pool == 'mean' else nn.AdaptiveMaxPool2d(1)
        self.classifyFC = nn.Linear(nFilt, c_dim, bias=use_bias)
        # Constants remapping inputs to the backbone's expected statistics;
        # inputs are presumably normalized to mean/std 0.5 -- TODO confirm
        # against the data pipeline.
        self.shift = torch.autograd.Variable(torch.Tensor([-.030, -.088, -.188]).view(1,3,1,1), requires_grad=False).cuda()
        self.scale = torch.autograd.Variable(torch.Tensor([.458, .448, .450]).view(1,3,1,1), requires_grad=False).cuda()
        self.c_dim = c_dim
    def forward(self, x, label=None, get_feat = False):
        # `label` is accepted for interface compatibility but unused here.
        bsz = x.size(0)
        sz = x.size()
        # renormalize, extract backbone features, refine, pool, classify
        x = (x - self.shift.expand_as(x))/self.scale.expand_as(x)
        vOut = self.pnet(x)
        h = self.layers(vOut)
        out_aux = self.classifyFC(self.globalPool(h).view(bsz, -1))
        if get_feat:
            return None, out_aux.view(bsz,self.c_dim), h
        else:
            return None, out_aux.view(bsz,self.c_dim)
class DiscriminatorGAP_ImageNet_Weldon(nn.Module):
    """Classifier on pretrained VGG19 features with Weldon-style pooling.

    Instead of a plain global pool, per-class score maps are aggregated by
    summing the top-k highest and (optionally) the k lowest spatial
    responses. forward() returns (None, logits[, features]).
    """
    def __init__(self, image_size=128, c_dim = 5, net_type='vgg19', max_filters=None, global_pool='mean', topk=3, mink=3, use_bias=False):
        super(DiscriminatorGAP_ImageNet_Weldon, self).__init__()
        layers = []
        self.topk = topk
        self.mink = mink
        nFilt = 512 if max_filters is None else max_filters
        # pretrained feature backbone; only VGG19 is wired up here
        self.pnet = Vgg19(only_last=True) if net_type == 'vgg19' else None
        layers.append(nn.LeakyReLU(0.1, inplace=True))
        layers.append(nn.Conv2d(512, nFilt, kernel_size=3, stride=1, padding=1))
        layers.append(nn.BatchNorm2d(nFilt))
        layers.append(nn.LeakyReLU(0.1, inplace=True))
        layers.append(nn.Conv2d(nFilt, nFilt, kernel_size=3, stride=1, padding=1))
        layers.append(nn.BatchNorm2d(nFilt))
        layers.append(nn.LeakyReLU(0.1, inplace=True))
        self.layers = nn.Sequential(*layers)
        #self.AggrConv = nn.conv2d(nFilt, c_dim, kernel_size=1, stride=1, bias=False)
        # 1x1 conv producing one spatial score map per class
        self.classifyConv = nn.Conv2d(nFilt, c_dim, kernel_size=1, stride=1, bias=use_bias)
        # fallback pooling used only when topk <= 0
        self.globalPool = nn.AdaptiveAvgPool2d(1) if global_pool == 'mean' else nn.AdaptiveMaxPool2d(1)
        # Constants remapping inputs to the backbone's expected statistics;
        # inputs are presumably normalized to mean/std 0.5 -- TODO confirm.
        self.shift = torch.autograd.Variable(torch.Tensor([-.030, -.088, -.188]).view(1,3,1,1), requires_grad=False).cuda()
        self.scale = torch.autograd.Variable(torch.Tensor([.458, .448, .450]).view(1,3,1,1), requires_grad=False).cuda()
        self.c_dim = c_dim
    def forward(self, x, label=None, get_feat = False):
        # `label` is accepted for interface compatibility but unused here.
        bsz = x.size(0)
        sz = x.size()
        x = (x - self.shift.expand_as(x))/self.scale.expand_as(x)
        vOut = self.pnet(x)
        h = self.layers(vOut)
        classify_out = self.classifyConv(h)
        if self.topk > 0:
            # Weldon aggregation: sum of the top-k spatial scores per class...
            topk_vals, topk_idx = classify_out.view(bsz,self.c_dim,-1).topk(self.topk)
            out_aux = topk_vals.sum(dim=-1)
            if self.mink > 0:
                # ...plus the sum of the k lowest scores (negative evidence)
                mink_vals, mink_idx = classify_out.view(bsz,self.c_dim,-1).topk(self.mink, largest=False)
                out_aux = out_aux + mink_vals.sum(dim=-1)
        else:
            out_aux = self.globalPool(classify_out).view(bsz,-1)
        if get_feat:
            return None, out_aux.view(bsz,self.c_dim), h
        else:
            return None, out_aux.view(bsz,self.c_dim)
class DiscriminatorBBOX(nn.Module):
    """Classification-only discriminator for bounding-box crops.

    A stride-2 conv pyramid reduces the crop to a k_size x k_size map,
    and a final conv collapses that map into c_dim class scores.
    """
    def __init__(self, image_size=64, conv_dim=64, c_dim=5, repeat_num=6):
        super(DiscriminatorBBOX, self).__init__()
        blocks = [
            nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.01, inplace=True),
        ]
        n_filt = conv_dim
        for _ in range(1, repeat_num):
            blocks.append(nn.Conv2d(n_filt, n_filt * 2, kernel_size=4, stride=2, padding=1))
            blocks.append(nn.LeakyReLU(0.01, inplace=True))
            n_filt *= 2
        # spatial size remaining after repeat_num stride-2 convolutions
        k_size = int(image_size / np.power(2, repeat_num))
        self.main = nn.Sequential(*blocks)
        self.conv2 = nn.Conv2d(n_filt, c_dim, kernel_size=k_size, bias=False)

    def forward(self, x):
        return self.conv2(self.main(x)).squeeze()
class DiscriminatorGlobalLocal(nn.Module):
    """Two-branch discriminator: one conv pyramid over the full image and
    one over the local (bounding-box) crop.

    The concatenated branch features feed fc1 for real/fake scoring; the
    local features alone feed fc2 for optional classification.
    """
    def __init__(self, image_size=128, bbox_size = 64, conv_dim=64, c_dim=5, repeat_num_global=6, repeat_num_local=5, nc=3):
        super(DiscriminatorGlobalLocal, self).__init__()
        maxFilt = 512 if image_size==128 else 128

        def build_branch(n_steps):
            # shared pyramid recipe: stride-2 convs + LeakyReLU, channels
            # doubling each step up to maxFilt
            branch = [
                nn.Conv2d(nc, conv_dim, kernel_size=4, stride=2, padding=1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
            ]
            width = conv_dim
            for _ in range(1, n_steps):
                branch.append(nn.Conv2d(width, min(width * 2, maxFilt), kernel_size=4, stride=2, padding=1, bias=False))
                branch.append(nn.LeakyReLU(0.2, inplace=True))
                width = min(width * 2, maxFilt)
            return branch, width

        globalLayers, _ = build_branch(repeat_num_global)
        # NB: as in the original, fc1 sizes both branches by the LOCAL
        # branch's final width (they coincide for the intended configs).
        localLayers, curr_dim = build_branch(repeat_num_local)
        k_size_local = int(bbox_size / np.power(2, repeat_num_local))
        k_size_global = int(image_size / np.power(2, repeat_num_global))
        self.mainGlobal = nn.Sequential(*globalLayers)
        self.mainLocal = nn.Sequential(*localLayers)
        # FC 1: real/fake head over concatenated global+local features
        self.fc1 = nn.Linear(curr_dim * (k_size_local ** 2 + k_size_global ** 2), 1, bias=False)
        # FC 2: classification head over the local features only (optional)
        self.fc2 = nn.Linear(curr_dim * (k_size_local ** 2), c_dim, bias=False) if c_dim > 0 else None

    def forward(self, x, boxImg, classify=False):
        bsz = x.size(0)
        g_feat = self.mainGlobal(x).view(bsz, -1)
        l_feat = self.mainLocal(boxImg).view(bsz, -1)
        h_append = torch.cat([g_feat, l_feat], dim=-1)
        out_rf = self.fc1(h_append)
        if classify and (self.fc2 is not None):
            out_cls = self.fc2(l_feat)
        else:
            out_cls = None
        return out_rf.squeeze(), out_cls, h_append
class DiscriminatorGlobalLocal_SN(nn.Module):
    """Spectral-normalized two-branch discriminator: one conv pyramid over
    the full image, one over the local (bounding-box) crop.

    Concatenated branch features feed fc1 for real/fake scoring; local
    features alone feed fc2 for optional classification. All convs and
    linears are wrapped in SpectralNorm.
    """
    def __init__(self, image_size=128, bbox_size = 64, conv_dim=64, c_dim=5, repeat_num_global=6, repeat_num_local=5, nc=3):
        super(DiscriminatorGlobalLocal_SN, self).__init__()
        maxFilt = 512 if image_size==128 else 128
        # Global branch: stride-2 conv pyramid over the full image.
        globalLayers = []
        globalLayers.append(SpectralNorm(nn.Conv2d(nc, conv_dim, kernel_size=4, stride=2, padding=1,bias=False)))
        globalLayers.append(nn.LeakyReLU(0.2, inplace=True))
        # Local branch: same recipe over the bounding-box crop.
        localLayers = []
        localLayers.append(SpectralNorm(nn.Conv2d(nc, conv_dim, kernel_size=4, stride=2, padding=1, bias=False)))
        localLayers.append(nn.LeakyReLU(0.2, inplace=True))
        curr_dim = conv_dim
        for i in range(1, repeat_num_global):
            globalLayers.append(SpectralNorm(nn.Conv2d(curr_dim, min(curr_dim*2,maxFilt), kernel_size=4, stride=2, padding=1, bias=False)))
            globalLayers.append(nn.LeakyReLU(0.2, inplace=True))
            curr_dim = min(curr_dim * 2, maxFilt)
        curr_dim = conv_dim
        for i in range(1, repeat_num_local):
            localLayers.append(SpectralNorm(nn.Conv2d(curr_dim, min(curr_dim * 2, maxFilt), kernel_size=4, stride=2, padding=1, bias=False)))
            localLayers.append(nn.LeakyReLU(0.2, inplace=True))
            curr_dim = min(curr_dim * 2, maxFilt)
        # NB: curr_dim here is the LOCAL branch's final width; fc1 assumes
        # both branches end at the same width (true for intended configs).
        k_size_local = int(bbox_size/ np.power(2, repeat_num_local))
        k_size_global = int(image_size/ np.power(2, repeat_num_global))
        self.mainGlobal = nn.Sequential(*globalLayers)
        self.mainLocal = nn.Sequential(*localLayers)
        # FC 1 for doing real/fake
        self.fc1 = SpectralNorm(nn.Linear(curr_dim*(k_size_local**2+k_size_global**2), 1, bias=False))
        # FC 2 for doing classification only on local patch
        self.fc2 = SpectralNorm(nn.Linear(curr_dim*(k_size_local**2), c_dim, bias=False))
    def forward(self, x, boxImg, classify=False):
        bsz = x.size(0)
        h_global = self.mainGlobal(x)
        h_local = self.mainLocal(boxImg)
        # flatten and concatenate both branches for the real/fake head
        h_append = torch.cat([h_global.view(bsz,-1), h_local.view(bsz,-1)], dim=-1)
        out_rf = self.fc1(h_append)
        # classification uses only the local features, and only on request
        out_cls = self.fc2(h_local.view(bsz,-1)) if classify else None
        return out_rf.squeeze(), out_cls, h_append
class BoxFeatEncoder(nn.Module):
    """Encodes an image crop into a feat_dim feature vector.

    An optional conditioning vector ``c`` is broadcast over the spatial
    grid and concatenated to the input channels. Downsampling steps use
    either instance normalization or dropout, selected by ``norm_type``.
    """
    def __init__(self, image_size = 64, k = 4, conv_dim=64, feat_dim=512, repeat_num=5, c_dim=0, norm_type='drop', nc=3):
        super(BoxFeatEncoder, self).__init__()
        maxFilt = 512 if image_size == 64 else 128

        def down_step(c_in, c_out):
            # one stride-2 downsampling step with the configured normalization
            step = [nn.Conv2d(c_in, c_out, kernel_size=k, stride=2, padding=1)]
            if norm_type == 'instance':
                step.append(nn.InstanceNorm2d(c_out, affine=True))
            step.append(nn.LeakyReLU(0.01, inplace=True))
            if norm_type == 'drop':
                step.append(nn.Dropout(p=0.25))
            return step

        stack = down_step(nc + c_dim, conv_dim)
        width = conv_dim
        for _ in range(1, repeat_num):
            stack += down_step(width, min(width * 2, maxFilt))
            width = min(width * 2, maxFilt)
        # spatial size remaining after repeat_num stride-2 steps
        k_size = int(image_size / np.power(2, repeat_num))
        #layers.append(nn.Dropout(p=0.25))
        self.main= nn.Sequential(*stack)
        # FC 1 for doing real/fake
        self.fc1 = nn.Linear(width * (k_size ** 2), feat_dim, bias=False)

    def forward(self, x, c=None):
        bsz = x.size(0)
        if c is None:
            xcat = x
        else:
            # broadcast the conditioning vector over the spatial grid
            c_map = c.unsqueeze(2).unsqueeze(3).expand(c.size(0), c.size(1), x.size(2), x.size(3))
            xcat = torch.cat([x, c_map], dim=1)
        return self.fc1(self.main(xcat).view(bsz, -1))
class BoxFeatGenerator(nn.Module):
    """Generates a feat_dim feature vector from an image plus a mask
    channel (input has nc+1+c_dim channels: image, mask, optional
    conditioning) via a conv trunk and a two-layer FC head.

    With use_residual, downsampling is done by residual blocks + max-pool
    (with growing dilation); otherwise by strided conv blocks.
    """
    def __init__(self, image_size = 128, k = 4, conv_dim=64, feat_dim=512, repeat_num=6, c_dim=0, use_residual=0, nc=3):
        super(BoxFeatGenerator, self).__init__()
        maxFilt = 512 if image_size==128 else 128
        layers = []
        fclayers = []
        # full-resolution stem; input is image + 1 extra channel + c_dim
        # conditioning channels (extra channel presumably a mask -- TODO
        # confirm against callers)
        layers.extend(get_conv_inorm_relu_block(nc+1+c_dim, conv_dim, 7, 1, 3, slope=0.01, padtype='zero'))
        curr_dim = conv_dim
        if use_residual:
            layers.extend(get_conv_inorm_relu_block(conv_dim, conv_dim*2, 3, 1, 1, slope=0.01, padtype='zero'))
            layers.append(nn.MaxPool2d(2))
            # Down-Sampling
            curr_dim = conv_dim*2
        dilation=1
        # use_residual acts as an int offset: the residual path consumed
        # one downsampling step in the stem above
        for i in range(use_residual, repeat_num):
            if use_residual:
                layers.append(ResidualBlock(dim_in=curr_dim, dilation=dilation, padtype='zero'))
                layers.append(nn.MaxPool2d(2))
            else:
                layers.extend(get_conv_inorm_relu_block(curr_dim, min(curr_dim*2,maxFilt), k, 2, 1, slope=0.01, padtype='zero'))
                curr_dim = min(curr_dim * 2, maxFilt)
            if i > 2:
                # grow dilation in the deeper stages (only affects the
                # residual-block path)
                dilation = dilation*2
        # spatial size remaining after repeat_num downsampling steps
        k_size = int(image_size/ np.power(2, repeat_num))
        #layers.append(nn.Dropout(p=0.25))
        self.main= nn.Sequential(*layers)
        fclayers.append(nn.Linear(curr_dim*(k_size**2), feat_dim*2, bias=False))
        fclayers.append(nn.LeakyReLU(0.01,inplace=True))
        # FC 1 for doing real/fake
        fclayers.append(nn.Linear(feat_dim*2, feat_dim, bias=False))
        self.fc1 = nn.Sequential(*fclayers)
    def forward(self, x, c=None):
        bsz = x.size(0)
        if c is not None:
            # broadcast the conditioning vector over the spatial grid
            c = c.unsqueeze(2).unsqueeze(3)
            c = c.expand(c.size(0), c.size(1), x.size(2), x.size(3))
            xcat = torch.cat([x, c], dim=1)
        else:
            xcat = x
        h= self.main(xcat)
        out_feat = self.fc1(h.view(bsz,-1))
        return out_feat
##-------------------------------------------------------
## Implementing perceptual loss using VGG19
##-------------------------------------------------------
class NetLinLayer(nn.Module):
    """A single 1x1 convolution acting as a per-location linear map,
    optionally preceded by dropout."""
    def __init__(self, chn_in, chn_out=1, use_dropout=False):
        super(NetLinLayer, self).__init__()
        modules = []
        if use_dropout:
            modules.append(nn.Dropout())
        modules.append(nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False))
        self.model = nn.Sequential(*modules)
def normalize_tensor(in_feat, eps=1e-10):
    """Channel-wise L2 normalization of a (N, C, H, W) tensor.

    Each spatial location's C-dimensional vector is divided by its L2
    norm; eps guards against division by zero.
    """
    n, _, h, w = in_feat.size()
    sq_sum = torch.sum(in_feat ** 2, dim=1)
    norm_factor = torch.sqrt(sq_sum).view(n, 1, h, w)
    return in_feat / (norm_factor.expand_as(in_feat) + eps)
class squeezenet(torch.nn.Module):
    """Wraps torchvision's SqueezeNet 1.1 feature extractor, exposing the
    seven intermediate activations (one per slice) from forward()."""
    # boundaries of the backbone's feature modules for the seven slices
    _CUTS = [0, 2, 5, 8, 10, 11, 12, 13]

    def __init__(self, requires_grad=False, pretrained=True):
        super(squeezenet, self).__init__()
        feats = models.squeezenet1_1(pretrained=pretrained).features
        self.N_slices = 7
        for idx in range(self.N_slices):
            slc = torch.nn.Sequential()
            # keep the backbone's module indices as submodule names
            for j in range(self._CUTS[idx], self._CUTS[idx + 1]):
                slc.add_module(str(j), feats[j])
            setattr(self, 'slice%d' % (idx + 1), slc)
        if not requires_grad:
            # freeze the backbone by default
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        h = X
        outs = []
        for idx in range(1, self.N_slices + 1):
            h = getattr(self, 'slice%d' % idx)(h)
            outs.append(h)
        return outs
class VGGLoss(nn.Module):
    """Perceptual loss on VGG19 or SqueezeNet features.

    With use_perceptual=True, squared differences of channel-normalized
    features are weighted by learned 1x1 layers (LPIPS-style) loaded from
    a pretrained checkpoint; otherwise a fixed weighted L1 over raw
    features is used. use_style_loss > 0 adds a Gram-matrix style loss on
    the deeper layers, scaled by that value.
    """
    def __init__(self, network = 'vgg', use_perceptual=True, imagenet_norm = False, use_style_loss=0):
        super(VGGLoss, self).__init__()
        self.criterion = nn.L1Loss()
        self.use_style_loss = use_style_loss
        # BUG FIX: record the flag unconditionally. The original only set
        # self.use_perceptual when True, so use_perceptual=False raised
        # AttributeError below and in forward().
        self.use_perceptual = use_perceptual
        # per-layer channel counts of the backbone's tapped activations
        if network == 'vgg':
            self.chns = [64,128,256,512,512]
        else:
            self.chns = [64,128,256,384,384,512,512]
        if use_perceptual:
            # learned 1x1 weightings, one per tapped layer
            self.lin0 = NetLinLayer(self.chns[0],use_dropout=False)
            self.lin1 = NetLinLayer(self.chns[1],use_dropout=False)
            self.lin2 = NetLinLayer(self.chns[2],use_dropout=False)
            self.lin3 = NetLinLayer(self.chns[3],use_dropout=False)
            self.lin4 = NetLinLayer(self.chns[4],use_dropout=False)
            self.lin0.cuda()
            self.lin1.cuda()
            self.lin2.cuda()
            self.lin3.cuda()
            self.lin4.cuda()
        # Do this since the tensors have already been normalized to have
        # mean and variance [0.5,0.5,0.5]; shift/scale remap them to the
        # statistics the backbone expects.
        self.imagenet_norm = imagenet_norm
        if not self.imagenet_norm:
            self.shift = torch.autograd.Variable(torch.Tensor([-.030, -.088, -.188]).view(1,3,1,1)).cuda()
            self.scale = torch.autograd.Variable(torch.Tensor([.458, .448, .450]).view(1,3,1,1)).cuda()
        self.net_type = network
        if network == 'vgg':
            self.pnet = Vgg19().cuda()
            self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]
        else:
            self.pnet = squeezenet().cuda()
            self.weights = [1.0]*7
            if use_perceptual:
                # squeezenet taps two extra layers
                self.lin5 = NetLinLayer(self.chns[5],use_dropout=False)
                self.lin6 = NetLinLayer(self.chns[6],use_dropout=False)
                self.lin5.cuda()
                self.lin6.cuda()
        if self.use_perceptual:
            # NOTE(review): hard-coded checkpoint path; consider making it
            # configurable.
            self.load_state_dict(torch.load('/BS/rshetty-wrk/work/code/controlled-generation/trained_models/perceptualSim/'+network+'.pth'), strict=False)
        # loss module: freeze everything
        for param in self.parameters():
            param.requires_grad = False

    def gram(self, x):
        """Normalized Gram matrix of the feature maps (style statistics)."""
        a, b, c, d = x.size()  # a=batch size, b=feature maps, (c,d)=map dims
        features = x.view(a * b, c * d)
        G = torch.mm(features, features.t())  # gram product
        # normalize by the number of elements in each feature map
        return G.div(a * b * c * d)

    def forward(self, x, y):
        # broadcast single-channel inputs to 3 channels
        x, y = x.expand(x.size(0), 3, x.size(2), x.size(3)), y.expand(y.size(0), 3, y.size(2), y.size(3))
        if not self.imagenet_norm:
            x = (x - self.shift.expand_as(x))/self.scale.expand_as(x)
            y = (y - self.shift.expand_as(y))/self.scale.expand_as(y)
        x_vgg, y_vgg = self.pnet(x), self.pnet(y)
        loss = 0
        if self.use_perceptual:
            # squared differences of channel-normalized features, weighted
            # by the learned 1x1 layers (target side detached)
            normed_x = [normalize_tensor(x_vgg[kk]) for (kk, out0) in enumerate(x_vgg)]
            normed_y = [normalize_tensor(y_vgg[kk]) for (kk, out0) in enumerate(y_vgg)]
            diffs = [(normed_x[kk]-normed_y[kk].detach())**2 for (kk,out0) in enumerate(x_vgg)]
            loss = self.lin0.model(diffs[0]).mean()
            loss = loss + self.lin1.model(diffs[1]).mean()
            loss = loss + self.lin2.model(diffs[2]).mean()
            loss = loss + self.lin3.model(diffs[3]).mean()
            loss = loss + self.lin4.model(diffs[4]).mean()
            if(self.net_type=='squeeze'):
                loss = loss + self.lin5.model(diffs[5]).mean()
                loss = loss + self.lin6.model(diffs[6]).mean()
            if self.use_style_loss:
                style_loss = 0.
                # BUG FIX: `xrange` does not exist in Python 3; use range.
                for kk in range(3, len(x_vgg)):
                    style_loss += self.criterion(self.gram(x_vgg[kk]), self.gram(y_vgg[kk]))
                loss += self.use_style_loss * style_loss
        else:
            # fixed per-layer L1 with hand-set weights
            for i in range(len(x_vgg)):
                loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())
        return loss
| [
"torch.nn.Dropout",
"torchvision.models.vgg19",
"torch.nn.AdaptiveMaxPool2d",
"torch.cat",
"torch.nn.InstanceNorm2d",
"torch.nn.functional.tanh",
"torch.nn.functional.pad",
"torch.__version__.split",
"torch.nn.ReflectionPad2d",
"numpy.power",
"torch.load",
"torch.nn.functional.avg_pool2d",
"... | [((544, 565), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (563, 565), False, 'import torch\n'), ((588, 609), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (607, 609), False, 'import torch\n'), ((632, 653), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (651, 653), False, 'import torch\n'), ((676, 697), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (695, 697), False, 'import torch\n'), ((720, 741), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (739, 741), False, 'import torch\n'), ((5265, 5287), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (5278, 5287), True, 'import torch.nn as nn\n'), ((6446, 6468), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (6459, 6468), True, 'import torch.nn as nn\n'), ((7531, 7553), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (7544, 7553), True, 'import torch.nn as nn\n'), ((8988, 9010), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (9001, 9010), True, 'import torch.nn as nn\n'), ((9223, 9247), 'torch.cat', 'torch.cat', (['[x, c]'], {'dim': '(1)'}), '([x, c], dim=1)\n', (9232, 9247), False, 'import torch\n'), ((10718, 10752), 'torch.nn.Hardtanh', 'nn.Hardtanh', ([], {'min_val': '(-1)', 'max_val': '(1)'}), '(min_val=-1, max_val=1)\n', (10729, 10752), True, 'import torch.nn as nn\n'), ((10773, 10795), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (10786, 10795), True, 'import torch.nn as nn\n'), ((11029, 11053), 'torch.cat', 'torch.cat', (['[x, c]'], {'dim': '(1)'}), '([x, c], dim=1)\n', (11038, 11053), False, 'import torch\n'), ((12189, 12204), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (12202, 12204), True, 'import torch.nn as nn\n'), ((12237, 12252), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (12250, 12252), True, 'import torch.nn as nn\n'), ((12284, 12299), 
'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (12297, 12299), True, 'import torch.nn as nn\n'), ((12684, 12758), 'torch.nn.Conv2d', 'nn.Conv2d', (['(curr_dim + 3)', '(3)'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'bias': '(False)'}), '(curr_dim + 3, 3, kernel_size=7, stride=1, padding=3, bias=False)\n', (12693, 12758), True, 'import torch.nn as nn\n'), ((12933, 12942), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (12940, 12942), True, 'import torch.nn as nn\n'), ((12968, 13002), 'torch.nn.Hardtanh', 'nn.Hardtanh', ([], {'min_val': '(-1)', 'max_val': '(1)'}), '(min_val=-1, max_val=1)\n', (12979, 13002), True, 'import torch.nn as nn\n'), ((13023, 13045), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (13036, 13045), True, 'import torch.nn as nn\n'), ((13279, 13303), 'torch.cat', 'torch.cat', (['[x, c]'], {'dim': '(1)'}), '([x, c], dim=1)\n', (13288, 13303), False, 'import torch\n'), ((15082, 15097), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (15095, 15097), True, 'import torch.nn as nn\n'), ((15130, 15145), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (15143, 15145), True, 'import torch.nn as nn\n'), ((15177, 15192), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (15190, 15192), True, 'import torch.nn as nn\n'), ((15271, 15286), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (15284, 15286), True, 'import torch.nn as nn\n'), ((15324, 15339), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (15337, 15339), True, 'import torch.nn as nn\n'), ((15377, 15392), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (15390, 15392), True, 'import torch.nn as nn\n'), ((16118, 16192), 'torch.nn.Conv2d', 'nn.Conv2d', (['(curr_dim + 3)', '(3)'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'bias': '(False)'}), '(curr_dim + 3, 3, kernel_size=7, stride=1, padding=3, bias=False)\n', (16127, 16192), True, 'import torch.nn as nn\n'), ((16367, 16376), 
'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (16374, 16376), True, 'import torch.nn as nn\n'), ((16410, 16484), 'torch.nn.Conv2d', 'nn.Conv2d', (['(curr_dim + 3)', '(1)'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'bias': '(False)'}), '(curr_dim + 3, 1, kernel_size=7, stride=1, padding=3, bias=False)\n', (16419, 16484), True, 'import torch.nn as nn\n'), ((16515, 16527), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (16525, 16527), True, 'import torch.nn as nn\n'), ((16554, 16588), 'torch.nn.Hardtanh', 'nn.Hardtanh', ([], {'min_val': '(-1)', 'max_val': '(1)'}), '(min_val=-1, max_val=1)\n', (16565, 16588), True, 'import torch.nn as nn\n'), ((16609, 16631), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (16622, 16631), True, 'import torch.nn as nn\n'), ((16865, 16889), 'torch.cat', 'torch.cat', (['[x, c]'], {'dim': '(1)'}), '([x, c], dim=1)\n', (16874, 16889), False, 'import torch\n'), ((19163, 19178), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (19176, 19178), True, 'import torch.nn as nn\n'), ((19211, 19226), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (19224, 19226), True, 'import torch.nn as nn\n'), ((19258, 19273), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (19271, 19273), True, 'import torch.nn as nn\n'), ((19660, 19734), 'torch.nn.Conv2d', 'nn.Conv2d', (['(curr_dim + 3)', '(3)'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'bias': '(False)'}), '(curr_dim + 3, 3, kernel_size=7, stride=1, padding=3, bias=False)\n', (19669, 19734), True, 'import torch.nn as nn\n'), ((19909, 19918), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (19916, 19918), True, 'import torch.nn as nn\n'), ((19952, 20026), 'torch.nn.Conv2d', 'nn.Conv2d', (['(curr_dim + 3)', '(1)'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'bias': '(False)'}), '(curr_dim + 3, 1, kernel_size=7, stride=1, padding=3, bias=False)\n', (19961, 20026), True, 'import torch.nn as nn\n'), ((20057, 
20069), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (20067, 20069), True, 'import torch.nn as nn\n'), ((20463, 20497), 'torch.nn.Hardtanh', 'nn.Hardtanh', ([], {'min_val': '(-1)', 'max_val': '(1)'}), '(min_val=-1, max_val=1)\n', (20474, 20497), True, 'import torch.nn as nn\n'), ((20557, 20579), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (20570, 20579), True, 'import torch.nn as nn\n'), ((20813, 20837), 'torch.cat', 'torch.cat', (['[x, c]'], {'dim': '(1)'}), '([x, c], dim=1)\n', (20822, 20837), False, 'import torch\n'), ((22291, 22378), 'torch.nn.Conv2d', 'nn.Conv2d', (['i', 'o'], {'kernel_size': 'k', 'stride': 's', 'padding': 'p', 'dilation': 'dilation', 'bias': '(False)'}), '(i, o, kernel_size=k, stride=s, padding=p, dilation=dilation, bias\n =False)\n', (22300, 22378), True, 'import torch.nn as nn\n'), ((22393, 22426), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['o'], {'affine': '(True)'}), '(o, affine=True)\n', (22410, 22426), True, 'import torch.nn as nn\n'), ((22446, 22479), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['slope'], {'inplace': '(True)'}), '(slope, inplace=True)\n', (22458, 22479), True, 'import torch.nn as nn\n'), ((23407, 23422), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (23420, 23422), True, 'import torch.nn as nn\n'), ((23455, 23470), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (23468, 23470), True, 'import torch.nn as nn\n'), ((23502, 23517), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (23515, 23517), True, 'import torch.nn as nn\n'), ((23909, 23983), 'torch.nn.Conv2d', 'nn.Conv2d', (['(curr_dim + 3)', '(1)'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'bias': '(False)'}), '(curr_dim + 3, 1, kernel_size=7, stride=1, padding=3, bias=False)\n', (23918, 23983), True, 'import torch.nn as nn\n'), ((24014, 24026), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (24024, 24026), True, 'import torch.nn as nn\n'), ((24455, 24477), 
'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (24468, 24477), True, 'import torch.nn as nn\n'), ((24711, 24735), 'torch.cat', 'torch.cat', (['[x, c]'], {'dim': '(1)'}), '([x, c], dim=1)\n', (24720, 24735), False, 'import torch\n'), ((27667, 27682), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (27680, 27682), True, 'import torch.nn as nn\n'), ((29951, 30104), 'torch.nn.Conv2d', 'nn.Conv2d', (['(curr_dim + extra_dim)', '(c_dim + 1 if per_classMask else 1)'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'bias': '(True if mask_normalize else use_bias)'}), '(curr_dim + extra_dim, c_dim + 1 if per_classMask else 1,\n kernel_size=7, stride=1, padding=3, bias=True if mask_normalize else\n use_bias)\n', (29960, 30104), True, 'import torch.nn as nn\n'), ((30125, 30137), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (30135, 30137), True, 'import torch.nn as nn\n'), ((30566, 30588), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (30579, 30588), True, 'import torch.nn as nn\n'), ((34580, 34595), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (34593, 34595), True, 'import torch.nn as nn\n'), ((36363, 36507), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512 + extra_dim)', '(c_dim + 1 if per_classMask else 1)'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'bias': '(True if mask_normalize else use_bias)'}), '(512 + extra_dim, c_dim + 1 if per_classMask else 1, kernel_size=7,\n stride=1, padding=3, bias=True if mask_normalize else use_bias)\n', (36372, 36507), True, 'import torch.nn as nn\n'), ((36532, 36544), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (36542, 36544), True, 'import torch.nn as nn\n'), ((40646, 40661), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (40659, 40661), True, 'import torch.nn as nn\n'), ((43224, 43378), 'torch.nn.Conv2d', 'nn.Conv2d', (['(start_dim + extra_dim)', '(c_dim + 1 if per_classMask else 1)'], {'kernel_size': '(7)', 'stride': 
'(1)', 'padding': '(3)', 'bias': '(True if mask_normalize else use_bias)'}), '(start_dim + extra_dim, c_dim + 1 if per_classMask else 1,\n kernel_size=7, stride=1, padding=3, bias=True if mask_normalize else\n use_bias)\n', (43233, 43378), True, 'import torch.nn as nn\n'), ((43399, 43411), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (43409, 43411), True, 'import torch.nn as nn\n'), ((49401, 49435), 'torch.nn.Hardtanh', 'nn.Hardtanh', ([], {'min_val': '(-1)', 'max_val': '(1)'}), '(min_val=-1, max_val=1)\n', (49412, 49435), True, 'import torch.nn as nn\n'), ((49462, 49493), 'torch.nn.Sequential', 'nn.Sequential', (['*downsamp_layers'], {}), '(*downsamp_layers)\n', (49475, 49493), True, 'import torch.nn as nn\n'), ((49566, 49587), 'torch.nn.ModuleList', 'nn.ModuleList', (['layers'], {}), '(layers)\n', (49579, 49587), True, 'import torch.nn as nn\n'), ((49806, 49874), 'torch.nn.functional.pad', 'F.pad', (['x', '(0, (8 - h % 8) % 8, 0, (8 - w % 8) % 8)'], {'mode': '"""replicate"""'}), "(x, (0, (8 - h % 8) % 8, 0, (8 - w % 8) % 8), mode='replicate')\n", (49811, 49874), True, 'import torch.nn.functional as F\n'), ((53223, 53245), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (53236, 53245), True, 'import torch.nn as nn\n'), ((53267, 53337), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', '(1)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)\n', (53276, 53337), True, 'import torch.nn as nn\n'), ((55340, 55362), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (55353, 55362), True, 'import torch.nn as nn\n'), ((56431, 56453), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (56444, 56453), True, 'import torch.nn as nn\n'), ((56475, 56545), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', '(1)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(curr_dim, 1, 
kernel_size=3, stride=1, padding=1, bias=False)\n', (56484, 56545), True, 'import torch.nn as nn\n'), ((56567, 56625), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', 'c_dim'], {'kernel_size': 'k_size', 'bias': '(False)'}), '(curr_dim, c_dim, kernel_size=k_size, bias=False)\n', (56576, 56625), True, 'import torch.nn as nn\n'), ((58199, 58221), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (58212, 58221), True, 'import torch.nn as nn\n'), ((58248, 58271), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (58268, 58271), True, 'import torch.nn as nn\n'), ((58298, 58336), 'torch.nn.Linear', 'nn.Linear', (['curr_dim', 'c_dim'], {'bias': '(False)'}), '(curr_dim, c_dim, bias=False)\n', (58307, 58336), True, 'import torch.nn as nn\n'), ((59665, 59687), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (59678, 59687), True, 'import torch.nn as nn\n'), ((59818, 59856), 'torch.nn.Linear', 'nn.Linear', (['nFilt', 'c_dim'], {'bias': 'use_bias'}), '(nFilt, c_dim, bias=use_bias)\n', (59827, 59856), True, 'import torch.nn as nn\n'), ((61509, 61531), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (61522, 61531), True, 'import torch.nn as nn\n'), ((61646, 61709), 'torch.nn.Conv2d', 'nn.Conv2d', (['nFilt', 'c_dim'], {'kernel_size': '(1)', 'stride': '(1)', 'bias': 'use_bias'}), '(nFilt, c_dim, kernel_size=1, stride=1, bias=use_bias)\n', (61655, 61709), True, 'import torch.nn as nn\n'), ((63629, 63651), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (63642, 63651), True, 'import torch.nn as nn\n'), ((63673, 63731), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', 'c_dim'], {'kernel_size': 'k_size', 'bias': '(False)'}), '(curr_dim, c_dim, kernel_size=k_size, bias=False)\n', (63682, 63731), True, 'import torch.nn as nn\n'), ((65335, 65363), 'torch.nn.Sequential', 'nn.Sequential', (['*globalLayers'], {}), '(*globalLayers)\n', (65348, 65363), True, 'import 
torch.nn as nn\n'), ((65389, 65416), 'torch.nn.Sequential', 'nn.Sequential', (['*localLayers'], {}), '(*localLayers)\n', (65402, 65416), True, 'import torch.nn as nn\n'), ((65472, 65549), 'torch.nn.Linear', 'nn.Linear', (['(curr_dim * (k_size_local ** 2 + k_size_global ** 2))', '(1)'], {'bias': '(False)'}), '(curr_dim * (k_size_local ** 2 + k_size_global ** 2), 1, bias=False)\n', (65481, 65549), True, 'import torch.nn as nn\n'), ((67718, 67746), 'torch.nn.Sequential', 'nn.Sequential', (['*globalLayers'], {}), '(*globalLayers)\n', (67731, 67746), True, 'import torch.nn as nn\n'), ((67772, 67799), 'torch.nn.Sequential', 'nn.Sequential', (['*localLayers'], {}), '(*localLayers)\n', (67785, 67799), True, 'import torch.nn as nn\n'), ((69743, 69765), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (69756, 69765), True, 'import torch.nn as nn\n'), ((69821, 69876), 'torch.nn.Linear', 'nn.Linear', (['(curr_dim * k_size ** 2)', 'feat_dim'], {'bias': '(False)'}), '(curr_dim * k_size ** 2, feat_dim, bias=False)\n', (69830, 69876), True, 'import torch.nn as nn\n'), ((71588, 71610), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (71601, 71610), True, 'import torch.nn as nn\n'), ((71874, 71898), 'torch.nn.Sequential', 'nn.Sequential', (['*fclayers'], {}), '(*fclayers)\n', (71887, 71898), True, 'import torch.nn as nn\n'), ((72791, 72813), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (72804, 72813), True, 'import torch.nn as nn\n'), ((73432, 73453), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (73451, 73453), False, 'import torch\n'), ((73476, 73497), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (73495, 73497), False, 'import torch\n'), ((73520, 73541), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (73539, 73541), False, 'import torch\n'), ((73564, 73585), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (73583, 73585), False, 
'import torch\n'), ((73608, 73629), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (73627, 73629), False, 'import torch\n'), ((73652, 73673), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (73671, 73673), False, 'import torch\n'), ((73696, 73717), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (73715, 73717), False, 'import torch\n'), ((75190, 75201), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (75199, 75201), True, 'import torch.nn as nn\n'), ((483, 512), 'torchvision.models.vgg19', 'models.vgg19', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (495, 512), False, 'from torchvision import models\n'), ((2436, 2484), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'scale', 'mode': '"""bilinear"""'}), "(scale_factor=scale, mode='bilinear')\n", (2447, 2484), True, 'import torch.nn as nn\n'), ((2530, 2599), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': 'scale', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(scale_factor=scale, mode='bilinear', align_corners=False)\n", (2541, 2599), True, 'import torch.nn as nn\n'), ((2842, 2916), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', 'dim_out'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=False)\n', (2851, 2916), True, 'import torch.nn as nn\n'), ((3008, 3093), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', '(2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'dilation': '(1)', 'bias': '(False)'}), '(dim_in, 2, kernel_size=3, stride=1, padding=1, dilation=1, bias=False\n )\n', (3017, 3093), True, 'import torch.nn as nn\n'), ((7871, 7949), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3 + c_dim)', 'conv_dim'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'bias': '(False)'}), '(3 + c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False)\n', (7880, 7949), True, 'import torch.nn as nn\n'), ((7971, 8011), 
'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['conv_dim'], {'affine': '(True)'}), '(conv_dim, affine=True)\n', (7988, 8011), True, 'import torch.nn as nn\n'), ((8035, 8056), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (8042, 8056), True, 'import torch.nn as nn\n'), ((8863, 8933), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', '(3)'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'bias': '(False)'}), '(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False)\n', (8872, 8933), True, 'import torch.nn as nn\n'), ((8957, 8966), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (8964, 8966), True, 'import torch.nn as nn\n'), ((9543, 9621), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3 + c_dim)', 'conv_dim'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'bias': '(False)'}), '(3 + c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False)\n', (9552, 9621), True, 'import torch.nn as nn\n'), ((9643, 9683), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['conv_dim'], {'affine': '(True)'}), '(conv_dim, affine=True)\n', (9660, 9683), True, 'import torch.nn as nn\n'), ((9707, 9728), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (9714, 9728), True, 'import torch.nn as nn\n'), ((10535, 10605), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', '(3)'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'bias': '(False)'}), '(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False)\n', (10544, 10605), True, 'import torch.nn as nn\n'), ((10683, 10692), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (10690, 10692), True, 'import torch.nn as nn\n'), ((11483, 11561), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3 + c_dim)', 'conv_dim'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'bias': '(False)'}), '(3 + c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False)\n', (11492, 11561), True, 'import torch.nn as nn\n'), ((11583, 11623), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', 
(['conv_dim'], {'affine': '(True)'}), '(conv_dim, affine=True)\n', (11600, 11623), True, 'import torch.nn as nn\n'), ((11647, 11668), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (11654, 11668), True, 'import torch.nn as nn\n'), ((14356, 14434), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3 + c_dim)', 'conv_dim'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'bias': '(False)'}), '(3 + c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False)\n', (14365, 14434), True, 'import torch.nn as nn\n'), ((14456, 14496), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['conv_dim'], {'affine': '(True)'}), '(conv_dim, affine=True)\n', (14473, 14496), True, 'import torch.nn as nn\n'), ((14520, 14541), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (14527, 14541), True, 'import torch.nn as nn\n'), ((18437, 18515), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3 + c_dim)', 'conv_dim'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(3)', 'bias': '(False)'}), '(3 + c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False)\n', (18446, 18515), True, 'import torch.nn as nn\n'), ((18537, 18577), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['conv_dim'], {'affine': '(True)'}), '(conv_dim, affine=True)\n', (18554, 18577), True, 'import torch.nn as nn\n'), ((18601, 18622), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (18608, 18622), True, 'import torch.nn as nn\n'), ((20408, 20437), 'torch.nn.Sequential', 'nn.Sequential', (['*smooth_layers'], {}), '(*smooth_layers)\n', (20421, 20437), True, 'import torch.nn as nn\n'), ((22159, 22180), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['p'], {}), '(p)\n', (22177, 22180), True, 'import torch.nn as nn\n'), ((24365, 24394), 'torch.nn.Sequential', 'nn.Sequential', (['*smooth_layers'], {}), '(*smooth_layers)\n', (24378, 24394), True, 'import torch.nn as nn\n'), ((27753, 27768), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), 
'()\n', (27766, 27768), True, 'import torch.nn as nn\n'), ((27804, 27819), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (27817, 27819), True, 'import torch.nn as nn\n'), ((28377, 28406), 'torch.nn.Sequential', 'nn.Sequential', (['*featGenLayers'], {}), '(*featGenLayers)\n', (28390, 28406), True, 'import torch.nn as nn\n'), ((28437, 28470), 'torch.nn.Linear', 'nn.Linear', (['curr_dim', 'out_feat_dim'], {}), '(curr_dim, out_feat_dim)\n', (28446, 28470), True, 'import torch.nn as nn\n'), ((30476, 30505), 'torch.nn.Sequential', 'nn.Sequential', (['*smooth_layers'], {}), '(*smooth_layers)\n', (30489, 30505), True, 'import torch.nn as nn\n'), ((31481, 31505), 'torch.cat', 'torch.cat', (['[x, c]'], {'dim': '(1)'}), '([x, c], dim=1)\n', (31490, 31505), False, 'import torch\n'), ((35207, 35274), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512 + extra_dim)', '(512)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(512 + extra_dim, 512, kernel_size=3, stride=1, padding=1)\n', (35216, 35274), True, 'import torch.nn as nn\n'), ((35296, 35313), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (35308, 35313), True, 'import torch.nn as nn\n'), ((35408, 35451), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (35419, 35451), True, 'import torch.nn as nn\n'), ((35672, 35740), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024 + extra_dim)', '(512)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(1024 + extra_dim, 512, kernel_size=3, stride=1, padding=1)\n', (35681, 35740), True, 'import torch.nn as nn\n'), ((35762, 35779), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (35774, 35779), True, 'import torch.nn as nn\n'), ((35874, 35917), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (35885, 35917), True, 'import torch.nn as nn\n'), ((36117, 36190), 
'torch.nn.Conv2d', 'nn.Conv2d', (['(512 + 256 + extra_dim)', '(512)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(512 + 256 + extra_dim, 512, kernel_size=3, stride=1, padding=1)\n', (36126, 36190), True, 'import torch.nn as nn\n'), ((36210, 36227), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (36222, 36227), True, 'import torch.nn as nn\n'), ((41899, 41935), 'torch.nn.Sequential', 'nn.Sequential', (['*cond_parallel_layers'], {}), '(*cond_parallel_layers)\n', (41912, 41935), True, 'import torch.nn as nn\n'), ((41959, 42050), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512 + extra_dim + gt_cond_dim)', 'start_dim'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(512 + extra_dim + gt_cond_dim, start_dim, kernel_size=3, stride=1,\n padding=1)\n', (41968, 42050), True, 'import torch.nn as nn\n'), ((42068, 42099), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (42080, 42099), True, 'import torch.nn as nn\n'), ((42465, 42553), 'torch.nn.Conv2d', 'nn.Conv2d', (['(start_dim + extra_dim)', '(start_dim // 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(start_dim + extra_dim, start_dim // 2, kernel_size=3, stride=1,\n padding=1)\n', (42474, 42553), True, 'import torch.nn as nn\n'), ((42604, 42635), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (42616, 42635), True, 'import torch.nn as nn\n'), ((42912, 43000), 'torch.nn.Conv2d', 'nn.Conv2d', (['(start_dim + extra_dim)', '(start_dim // 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(start_dim + extra_dim, start_dim // 2, kernel_size=3, stride=1,\n padding=1)\n', (42921, 43000), True, 'import torch.nn as nn\n'), ((43051, 43082), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (43063, 43082), True, 'import torch.nn as nn\n'), ((49215, 49288), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', 
'nc'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': 'pad', 'bias': '(False)'}), '(curr_dim, nc, kernel_size=7, stride=1, padding=pad, bias=False)\n', (49224, 49288), True, 'import torch.nn as nn\n'), ((49366, 49375), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (49373, 49375), True, 'import torch.nn as nn\n'), ((50522, 50560), 'torch.cat', 'torch.cat', (['[downsamp_out, feat]'], {'dim': '(1)'}), '([downsamp_out, feat], dim=1)\n', (50531, 50560), False, 'import torch\n'), ((51683, 51708), 'torch.zeros', 'torch.zeros', (['(1, 3, 3, 3)'], {}), '((1, 3, 3, 3))\n', (51694, 51708), False, 'import torch\n'), ((52523, 52554), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (52535, 52554), True, 'import torch.nn as nn\n'), ((52643, 52676), 'numpy.log2', 'np.log2', (['(image_size // patch_size)'], {}), '(image_size // patch_size)\n', (52650, 52676), True, 'import numpy as np\n'), ((53441, 53499), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', 'c_dim'], {'kernel_size': 'k_size', 'bias': '(False)'}), '(curr_dim, c_dim, kernel_size=k_size, bias=False)\n', (53450, 53499), True, 'import torch.nn as nn\n'), ((53918, 53946), 'torch.cat', 'torch.cat', (['[x, tvImg]'], {'dim': '(1)'}), '([x, tvImg], dim=1)\n', (53927, 53946), False, 'import torch\n'), ((54912, 54944), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.01)'], {'inplace': '(True)'}), '(0.01, inplace=True)\n', (54924, 54944), True, 'import torch.nn as nn\n'), ((55397, 55467), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', '(1)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)\n', (55406, 55467), True, 'import torch.nn as nn\n'), ((55503, 55561), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', 'c_dim'], {'kernel_size': 'k_size', 'bias': '(False)'}), '(curr_dim, c_dim, kernel_size=k_size, bias=False)\n', (55512, 55561), True, 'import torch.nn as nn\n'), ((55976, 56034), 
'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', 'conv_dim'], {'kernel_size': '(4)', 'stride': '(1)', 'padding': '(1)'}), '(3, conv_dim, kernel_size=4, stride=1, padding=1)\n', (55985, 56034), True, 'import torch.nn as nn\n'), ((56058, 56090), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.01)'], {'inplace': '(True)'}), '(0.01, inplace=True)\n', (56070, 56090), True, 'import torch.nn as nn\n'), ((57138, 57197), 'torch.nn.Conv2d', 'nn.Conv2d', (['nc', 'conv_dim'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(nc, conv_dim, kernel_size=3, stride=1, padding=1)\n', (57147, 57197), True, 'import torch.nn as nn\n'), ((57221, 57245), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['conv_dim'], {}), '(conv_dim)\n', (57235, 57245), True, 'import torch.nn as nn\n'), ((57269, 57300), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (57281, 57300), True, 'import torch.nn as nn\n'), ((57324, 57389), 'torch.nn.Conv2d', 'nn.Conv2d', (['conv_dim', 'conv_dim'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(conv_dim, conv_dim, kernel_size=3, stride=1, padding=1)\n', (57333, 57389), True, 'import torch.nn as nn\n'), ((57413, 57428), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (57425, 57428), True, 'import torch.nn as nn\n'), ((57526, 57557), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (57538, 57557), True, 'import torch.nn as nn\n'), ((59246, 59277), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (59258, 59277), True, 'import torch.nn as nn\n'), ((59301, 59358), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', 'nFilt'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(512, nFilt, kernel_size=3, stride=1, padding=1)\n', (59310, 59358), True, 'import torch.nn as nn\n'), ((59382, 59403), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nFilt'], {}), '(nFilt)\n', (59396, 59403), True, 'import 
torch.nn as nn\n'), ((59427, 59458), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (59439, 59458), True, 'import torch.nn as nn\n'), ((59482, 59541), 'torch.nn.Conv2d', 'nn.Conv2d', (['nFilt', 'nFilt'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(nFilt, nFilt, kernel_size=3, stride=1, padding=1)\n', (59491, 59541), True, 'import torch.nn as nn\n'), ((59565, 59586), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nFilt'], {}), '(nFilt)\n', (59579, 59586), True, 'import torch.nn as nn\n'), ((59610, 59641), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (59622, 59641), True, 'import torch.nn as nn\n'), ((59714, 59737), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (59734, 59737), True, 'import torch.nn as nn\n'), ((59768, 59791), 'torch.nn.AdaptiveMaxPool2d', 'nn.AdaptiveMaxPool2d', (['(1)'], {}), '(1)\n', (59788, 59791), True, 'import torch.nn as nn\n'), ((61090, 61121), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (61102, 61121), True, 'import torch.nn as nn\n'), ((61145, 61202), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', 'nFilt'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(512, nFilt, kernel_size=3, stride=1, padding=1)\n', (61154, 61202), True, 'import torch.nn as nn\n'), ((61226, 61247), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nFilt'], {}), '(nFilt)\n', (61240, 61247), True, 'import torch.nn as nn\n'), ((61271, 61302), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (61283, 61302), True, 'import torch.nn as nn\n'), ((61326, 61385), 'torch.nn.Conv2d', 'nn.Conv2d', (['nFilt', 'nFilt'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(nFilt, nFilt, kernel_size=3, stride=1, padding=1)\n', (61335, 61385), True, 'import torch.nn as nn\n'), ((61409, 61430), 'torch.nn.BatchNorm2d', 
'nn.BatchNorm2d', (['nFilt'], {}), '(nFilt)\n', (61423, 61430), True, 'import torch.nn as nn\n'), ((61454, 61485), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (61466, 61485), True, 'import torch.nn as nn\n'), ((61736, 61759), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (61756, 61759), True, 'import torch.nn as nn\n'), ((61790, 61813), 'torch.nn.AdaptiveMaxPool2d', 'nn.AdaptiveMaxPool2d', (['(1)'], {}), '(1)\n', (61810, 61813), True, 'import torch.nn as nn\n'), ((63174, 63232), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', 'conv_dim'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(3, conv_dim, kernel_size=4, stride=2, padding=1)\n', (63183, 63232), True, 'import torch.nn as nn\n'), ((63256, 63288), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.01)'], {'inplace': '(True)'}), '(0.01, inplace=True)\n', (63268, 63288), True, 'import torch.nn as nn\n'), ((64215, 64286), 'torch.nn.Conv2d', 'nn.Conv2d', (['nc', 'conv_dim'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(nc, conv_dim, kernel_size=4, stride=2, padding=1, bias=False)\n', (64224, 64286), True, 'import torch.nn as nn\n'), ((64315, 64346), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (64327, 64346), True, 'import torch.nn as nn\n'), ((64401, 64472), 'torch.nn.Conv2d', 'nn.Conv2d', (['nc', 'conv_dim'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(nc, conv_dim, kernel_size=4, stride=2, padding=1, bias=False)\n', (64410, 64472), True, 'import torch.nn as nn\n'), ((64501, 64532), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (64513, 64532), True, 'import torch.nn as nn\n'), ((65648, 65706), 'torch.nn.Linear', 'nn.Linear', (['(curr_dim * k_size_local ** 2)', 'c_dim'], {'bias': '(False)'}), '(curr_dim * k_size_local ** 2, c_dim, bias=False)\n', 
(65657, 65706), True, 'import torch.nn as nn\n'), ((66656, 66687), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (66668, 66687), True, 'import torch.nn as nn\n'), ((66856, 66887), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (66868, 66887), True, 'import torch.nn as nn\n'), ((67868, 67945), 'torch.nn.Linear', 'nn.Linear', (['(curr_dim * (k_size_local ** 2 + k_size_global ** 2))', '(1)'], {'bias': '(False)'}), '(curr_dim * (k_size_local ** 2 + k_size_global ** 2), 1, bias=False)\n', (67877, 67945), True, 'import torch.nn as nn\n'), ((68032, 68090), 'torch.nn.Linear', 'nn.Linear', (['(curr_dim * k_size_local ** 2)', 'c_dim'], {'bias': '(False)'}), '(curr_dim * k_size_local ** 2, c_dim, bias=False)\n', (68041, 68090), True, 'import torch.nn as nn\n'), ((68817, 68884), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nc + c_dim)', 'conv_dim'], {'kernel_size': 'k', 'stride': '(2)', 'padding': '(1)'}), '(nc + c_dim, conv_dim, kernel_size=k, stride=2, padding=1)\n', (68826, 68884), True, 'import torch.nn as nn\n'), ((69010, 69042), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.01)'], {'inplace': '(True)'}), '(0.01, inplace=True)\n', (69022, 69042), True, 'import torch.nn as nn\n'), ((70092, 70116), 'torch.cat', 'torch.cat', (['[x, c]'], {'dim': '(1)'}), '([x, c], dim=1)\n', (70101, 70116), False, 'import torch\n'), ((71636, 71695), 'torch.nn.Linear', 'nn.Linear', (['(curr_dim * k_size ** 2)', '(feat_dim * 2)'], {'bias': '(False)'}), '(curr_dim * k_size ** 2, feat_dim * 2, bias=False)\n', (71645, 71695), True, 'import torch.nn as nn\n'), ((71717, 71749), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.01)'], {'inplace': '(True)'}), '(0.01, inplace=True)\n', (71729, 71749), True, 'import torch.nn as nn\n'), ((71810, 71855), 'torch.nn.Linear', 'nn.Linear', (['(feat_dim * 2)', 'feat_dim'], {'bias': '(False)'}), '(feat_dim * 2, feat_dim, bias=False)\n', (71819, 71855), True, 'import 
torch.nn as nn\n'), ((72116, 72140), 'torch.cat', 'torch.cat', (['[x, c]'], {'dim': '(1)'}), '([x, c], dim=1)\n', (72125, 72140), False, 'import torch\n'), ((72705, 72767), 'torch.nn.Conv2d', 'nn.Conv2d', (['chn_in', 'chn_out', '(1)'], {'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(chn_in, chn_out, 1, stride=1, padding=0, bias=False)\n', (72714, 72767), True, 'import torch.nn as nn\n'), ((73357, 73400), 'torchvision.models.squeezenet1_1', 'models.squeezenet1_1', ([], {'pretrained': 'pretrained'}), '(pretrained=pretrained)\n', (73377, 73400), False, 'from torchvision import models\n'), ((3485, 3522), 'torch.cat', 'torch.cat', (['[up_out, extra_inp]'], {'dim': '(1)'}), '([up_out, extra_inp], dim=1)\n', (3494, 3522), False, 'import torch\n'), ((3820, 3839), 'torch.nn.functional.tanh', 'F.tanh', (['cord_offset'], {}), '(cord_offset)\n', (3826, 3839), True, 'import torch.nn.functional as F\n'), ((4442, 4465), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['pad'], {}), '(pad)\n', (4460, 4465), True, 'import torch.nn as nn\n'), ((4595, 4694), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', 'dim_in'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': 'pad', 'dilation': 'dilation', 'bias': '(False)'}), '(dim_in, dim_in, kernel_size=3, stride=1, padding=pad, dilation=\n dilation, bias=False)\n', (4604, 4694), True, 'import torch.nn as nn\n'), ((4703, 4741), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['dim_in'], {'affine': '(True)'}), '(dim_in, affine=True)\n', (4720, 4741), True, 'import torch.nn as nn\n'), ((4755, 4786), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (4767, 4786), True, 'import torch.nn as nn\n'), ((4873, 4896), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['pad'], {}), '(pad)\n', (4891, 4896), True, 'import torch.nn as nn\n'), ((5038, 5137), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', 'dim_in'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': 'pad', 'dilation': 'dilation', 
'bias': '(False)'}), '(dim_in, dim_in, kernel_size=3, stride=1, padding=pad, dilation=\n dilation, bias=False)\n', (5047, 5137), True, 'import torch.nn as nn\n'), ((5146, 5184), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['dim_in'], {'affine': '(True)'}), '(dim_in, affine=True)\n', (5163, 5184), True, 'import torch.nn as nn\n'), ((5198, 5229), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (5210, 5229), True, 'import torch.nn as nn\n'), ((5629, 5652), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['pad'], {}), '(pad)\n', (5647, 5652), True, 'import torch.nn as nn\n'), ((5782, 5881), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', 'dim_in'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': 'pad', 'dilation': 'dilation', 'bias': '(False)'}), '(dim_in, dim_in, kernel_size=3, stride=1, padding=pad, dilation=\n dilation, bias=False)\n', (5791, 5881), True, 'import torch.nn as nn\n'), ((5890, 5925), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['dim_in'], {'affine': '(True)'}), '(dim_in, affine=True)\n', (5904, 5925), True, 'import torch.nn as nn\n'), ((5939, 5970), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (5951, 5970), True, 'import torch.nn as nn\n'), ((6057, 6080), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['pad'], {}), '(pad)\n', (6075, 6080), True, 'import torch.nn as nn\n'), ((6222, 6321), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', 'dim_in'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': 'pad', 'dilation': 'dilation', 'bias': '(False)'}), '(dim_in, dim_in, kernel_size=3, stride=1, padding=pad, dilation=\n dilation, bias=False)\n', (6231, 6321), True, 'import torch.nn as nn\n'), ((6330, 6365), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['dim_in'], {'affine': '(True)'}), '(dim_in, affine=True)\n', (6344, 6365), True, 'import torch.nn as nn\n'), ((6379, 6410), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), 
'(0.1, inplace=True)\n', (6391, 6410), True, 'import torch.nn as nn\n'), ((6812, 6835), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['pad'], {}), '(pad)\n', (6830, 6835), True, 'import torch.nn as nn\n'), ((6965, 7064), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', 'dim_in'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': 'pad', 'dilation': 'dilation', 'bias': '(False)'}), '(dim_in, dim_in, kernel_size=3, stride=1, padding=pad, dilation=\n dilation, bias=False)\n', (6974, 7064), True, 'import torch.nn as nn\n'), ((7073, 7104), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (7085, 7104), True, 'import torch.nn as nn\n'), ((7191, 7214), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['pad'], {}), '(pad)\n', (7209, 7214), True, 'import torch.nn as nn\n'), ((7356, 7455), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', 'dim_in'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': 'pad', 'dilation': 'dilation', 'bias': '(False)'}), '(dim_in, dim_in, kernel_size=3, stride=1, padding=pad, dilation=\n dilation, bias=False)\n', (7365, 7455), True, 'import torch.nn as nn\n'), ((7464, 7495), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (7476, 7495), True, 'import torch.nn as nn\n'), ((8164, 8250), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', '(curr_dim * 2)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1, bias=\n False)\n', (8173, 8250), True, 'import torch.nn as nn\n'), ((8271, 8315), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['(curr_dim * 2)'], {'affine': '(True)'}), '(curr_dim * 2, affine=True)\n', (8288, 8315), True, 'import torch.nn as nn\n'), ((8341, 8362), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (8348, 8362), True, 'import torch.nn as nn\n'), ((8592, 8687), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['curr_dim', 
'(curr_dim // 2)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(curr_dim, curr_dim // 2, kernel_size=4, stride=2,\n padding=1, bias=False)\n', (8610, 8687), True, 'import torch.nn as nn\n'), ((8709, 8754), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['(curr_dim // 2)'], {'affine': '(True)'}), '(curr_dim // 2, affine=True)\n', (8726, 8754), True, 'import torch.nn as nn\n'), ((8780, 8801), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (8787, 8801), True, 'import torch.nn as nn\n'), ((9836, 9922), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', '(curr_dim * 2)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1, bias=\n False)\n', (9845, 9922), True, 'import torch.nn as nn\n'), ((9943, 9987), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['(curr_dim * 2)'], {'affine': '(True)'}), '(curr_dim * 2, affine=True)\n', (9960, 9987), True, 'import torch.nn as nn\n'), ((10013, 10034), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (10020, 10034), True, 'import torch.nn as nn\n'), ((10264, 10359), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['curr_dim', '(curr_dim // 2)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(curr_dim, curr_dim // 2, kernel_size=4, stride=2,\n padding=1, bias=False)\n', (10282, 10359), True, 'import torch.nn as nn\n'), ((10381, 10426), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['(curr_dim // 2)'], {'affine': '(True)'}), '(curr_dim // 2, affine=True)\n', (10398, 10426), True, 'import torch.nn as nn\n'), ((10452, 10473), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (10459, 10473), True, 'import torch.nn as nn\n'), ((11776, 11862), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', '(curr_dim * 2)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(curr_dim, 
curr_dim * 2, kernel_size=4, stride=2, padding=1, bias=\n False)\n', (11785, 11862), True, 'import torch.nn as nn\n'), ((11883, 11927), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['(curr_dim * 2)'], {'affine': '(True)'}), '(curr_dim * 2, affine=True)\n', (11900, 11927), True, 'import torch.nn as nn\n'), ((11953, 11974), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (11960, 11974), True, 'import torch.nn as nn\n'), ((12374, 12473), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(curr_dim + 3)', '(curr_dim // 2)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(curr_dim + 3, curr_dim // 2, kernel_size=4, stride=2,\n padding=1, bias=False)\n', (12392, 12473), True, 'import torch.nn as nn\n'), ((12509, 12554), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['(curr_dim // 2)'], {'affine': '(True)'}), '(curr_dim // 2, affine=True)\n', (12526, 12554), True, 'import torch.nn as nn\n'), ((12595, 12617), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (12602, 12617), True, 'import torch.nn as nn\n'), ((14649, 14735), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', '(curr_dim * 2)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1, bias=\n False)\n', (14658, 14735), True, 'import torch.nn as nn\n'), ((14756, 14800), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['(curr_dim * 2)'], {'affine': '(True)'}), '(curr_dim * 2, affine=True)\n', (14773, 14800), True, 'import torch.nn as nn\n'), ((14826, 14847), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (14833, 14847), True, 'import torch.nn as nn\n'), ((15467, 15566), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(curr_dim + 3)', '(curr_dim // 2)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(curr_dim + 3, curr_dim // 2, kernel_size=4, stride=2,\n padding=1, 
bias=False)\n', (15485, 15566), True, 'import torch.nn as nn\n'), ((15602, 15647), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['(curr_dim // 2)'], {'affine': '(True)'}), '(curr_dim // 2, affine=True)\n', (15619, 15647), True, 'import torch.nn as nn\n'), ((15688, 15710), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (15695, 15710), True, 'import torch.nn as nn\n'), ((15798, 15897), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(curr_dim + 3)', '(curr_dim // 2)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(curr_dim + 3, curr_dim // 2, kernel_size=4, stride=2,\n padding=1, bias=False)\n', (15816, 15897), True, 'import torch.nn as nn\n'), ((15938, 15983), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['(curr_dim // 2)'], {'affine': '(True)'}), '(curr_dim // 2, affine=True)\n', (15955, 15983), True, 'import torch.nn as nn\n'), ((16029, 16051), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (16036, 16051), True, 'import torch.nn as nn\n'), ((18730, 18816), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', '(curr_dim * 2)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1, bias=\n False)\n', (18739, 18816), True, 'import torch.nn as nn\n'), ((18837, 18881), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['(curr_dim * 2)'], {'affine': '(True)'}), '(curr_dim * 2, affine=True)\n', (18854, 18881), True, 'import torch.nn as nn\n'), ((18907, 18928), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (18914, 18928), True, 'import torch.nn as nn\n'), ((19349, 19448), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(curr_dim + 3)', '(curr_dim // 2)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(curr_dim + 3, curr_dim // 2, kernel_size=4, stride=2,\n padding=1, bias=False)\n', (19367, 19448), True, 'import 
torch.nn as nn\n'), ((19484, 19529), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['(curr_dim // 2)'], {'affine': '(True)'}), '(curr_dim // 2, affine=True)\n', (19501, 19529), True, 'import torch.nn as nn\n'), ((19570, 19592), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (19577, 19592), True, 'import torch.nn as nn\n'), ((22244, 22266), 'torch.nn.ReplicationPad2d', 'nn.ReplicationPad2d', (['p'], {}), '(p)\n', (22263, 22266), True, 'import torch.nn as nn\n'), ((23593, 23692), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(curr_dim + 3)', '(curr_dim // 2)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(curr_dim + 3, curr_dim // 2, kernel_size=4, stride=2,\n padding=1, bias=False)\n', (23611, 23692), True, 'import torch.nn as nn\n'), ((23728, 23773), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['(curr_dim // 2)'], {'affine': '(True)'}), '(curr_dim // 2, affine=True)\n', (23745, 23773), True, 'import torch.nn as nn\n'), ((23814, 23836), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (23821, 23836), True, 'import torch.nn as nn\n'), ((41542, 41594), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(64)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(1, 64, kernel_size=3, stride=1, padding=1)\n', (41551, 41594), True, 'import torch.nn as nn\n'), ((41636, 41667), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (41648, 41667), True, 'import torch.nn as nn\n'), ((41709, 41784), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', 'self.cond_parallel_track'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(64, self.cond_parallel_track, kernel_size=3, stride=1, padding=1)\n', (41718, 41784), True, 'import torch.nn as nn\n'), ((41826, 41857), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (41838, 41857), True, 'import torch.nn as nn\n'), ((48946, 
48991), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['(curr_dim // 2)'], {'affine': '(True)'}), '(curr_dim // 2, affine=True)\n', (48963, 48991), True, 'import torch.nn as nn\n'), ((49017, 49048), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (49029, 49048), True, 'import torch.nn as nn\n'), ((49161, 49184), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['pad'], {}), '(pad)\n', (49179, 49184), True, 'import torch.nn as nn\n'), ((52079, 52143), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.nc', 'conv_dim'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(self.nc, conv_dim, kernel_size=3, stride=1, padding=1)\n', (52088, 52143), True, 'import torch.nn as nn\n'), ((52171, 52202), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (52183, 52202), True, 'import torch.nn as nn\n'), ((52230, 52307), 'torch.nn.Conv2d', 'nn.Conv2d', (['conv_dim', 'conv_dim'], {'kernel_size': 'dkz', 'stride': 'init_stride', 'padding': '(1)'}), '(conv_dim, conv_dim, kernel_size=dkz, stride=init_stride, padding=1)\n', (52239, 52307), True, 'import torch.nn as nn\n'), ((52349, 52425), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.nc', 'conv_dim'], {'kernel_size': 'dkz', 'stride': 'init_stride', 'padding': '(1)'}), '(self.nc, conv_dim, kernel_size=dkz, stride=init_stride, padding=1)\n', (52358, 52425), True, 'import torch.nn as nn\n'), ((52475, 52499), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['conv_dim'], {}), '(conv_dim)\n', (52489, 52499), True, 'import torch.nn as nn\n'), ((52882, 52953), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', 'out_dim'], {'kernel_size': 'dkz', 'stride': 'stride', 'padding': '(1)'}), '(curr_dim, out_dim, kernel_size=dkz, stride=stride, padding=1)\n', (52891, 52953), True, 'import torch.nn as nn\n'), ((53062, 53093), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (53074, 53093), True, 'import torch.nn as 
nn\n'), ((53705, 53743), 'torch.nn.Linear', 'nn.Linear', (['c_dim', 'curr_dim'], {'bias': '(False)'}), '(c_dim, curr_dim, bias=False)\n', (53714, 53743), True, 'import torch.nn as nn\n'), ((54819, 54887), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', 'conv_dim'], {'kernel_size': '(4)', 'stride': 'init_stride', 'padding': '(1)'}), '(3, conv_dim, kernel_size=4, stride=init_stride, padding=1)\n', (54828, 54887), True, 'import torch.nn as nn\n'), ((55162, 55194), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.01)'], {'inplace': '(True)'}), '(0.01, inplace=True)\n', (55174, 55194), True, 'import torch.nn as nn\n'), ((56186, 56255), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', '(curr_dim * 2)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1)\n', (56195, 56255), True, 'import torch.nn as nn\n'), ((56281, 56313), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.01)'], {'inplace': '(True)'}), '(0.01, inplace=True)\n', (56293, 56313), True, 'import torch.nn as nn\n'), ((56386, 56409), 'numpy.power', 'np.power', (['(2)', 'repeat_num'], {}), '(2, repeat_num)\n', (56394, 56409), True, 'import numpy as np\n'), ((57478, 57502), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['conv_dim'], {}), '(conv_dim)\n', (57492, 57502), True, 'import torch.nn as nn\n'), ((57744, 57808), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', 'out_dim'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(curr_dim, out_dim, kernel_size=3, stride=1, padding=1)\n', (57753, 57808), True, 'import torch.nn as nn\n'), ((57836, 57859), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_dim'], {}), '(out_dim)\n', (57850, 57859), True, 'import torch.nn as nn\n'), ((57887, 57918), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {'inplace': '(True)'}), '(0.1, inplace=True)\n', (57899, 57918), True, 'import torch.nn as nn\n'), ((63384, 63453), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', '(curr_dim * 2)'], {'kernel_size': '(4)', 'stride': '(2)', 
'padding': '(1)'}), '(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1)\n', (63393, 63453), True, 'import torch.nn as nn\n'), ((63479, 63511), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.01)'], {'inplace': '(True)'}), '(0.01, inplace=True)\n', (63491, 63511), True, 'import torch.nn as nn\n'), ((63584, 63607), 'numpy.power', 'np.power', (['(2)', 'repeat_num'], {}), '(2, repeat_num)\n', (63592, 63607), True, 'import numpy as np\n'), ((64767, 64798), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (64779, 64798), True, 'import torch.nn as nn\n'), ((65083, 65114), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (65095, 65114), True, 'import torch.nn as nn\n'), ((65205, 65234), 'numpy.power', 'np.power', (['(2)', 'repeat_num_local'], {}), '(2, repeat_num_local)\n', (65213, 65234), True, 'import numpy as np\n'), ((65276, 65306), 'numpy.power', 'np.power', (['(2)', 'repeat_num_global'], {}), '(2, repeat_num_global)\n', (65284, 65306), True, 'import numpy as np\n'), ((66555, 66626), 'torch.nn.Conv2d', 'nn.Conv2d', (['nc', 'conv_dim'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(nc, conv_dim, kernel_size=4, stride=2, padding=1, bias=False)\n', (66564, 66626), True, 'import torch.nn as nn\n'), ((66755, 66826), 'torch.nn.Conv2d', 'nn.Conv2d', (['nc', 'conv_dim'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(nc, conv_dim, kernel_size=4, stride=2, padding=1, bias=False)\n', (66764, 66826), True, 'import torch.nn as nn\n'), ((67136, 67167), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (67148, 67167), True, 'import torch.nn as nn\n'), ((67466, 67497), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (67478, 67497), True, 'import torch.nn as nn\n'), ((67588, 67617), 'numpy.power', 'np.power', 
(['(2)', 'repeat_num_local'], {}), '(2, repeat_num_local)\n', (67596, 67617), True, 'import numpy as np\n'), ((67659, 67689), 'numpy.power', 'np.power', (['(2)', 'repeat_num_global'], {}), '(2, repeat_num_global)\n', (67667, 67689), True, 'import numpy as np\n'), ((68946, 68986), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['conv_dim'], {'affine': '(True)'}), '(conv_dim, affine=True)\n', (68963, 68986), True, 'import torch.nn as nn\n'), ((69102, 69120), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.25)'}), '(p=0.25)\n', (69112, 69120), True, 'import torch.nn as nn\n'), ((69451, 69483), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.01)'], {'inplace': '(True)'}), '(0.01, inplace=True)\n', (69463, 69483), True, 'import torch.nn as nn\n'), ((69655, 69678), 'numpy.power', 'np.power', (['(2)', 'repeat_num'], {}), '(2, repeat_num)\n', (69663, 69678), True, 'import numpy as np\n'), ((70882, 70897), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (70894, 70897), True, 'import torch.nn as nn\n'), ((71500, 71523), 'numpy.power', 'np.power', (['(2)', 'repeat_num'], {}), '(2, repeat_num)\n', (71508, 71523), True, 'import numpy as np\n'), ((72647, 72659), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (72657, 72659), True, 'import torch.nn as nn\n'), ((73039, 73069), 'torch.sum', 'torch.sum', (['(in_feat ** 2)'], {'dim': '(1)'}), '(in_feat ** 2, dim=1)\n', (73048, 73069), False, 'import torch\n'), ((76900, 77020), 'torch.load', 'torch.load', (["(\n '/BS/rshetty-wrk/work/code/controlled-generation/trained_models/perceptualSim/'\n + network + '.pth')"], {}), "(\n '/BS/rshetty-wrk/work/code/controlled-generation/trained_models/perceptualSim/'\n + network + '.pth')\n", (76910, 77020), False, 'import torch\n'), ((2369, 2397), 'torch.__version__.split', 'torch.__version__.split', (['"""."""'], {}), "('.')\n", (2392, 2397), False, 'import torch\n'), ((4539, 4561), 'torch.nn.ReplicationPad2d', 'nn.ReplicationPad2d', (['p'], {}), '(p)\n', (4558, 4561), True, 
'import torch.nn as nn\n'), ((4970, 4992), 'torch.nn.ReplicationPad2d', 'nn.ReplicationPad2d', (['p'], {}), '(p)\n', (4989, 4992), True, 'import torch.nn as nn\n'), ((5726, 5748), 'torch.nn.ReplicationPad2d', 'nn.ReplicationPad2d', (['p'], {}), '(p)\n', (5745, 5748), True, 'import torch.nn as nn\n'), ((6154, 6176), 'torch.nn.ReplicationPad2d', 'nn.ReplicationPad2d', (['p'], {}), '(p)\n', (6173, 6176), True, 'import torch.nn as nn\n'), ((6909, 6931), 'torch.nn.ReplicationPad2d', 'nn.ReplicationPad2d', (['p'], {}), '(p)\n', (6928, 6931), True, 'import torch.nn as nn\n'), ((7288, 7310), 'torch.nn.ReplicationPad2d', 'nn.ReplicationPad2d', (['p'], {}), '(p)\n', (7307, 7310), True, 'import torch.nn as nn\n'), ((13408, 13451), 'torch.nn.functional.avg_pool2d', 'nn.functional.avg_pool2d', (['x', 'curr_downscale'], {}), '(x, curr_downscale)\n', (13432, 13451), True, 'import torch.nn as nn\n'), ((16994, 17037), 'torch.nn.functional.avg_pool2d', 'nn.functional.avg_pool2d', (['x', 'curr_downscale'], {}), '(x, curr_downscale)\n', (17018, 17037), True, 'import torch.nn as nn\n'), ((20263, 20326), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(3)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(3, 3, kernel_size=3, stride=1, padding=1, bias=False)\n', (20272, 20326), True, 'import torch.nn as nn\n'), ((20365, 20374), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (20372, 20374), True, 'import torch.nn as nn\n'), ((20942, 20985), 'torch.nn.functional.avg_pool2d', 'nn.functional.avg_pool2d', (['x', 'curr_downscale'], {}), '(x, curr_downscale)\n', (20966, 20985), True, 'import torch.nn as nn\n'), ((24220, 24283), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(3)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(3, 3, kernel_size=3, stride=1, padding=1, bias=False)\n', (24229, 24283), True, 'import torch.nn as nn\n'), ((24322, 24331), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (24329, 24331), True, 'import torch.nn as 
nn\n'), ((24840, 24883), 'torch.nn.functional.avg_pool2d', 'nn.functional.avg_pool2d', (['x', 'curr_downscale'], {}), '(x, curr_downscale)\n', (24864, 24883), True, 'import torch.nn as nn\n'), ((29515, 29560), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['(curr_dim // 2)'], {'affine': '(True)'}), '(curr_dim // 2, affine=True)\n', (29532, 29560), True, 'import torch.nn as nn\n'), ((29605, 29627), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (29612, 29627), True, 'import torch.nn as nn\n'), ((30331, 30394), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(3)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(3, 3, kernel_size=3, stride=1, padding=1, bias=False)\n', (30340, 30394), True, 'import torch.nn as nn\n'), ((30433, 30442), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (30440, 30442), True, 'import torch.nn as nn\n'), ((30718, 30757), 'torch.nn.functional.avg_pool2d', 'nn.functional.avg_pool2d', (['x', 'curr_scale'], {}), '(x, curr_scale)\n', (30742, 30757), True, 'import torch.nn as nn\n'), ((32528, 32554), 'torch.nn.functional.softmax', 'F.softmax', (['allmasks'], {'dim': '(1)'}), '(allmasks, dim=1)\n', (32537, 32554), True, 'import torch.nn.functional as F\n'), ((38445, 38471), 'torch.nn.functional.softmax', 'F.softmax', (['allmasks'], {'dim': '(1)'}), '(allmasks, dim=1)\n', (38454, 38471), True, 'import torch.nn.functional as F\n'), ((45751, 45777), 'torch.nn.functional.softmax', 'F.softmax', (['allmasks'], {'dim': '(1)'}), '(allmasks, dim=1)\n', (45760, 45777), True, 'import torch.nn.functional as F\n'), ((48218, 48313), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['curr_dim', '(curr_dim // 2)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(curr_dim, curr_dim // 2, kernel_size=4, stride=2,\n padding=1, bias=False)\n', (48236, 48313), True, 'import torch.nn as nn\n'), ((51848, 51892), 'torch.autograd.Variable', 'Variable', (['self.tvWeight'], 
{'requires_grad': '(False)'}), '(self.tvWeight, requires_grad=False)\n', (51856, 51892), False, 'from torch.autograd import Variable\n'), ((53011, 53034), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_dim'], {}), '(out_dim)\n', (53025, 53034), True, 'import torch.nn as nn\n'), ((53850, 53890), 'torch.nn.functional.pad', 'F.pad', (['x', '(1, 1, 1, 1)'], {'mode': '"""replicate"""'}), "(x, (1, 1, 1, 1), mode='replicate')\n", (53855, 53890), True, 'import torch.nn.functional as F\n'), ((58130, 58145), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (58142, 58145), True, 'import torch.nn as nn\n'), ((69551, 69569), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.25)'}), '(p=0.25)\n', (69561, 69569), True, 'import torch.nn as nn\n'), ((71187, 71202), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (71199, 71202), True, 'import torch.nn as nn\n'), ((2673, 2774), 'torch.nn.Conv2d', 'nn.Conv2d', (['(dim_in if i == 0 else dim_out)', 'dim_out'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(dim_in if i == 0 else dim_out, dim_out, kernel_size=3, stride=1,\n padding=1, bias=False)\n', (2682, 2774), True, 'import torch.nn as nn\n'), ((13794, 13837), 'torch.nn.functional.avg_pool2d', 'nn.functional.avg_pool2d', (['x', 'curr_downscale'], {}), '(x, curr_downscale)\n', (13818, 13837), True, 'import torch.nn as nn\n'), ((17469, 17512), 'torch.nn.functional.avg_pool2d', 'nn.functional.avg_pool2d', (['x', 'curr_downscale'], {}), '(x, curr_downscale)\n', (17493, 17512), True, 'import torch.nn as nn\n'), ((17765, 17808), 'torch.nn.functional.avg_pool2d', 'nn.functional.avg_pool2d', (['x', 'curr_downscale'], {}), '(x, curr_downscale)\n', (17789, 17808), True, 'import torch.nn as nn\n'), ((21353, 21396), 'torch.nn.functional.avg_pool2d', 'nn.functional.avg_pool2d', (['x', 'curr_downscale'], {}), '(x, curr_downscale)\n', (21377, 21396), True, 'import torch.nn as nn\n'), ((25251, 25294), 'torch.nn.functional.avg_pool2d', 
'nn.functional.avg_pool2d', (['x', 'curr_downscale'], {}), '(x, curr_downscale)\n', (25275, 25294), True, 'import torch.nn as nn\n'), ((28300, 28315), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (28312, 28315), True, 'import torch.nn as nn\n'), ((28328, 28343), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(4)'], {}), '(4)\n', (28340, 28343), True, 'import torch.nn as nn\n'), ((28657, 28767), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(curr_dim + extra_dim)', '(curr_dim // 2)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': 'use_bias'}), '(curr_dim + extra_dim, curr_dim // 2, kernel_size=4,\n stride=2, padding=1, bias=use_bias)\n', (28675, 28767), True, 'import torch.nn as nn\n'), ((30849, 30888), 'torch.nn.functional.avg_pool2d', 'nn.functional.avg_pool2d', (['c', 'curr_scale'], {}), '(c, curr_scale)\n', (30873, 30888), True, 'import torch.nn as nn\n'), ((38192, 38250), 'torch.cat', 'torch.cat', (['[up_inp[-1], vgg_out[-1 - (i + 1) // 4]]'], {'dim': '(1)'}), '([up_inp[-1], vgg_out[-1 - (i + 1) // 4]], dim=1)\n', (38201, 38250), False, 'import torch\n'), ((48387, 48430), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (48398, 48430), True, 'import torch.nn as nn\n'), ((48462, 48549), 'torch.nn.Conv2d', 'nn.Conv2d', (['curr_dim', '(curr_dim // 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(curr_dim, curr_dim // 2, kernel_size=3, stride=1, padding=1, bias\n =False)\n', (48471, 48549), True, 'import torch.nn as nn\n'), ((53161, 53184), 'numpy.power', 'np.power', (['(2)', 'repeat_num'], {}), '(2, repeat_num)\n', (53169, 53184), True, 'import numpy as np\n'), ((55278, 55301), 'numpy.power', 'np.power', (['(2)', 'repeat_num'], {}), '(2, repeat_num)\n', (55286, 55301), True, 'import numpy as np\n'), ((28868, 28911), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), 
"(scale_factor=2, mode='nearest')\n", (28879, 28911), True, 'import torch.nn as nn\n'), ((28968, 29069), 'torch.nn.Conv2d', 'nn.Conv2d', (['(curr_dim + extra_dim)', '(curr_dim // 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': 'use_bias'}), '(curr_dim + extra_dim, curr_dim // 2, kernel_size=3, stride=1,\n padding=1, bias=use_bias)\n', (28977, 29069), True, 'import torch.nn as nn\n'), ((32556, 32593), 'torch.zeros_like', 'torch.zeros_like', (['allmasks[:, 0:1, :]'], {}), '(allmasks[:, 0:1, :])\n', (32572, 32593), False, 'import torch\n'), ((36591, 36628), 'torch.Tensor', 'torch.Tensor', (['[-0.03, -0.088, -0.188]'], {}), '([-0.03, -0.088, -0.188])\n', (36603, 36628), False, 'import torch\n'), ((36715, 36749), 'torch.Tensor', 'torch.Tensor', (['[0.458, 0.448, 0.45]'], {}), '([0.458, 0.448, 0.45])\n', (36727, 36749), False, 'import torch\n'), ((38473, 38510), 'torch.zeros_like', 'torch.zeros_like', (['allmasks[:, 0:1, :]'], {}), '(allmasks[:, 0:1, :])\n', (38489, 38510), False, 'import torch\n'), ((43497, 43534), 'torch.Tensor', 'torch.Tensor', (['[-0.03, -0.088, -0.188]'], {}), '([-0.03, -0.088, -0.188])\n', (43509, 43534), False, 'import torch\n'), ((43621, 43655), 'torch.Tensor', 'torch.Tensor', (['[0.458, 0.448, 0.45]'], {}), '([0.458, 0.448, 0.45])\n', (43633, 43655), False, 'import torch\n'), ((45779, 45816), 'torch.zeros_like', 'torch.zeros_like', (['allmasks[:, 0:1, :]'], {}), '(allmasks[:, 0:1, :])\n', (45795, 45816), False, 'import torch\n'), ((59902, 59939), 'torch.Tensor', 'torch.Tensor', (['[-0.03, -0.088, -0.188]'], {}), '([-0.03, -0.088, -0.188])\n', (59914, 59939), False, 'import torch\n'), ((60026, 60060), 'torch.Tensor', 'torch.Tensor', (['[0.458, 0.448, 0.45]'], {}), '([0.458, 0.448, 0.45])\n', (60038, 60060), False, 'import torch\n'), ((61859, 61896), 'torch.Tensor', 'torch.Tensor', (['[-0.03, -0.088, -0.188]'], {}), '([-0.03, -0.088, -0.188])\n', (61871, 61896), False, 'import torch\n'), ((61983, 62017), 'torch.Tensor', 
'torch.Tensor', (['[0.458, 0.448, 0.45]'], {}), '([0.458, 0.448, 0.45])\n', (61995, 62017), False, 'import torch\n'), ((76175, 76212), 'torch.Tensor', 'torch.Tensor', (['[-0.03, -0.088, -0.188]'], {}), '([-0.03, -0.088, -0.188])\n', (76187, 76212), False, 'import torch\n'), ((76282, 76316), 'torch.Tensor', 'torch.Tensor', (['[0.458, 0.448, 0.45]'], {}), '([0.458, 0.448, 0.45])\n', (76294, 76316), False, 'import torch\n'), ((54286, 54304), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['h', '(2)'], {}), '(h, 2)\n', (54298, 54304), True, 'import torch.nn.functional as F\n'), ((54361, 54386), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['out_real', '(2)'], {}), '(out_real, 2)\n', (54373, 54386), True, 'import torch.nn.functional as F\n')] |
import argparse
import pandas as pd
import numpy as np
# 18 total
# Each row appears to be a directed (from, to) pair of indices into the
# `symbols` list below -- TODO confirm against the order-entry spec.
exch_index2id = np.array([[1, 3], [3, 1], [0, 4], [4, 0], [3, 2], [2, 3], [1, 2], [2, 1], [
    0, 2], [2, 0], [3, 0], [0, 3], [1, 0], [0, 1], [1, 4], [4, 1], [4, 2], [2, 4]], dtype=int)
# Filled by construct_map(): maps a quoted rate value (MDEntryPx) to the
# exchange index derived from SecurityID.
rate2pair = {}
# Currency codes; exchange indices reference positions in this list.
symbols = ["USD", "EUR", "JPY", "GBP", "CHF"]
def print_orders(raw):
    """Print the rate field of every 256-byte order entry in ``raw``.

    Parameters
    ----------
    raw : bytes
        Binary string containing concatenated fixed-width (256-byte) order
        entries.  The rate is the ASCII-encoded integer stored at byte
        offsets 157:167 of each entry.
    """
    pkt_num = len(raw) // 256  # 256 bytes per order entry
    for i in range(pkt_num):
        # Split out one fixed-width order record.
        order = raw[i * 256:(i + 1) * 256]
        # Decode the ASCII rate field.
        rate = int(order[157:167].decode())
        print(f"order{i}: {rate}")
def construct_map(df):
    """Populate the module-level ``rate2pair`` map from market-data rows.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain 'MDEntryPx' (the quoted rate) and 'SecurityID'
        columns.  The exchange index is encoded in SecurityID as
        ``SecurityID // 1024 - 1``.
    """
    px = df["MDEntryPx"]
    exch_index = df["SecurityID"] // 1024 - 1  # get exch_index
    # Iterate the values positionally instead of label-indexing with px[i],
    # so a DataFrame with a non-default index cannot raise KeyError.
    for rate, idx in zip(px.to_numpy(), exch_index.to_numpy()):
        rate2pair[rate] = idx
    # print(rate2pair)
def parse_arg():
    """Define and parse the command-line options for this tool."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '-c', '--csv', help="path of csv file", required=True)
    arg_parser.add_argument(
        '-e', '--orderentry', help="path of orderentry output", required=True)
    return arg_parser.parse_args()
if __name__ == '__main__':
    cli_args = parse_arg()
    # Read the raw order-entry dump, then build the rate map from the CSV
    # before decoding and printing the orders.
    with open(cli_args.orderentry, "rb") as entry_file:
        payload = entry_file.read()
    frame = pd.read_csv(cli_args.csv)
    construct_map(frame)
    print_orders(payload)
| [
"pandas.read_csv",
"numpy.array",
"argparse.ArgumentParser"
] | [((82, 256), 'numpy.array', 'np.array', (['[[1, 3], [3, 1], [0, 4], [4, 0], [3, 2], [2, 3], [1, 2], [2, 1], [0, 2], [2,\n 0], [3, 0], [0, 3], [1, 0], [0, 1], [1, 4], [4, 1], [4, 2], [2, 4]]'], {'dtype': 'int'}), '([[1, 3], [3, 1], [0, 4], [4, 0], [3, 2], [2, 3], [1, 2], [2, 1], [\n 0, 2], [2, 0], [3, 0], [0, 3], [1, 0], [0, 1], [1, 4], [4, 1], [4, 2],\n [2, 4]], dtype=int)\n', (90, 256), True, 'import numpy as np\n'), ((1002, 1027), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1025, 1027), False, 'import argparse\n'), ((1397, 1418), 'pandas.read_csv', 'pd.read_csv', (['args.csv'], {}), '(args.csv)\n', (1408, 1418), True, 'import pandas as pd\n')] |
import numpy as np
from deduplipy.classifier_pipeline.classifier_pipeline import ClassifierPipeline
def test_base_case():
    """Smoke-test ClassifierPipeline: step names, fit, predict, predict_proba."""
    pipeline = ClassifierPipeline()
    step_names = list(pipeline.classifier.named_steps.keys())
    assert step_names == ['standardscaler', 'logisticregression']

    features = [[0], [1]]
    targets = [0, 1]
    pipeline.fit(features, targets)

    predictions = pipeline.predict(features)
    probabilities = pipeline.predict_proba(features)

    assert isinstance(predictions, np.ndarray)
    np.testing.assert_array_equal(predictions, [0, 1])
    assert predictions.dtype == np.int64
    assert isinstance(probabilities, np.ndarray)
    assert probabilities.shape == (2, 2)
    assert probabilities.dtype == np.float64
| [
"numpy.testing.assert_array_equal",
"deduplipy.classifier_pipeline.classifier_pipeline.ClassifierPipeline"
] | [((152, 172), 'deduplipy.classifier_pipeline.classifier_pipeline.ClassifierPipeline', 'ClassifierPipeline', ([], {}), '()\n', (170, 172), False, 'from deduplipy.classifier_pipeline.classifier_pipeline import ClassifierPipeline\n'), ((498, 542), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['preds', '[0, 1]'], {}), '(preds, [0, 1])\n', (527, 542), True, 'import numpy as np\n')] |
import json
import cv2
import os
import code
import sys
import torch
import numpy as np
from Progress import ProgressBar
class HandwrittenDataset:
    """Loads pre-processed handwritten character images (.npy arrays) and
    prepares train/validation tensors for PyTorch classification.
    """

    def __init__(self, path, max_load=None):
        """Load every .npy sample under ``path`` (sorted by filename).

        Parameters
        ----------
        path : str
            Directory containing 'meta.json' (with a 'classes' list of
            per-sample labels) and one .npy image array per sample.
        max_load : int, optional
            If given, load only a random subset of this many samples.
        """
        self.path = path
        with open(os.path.join(self.path, 'meta.json'), 'r') as file:
            self.meta = json.loads(file.read())['classes']
        self.data = []
        files = sorted(e for e in os.listdir(self.path) if e.split('.')[-1] == 'npy')
        if max_load is not None:
            # Sample a random subset, keeping meta labels aligned with files.
            idx = np.random.choice(range(len(files)), max_load, replace=False)
            self.meta = [self.meta[i] for i in idx]
            files = [files[i] for i in idx]
        pb = ProgressBar("Loading", 15, len(files), update_every=2000, ea=15)
        for i, file in enumerate(files):
            self.data.append(np.load(os.path.join(self.path, file)))
            pb.update(i + 1)
        pb.finish()

    def configure(self, split=0.9, device='cpu'):
        """Split samples into train/validation sets and build tensors.

        Parameters
        ----------
        split : float
            Fraction of samples used for training (default 0.9).
        device : str
            Torch device the tensors are moved to.

        Returns
        -------
        (train_inputs, train_labels, val_inputs, val_labels) where input
        tensors have shape (N, 1, H, W) and labels are class indices.
        """
        charset = (
            "abcdefghijklmnopqrstuvwxyz"
            "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
            "0123456789"
        )
        self.class_meta = {c: i for i, c in enumerate(charset)}
        self.n_train = int(round(split * len(self.data)))
        self.n_val = len(self.data) - self.n_train
        self.train_indices = np.random.choice(
            len(self.data), self.n_train, replace=False
        )
        # Use a set for O(1) membership; `v not in ndarray` was O(n) per test,
        # making the complement computation quadratic overall.
        train_set = set(self.train_indices)
        self.val_indices = [
            v for v in range(len(self.data)) if v not in train_set
        ]
        self.train_inputs = [self.data[i] for i in self.train_indices]
        self.val_inputs = [self.data[i] for i in self.val_indices]
        self.train_labels = [
            self.class_meta[self.meta[i]] for i in self.train_indices
        ]
        self.val_labels = [
            self.class_meta[self.meta[i]] for i in self.val_indices
        ]
        self.t_train_inputs = self._to_input_tensor(self.train_inputs, device)
        self.t_train_labels = torch.tensor(self.train_labels).to(device)
        self.t_val_inputs = self._to_input_tensor(self.val_inputs, device)
        self.t_val_labels = torch.tensor(self.val_labels).to(device)
        return (
            self.t_train_inputs,
            self.t_train_labels,
            self.t_val_inputs,
            self.t_val_labels,
        )

    def _to_input_tensor(self, inputs, device):
        """Stack a list of HxW arrays into a float (N, 1, H, W) tensor on device."""
        h, w = self.data[0].shape[0], self.data[0].shape[1]
        t = torch.tensor(np.array(inputs)).type(torch.FloatTensor)
        return t.reshape(len(inputs), 1, h, w).to(device)

    def lookupTrainingInput(self, idx):
        """Return (dataset index, image array, label char) for training item idx."""
        index = self.train_indices[idx]
        return index, self.data[index], self.meta[index]

    def lookupValidationInput(self, idx):
        """Return (dataset index, image array, label char) for validation item idx."""
        index = self.val_indices[idx]
        return index, self.data[index], self.meta[index]
if __name__ == '__main__':
    # Load a capped sample of the dataset, build tensors on the GPU, then
    # drop into an interactive console for exploration.
    dataset = HandwrittenDataset('../nist_19_28/', max_load=50000)
    dataset.configure(device='cuda:0')
    code.interact(local=locals())
"numpy.load",
"os.path.join",
"os.listdir",
"torch.tensor"
] | [((1733, 1764), 'torch.tensor', 'torch.tensor', (['self.train_inputs'], {}), '(self.train_inputs)\n', (1745, 1764), False, 'import torch\n'), ((2046, 2077), 'torch.tensor', 'torch.tensor', (['self.train_labels'], {}), '(self.train_labels)\n', (2058, 2077), False, 'import torch\n'), ((2156, 2185), 'torch.tensor', 'torch.tensor', (['self.val_inputs'], {}), '(self.val_inputs)\n', (2168, 2185), False, 'import torch\n'), ((2451, 2480), 'torch.tensor', 'torch.tensor', (['self.val_labels'], {}), '(self.val_labels)\n', (2463, 2480), False, 'import torch\n'), ((775, 804), 'os.path.join', 'os.path.join', (['self.path', 'file'], {}), '(self.path, file)\n', (787, 804), False, 'import os\n'), ((224, 260), 'os.path.join', 'os.path.join', (['self.path', '"""meta.json"""'], {}), "(self.path, 'meta.json')\n", (236, 260), False, 'import os\n'), ((369, 390), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (379, 390), False, 'import os\n'), ((825, 835), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (832, 835), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import joblib
from tensorflow.keras.models import load_model
import tensorflow
from models.turnikecg import Turnikv7
from get_ecg_features import get_ecg_features
def run_12ECG_classifier(data, header_data, classes, model):
    """Classify one 12-lead ECG recording.

    Parameters
    ----------
    data : array-like
        Raw ECG signal passed to the feature extractor.
    header_data : list
        Header lines; unused here but kept for the driver's fixed signature.
    classes : list
        Ordered class names; only its length is used.
    model : keras model
        Trained classifier with a ``predict`` method.

    Returns
    -------
    (current_label, current_score)
        A one-hot int vector marking the argmax class, and the per-class
        probability scores as floats.
    """
    num_classes = len(classes)
    current_label = np.zeros(num_classes, dtype=int)

    # Extract features and add a batch dimension for the model.
    features = get_ecg_features(data)
    feats_reshape = np.expand_dims(features, axis=0)
    score = model.predict(feats_reshape)

    label = np.argmax(score, axis=1)
    current_label[label] = 1
    # Copy the whole score row at once instead of an element-wise loop.
    current_score = np.asarray(score[0], dtype=float)

    return current_label, current_score
return current_label, current_score
def load_12ECG_model():
    """Build the Turnikv7 network and restore its trained weights from disk."""
    img_rows, img_cols = 5000, 12
    num_classes = 9
    weights_file = 'models/Turnikv7_best_model.h5'
    model = Turnikv7(input_shape=(img_rows, img_cols), n_classes=num_classes)
    model.load_weights(weights_file)
    return model
| [
"numpy.argmax",
"numpy.zeros",
"numpy.expand_dims",
"models.turnikecg.Turnikv7",
"numpy.array",
"get_ecg_features.get_ecg_features"
] | [((319, 351), 'numpy.zeros', 'np.zeros', (['num_classes'], {'dtype': 'int'}), '(num_classes, dtype=int)\n', (327, 351), True, 'import numpy as np\n'), ((372, 393), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (380, 393), True, 'import numpy as np\n'), ((550, 572), 'get_ecg_features.get_ecg_features', 'get_ecg_features', (['data'], {}), '(data)\n', (566, 572), False, 'from get_ecg_features import get_ecg_features\n'), ((593, 625), 'numpy.expand_dims', 'np.expand_dims', (['features'], {'axis': '(0)'}), '(features, axis=0)\n', (607, 625), True, 'import numpy as np\n'), ((680, 704), 'numpy.argmax', 'np.argmax', (['score'], {'axis': '(1)'}), '(score, axis=1)\n', (689, 704), True, 'import numpy as np\n'), ((1085, 1150), 'models.turnikecg.Turnikv7', 'Turnikv7', ([], {'input_shape': '(img_rows, img_cols)', 'n_classes': 'num_classes'}), '(input_shape=(img_rows, img_cols), n_classes=num_classes)\n', (1093, 1150), False, 'from models.turnikecg import Turnikv7\n'), ((796, 817), 'numpy.array', 'np.array', (['score[0][i]'], {}), '(score[0][i])\n', (804, 817), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
npy.py
Convert a Caffe binaryproto mean file into a NumPy .npy file.
--liuyuan
"""
import numpy as np
caffe_root = '../'
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe

blob = caffe.proto.caffe_pb2.BlobProto()
# Read the serialized mean blob; the context manager closes the file handle
# (the original bare open(...).read() leaked it).
with open('../../' + caffe_root + 'path/to/mean/file/ImageNet-tiny-256x256-sRGB_mean.binaryproto', 'rb') as f:
    data = f.read()
blob.ParseFromString(data)
arr = np.array(caffe.io.blobproto_to_array(blob))
out = arr[0]  # drop the leading singleton batch dimension
np.save(caffe_root + 'path/to/save/new/mean/file/ImageNet-tiny-256x256-sRGB_mean.npy', out)
| [
"caffe.io.blobproto_to_array",
"numpy.save",
"sys.path.insert",
"caffe.proto.caffe_pb2.BlobProto"
] | [((152, 193), 'sys.path.insert', 'sys.path.insert', (['(0)', "(caffe_root + 'python')"], {}), "(0, caffe_root + 'python')\n", (167, 193), False, 'import sys\n'), ((216, 249), 'caffe.proto.caffe_pb2.BlobProto', 'caffe.proto.caffe_pb2.BlobProto', ([], {}), '()\n', (247, 249), False, 'import caffe\n'), ((454, 549), 'numpy.save', 'np.save', (["(caffe_root + 'path/to/save/new/mean/file/ImageNet-tiny-256x256-sRGB_mean.npy')", 'out'], {}), "(caffe_root +\n 'path/to/save/new/mean/file/ImageNet-tiny-256x256-sRGB_mean.npy', out)\n", (461, 549), True, 'import numpy as np\n'), ((406, 439), 'caffe.io.blobproto_to_array', 'caffe.io.blobproto_to_array', (['blob'], {}), '(blob)\n', (433, 439), False, 'import caffe\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.