| code | apis | extract_api |
|---|---|---|
from schmetterling.core.log import log_config, log_params_return
from schmetterling.log.state import LogState
@log_params_return('info')
def execute(state, log_dir, name, level):
log_handlers = log_config(log_dir, name, level)
return LogState(__name__, log_handlers['file_handler'].baseFilename)
|
[
"schmetterling.core.log.log_params_return",
"schmetterling.log.state.LogState",
"schmetterling.core.log.log_config"
] |
[((113, 138), 'schmetterling.core.log.log_params_return', 'log_params_return', (['"""info"""'], {}), "('info')\n", (130, 138), False, 'from schmetterling.core.log import log_config, log_params_return\n'), ((200, 232), 'schmetterling.core.log.log_config', 'log_config', (['log_dir', 'name', 'level'], {}), '(log_dir, name, level)\n', (210, 232), False, 'from schmetterling.core.log import log_config, log_params_return\n'), ((244, 305), 'schmetterling.log.state.LogState', 'LogState', (['__name__', "log_handlers['file_handler'].baseFilename"], {}), "(__name__, log_handlers['file_handler'].baseFilename)\n", (252, 305), False, 'from schmetterling.log.state import LogState\n')]
|
import tensorflowjs as tfjs
import tensorflow as tf
model = tf.keras.models.load_model("model.h5")
tfjs.converters.save_keras_model(model, "tfjs")
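# Note (added for illustration): save_keras_model writes a model.json plus
# binary weight shard files into the "tfjs" directory; in the browser the
# result can typically be loaded with tf.loadLayersModel('tfjs/model.json').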
|
[
"tensorflowjs.converters.save_keras_model",
"tensorflow.keras.models.load_model"
] |
[((61, 99), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""model.h5"""'], {}), "('model.h5')\n", (87, 99), True, 'import tensorflow as tf\n'), ((100, 147), 'tensorflowjs.converters.save_keras_model', 'tfjs.converters.save_keras_model', (['model', '"""tfjs"""'], {}), "(model, 'tfjs')\n", (132, 147), True, 'import tensorflowjs as tfjs\n')]
|
import unittest
from src.api import Settings
class SettingsTestCase(unittest.TestCase):
"""Tests the Settings class."""
def setUp(self):
self.settings = Settings(800, 600, 60, "3D Engine", use_antialiasing=False)
def test_keyword_arguments(self):
"""Check that the keyword arguments are being parsed correctly."""
self.assertTrue(hasattr(self.settings, 'use_antialiasing'))
def test_as_dict(self):
"""Check that the as_dict() method is working correctly."""
self.assertEqual(self.settings.as_dict(), self.settings.__dict__)
def tearDown(self):
del self.settings
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"src.api.Settings"
] |
[((616, 631), 'unittest.main', 'unittest.main', ([], {}), '()\n', (629, 631), False, 'import unittest\n'), ((161, 220), 'src.api.Settings', 'Settings', (['(800)', '(600)', '(60)', '"""3D Engine"""'], {'use_antialiasing': '(False)'}), "(800, 600, 60, '3D Engine', use_antialiasing=False)\n", (169, 220), False, 'from src.api import Settings\n')]
|
# -*- coding: utf-8 -*-
import os
import sys
import cv2
import numpy as np
IMAGE_SIZE = 64
# Resize the image to the specified size
def resize_image(image, height = IMAGE_SIZE, width = IMAGE_SIZE):
    top, bottom, left, right = (0, 0, 0, 0)
    # Get the image dimensions
    h, w, _ = image.shape
    # For an image whose height and width differ, find the longest edge
    longest_edge = max(h, w)
    # Compute how many pixels the short edge must be padded to match the long edge
    if h < longest_edge:
        dh = longest_edge - h
        top = dh // 2
        bottom = dh - top
    elif w < longest_edge:
        dw = longest_edge - w
        left = dw // 2
        right = dw - left
    BLACK = [0, 0, 0]
    # Pad the image borders so height and width are equal;
    # cv2.BORDER_CONSTANT fills the border with the color given by value
    constant = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT, value = BLACK)
    # Resize the image and return it
    return cv2.resize(constant, (height, width))
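# Worked example (added for illustration): for an input of shape (100, 60, 3),
# longest_edge = 100 and dw = 40, so left = right = 20 and the padded image is
# a 100x100 square before the final resize. Note that cv2.resize expects a
# (width, height) tuple, which is harmless here because both equal IMAGE_SIZE.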
# Read the training data
images = []
labels = []
def read_images(path_name):
    for dir_item in os.listdir(path_name):
        full_path = os.path.abspath(os.path.join(path_name, dir_item))
        if os.path.isdir(full_path):
            read_images(full_path)
        else:
            if dir_item.endswith('.jpg'):
                print(full_path)
                image = cv2.imread(full_path)
                image = resize_image(image, IMAGE_SIZE, IMAGE_SIZE)
                images.append(image)
                labels.append(path_name)
    return images, labels
# Load the training data from the given path
def load_dataset(path_name):
    images, labels = read_images(path_name)
    # Convert all images into a four-dimensional array with shape
    # (number of images, IMAGE_SIZE, IMAGE_SIZE, 3):
    # each image is 64 x 64 pixels with three color values (RGB) per pixel
    images = np.array(images)
    labels = np.array([0 if label.endswith('yangwk') else 1 for label in labels])
    return images, labels
if __name__ == '__main__':
    path_name = './data/'
    images, labels = load_dataset(path_name)
    print(images.shape)
    print(labels.shape)
|
[
"os.path.isdir",
"numpy.array",
"os.listdir",
"os.path.join"
] |
[((798, 819), 'os.listdir', 'os.listdir', (['path_name'], {}), '(path_name)\n', (808, 819), False, 'import os\n'), ((1350, 1366), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (1358, 1366), True, 'import numpy as np\n'), ((891, 915), 'os.path.isdir', 'os.path.isdir', (['full_path'], {}), '(full_path)\n', (904, 915), False, 'import os\n'), ((851, 884), 'os.path.join', 'os.path.join', (['path_name', 'dir_item'], {}), '(path_name, dir_item)\n', (863, 884), False, 'import os\n')]
|
"""
Regularizer class that also supports GPU code
<NAME> <EMAIL>
<NAME> <EMAIL>
March 04, 2018
"""
import arrayfire as af
import numpy as np
from opticaltomography import settings
np_complex_datatype = settings.np_complex_datatype
np_float_datatype = settings.np_float_datatype
af_float_datatype = settings.af_float_datatype
af_complex_datatype = settings.af_complex_datatype
class Regularizer:
"""
Highest-level Regularizer class that is responsible for parsing user arguments to create proximal operators
All proximal operators operate on complex variables (real & imaginary part separately)
Pure Real:
pure_real: boolean, whether or not to enforce the object to be purely real
Pure imaginary:
pure_imag: boolean, whether or not to enforce the object to be purely imaginary
Positivity:
positivity_real(positivity_imag): boolean, whether or not to enforce positivity for real(imaginary) part
Negativity:
negativity_real(negativity_imag): boolean, whether or not to enforce negativity for real(imaginary) part
LASSO (L1 regularizer):
lasso: boolean, whether or not to use LASSO proximal operator
lasso_parameter: threshold for LASSO
Total variation (3D only):
total_variation: boolean, whether or not to use total variation regularization
total_variation_gpu: boolean, whether or not to use GPU implementation
total_variation_parameter: scalar, regularization parameter (lambda)
total_variation_maxitr: integer, maximum number of iterations for total variation
"""
def __init__(self, configs = None, verbose = True, **kwargs):
#Given all parameters, construct all proximal operators
self.prox_list = []
reg_params = kwargs
        if configs is not None:
reg_params = self._parseConfigs(configs)
#Purely real
if reg_params.get("pure_real", False):
self.prox_list.append(PureReal())
#Purely imaginary
if reg_params.get("pure_imag", False):
self.prox_list.append(Pureimag())
#Total Variation
if reg_params.get("total_variation", False):
if reg_params.get("total_variation_gpu", False):
self.prox_list.append(TotalVariationGPU(**reg_params))
else:
self.prox_list.append(TotalVariationCPU(**reg_params))
#L1 Regularizer (LASSO)
elif reg_params.get("lasso", False):
self.prox_list.append(Lasso(reg_params.get("lasso_parameter", 1.0)))
#Others
else:
#Positivity
positivity_real = reg_params.get("positivity_real", False)
positivity_imag = reg_params.get("positivity_imag", False)
if positivity_real or positivity_imag:
self.prox_list.append(Positivity(positivity_real, positivity_imag))
#Negativity
negativity_real = reg_params.get("negativity_real", False)
negativity_imag = reg_params.get("negativity_imag", False)
if negativity_real or negativity_imag:
self.prox_list.append(Negativity(negativity_real, negativity_imag))
if verbose:
for prox_op in self.prox_list:
print("Regularizer -", prox_op.proximal_name)
def _parseConfigs(self, configs):
params = {}
params["pure_real"] = configs.pure_real
params["pure_imag"] = configs.pure_imag
#Total variation
params["total_variation"] = configs.total_variation
params["total_variation_gpu"] = configs.total_variation_gpu
params["total_variation_maxitr"] = configs.max_iter_tv
params["total_variation_order"] = configs.order_tv
params["total_variation_parameter"] = configs.reg_tv
#LASSO
params["lasso"] = configs.lasso
params["lasso_parameter"] = configs.reg_lasso
#Positivity/Negativity
if configs.positivity_real[0]:
if configs.positivity_real[1] == "larger":
params["positivity_real"] = True
else:
params["negativity_real"] = True
if configs.positivity_imag[0]:
if configs.positivity_imag[1] == "larger":
params["positivity_imag"] = True
else:
params["negativity_imag"] = True
return params
def computeCost(self, x):
cost = 0.0
for prox_op in self.prox_list:
cost_temp = prox_op.computeCost(x)
            if cost_temp is not None:
cost += cost_temp
return cost
def applyRegularizer(self, x):
for prox_op in self.prox_list:
x = prox_op.computeProx(x)
return x
class ProximalOperator():
def __init__(self, proximal_name):
self.proximal_name = proximal_name
def computeCost(self):
pass
def computeProx(self):
pass
def setParameter(self):
pass
def _boundRealValue(self, x, value = 0, flag_project = True):
"""If flag is true, only values that are greater than 'value' are preserved"""
if flag_project:
x[x < value] = 0
return x
class TotalVariationGPU(ProximalOperator):
def __init__(self, **kwargs):
proximal_name = "Total Variation"
parameter = kwargs.get("total_variation_parameter", 1.0)
maxitr = kwargs.get("total_variation_maxitr", 15)
self.order = kwargs.get("total_variation_order", 1)
self.pure_real = kwargs.get("pure_real", False)
self.pure_imag = kwargs.get("pure_imag", False)
#real part
if kwargs.get("positivity_real", False):
self.realProjector = lambda x: self._boundRealValue(x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "positivity_real")
elif kwargs.get("negativity_real", False):
self.realProjector = lambda x: -1.0 * self._boundRealValue(-1.0 * x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "negativity_real")
else:
self.realProjector = lambda x: x
#imaginary part
if kwargs.get("positivity_imag", False):
self.imagProjector = lambda x: self._boundRealValue(x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "positivity_imag")
elif kwargs.get("negativity_imag", False):
self.imagProjector = lambda x: -1.0 * self._boundRealValue(-1.0 * x, 0, True)
proximal_name = "%s+%s" % (proximal_name, "negativity_imag")
else:
self.imagProjector = lambda x: x
self.setParameter(parameter, maxitr)
super().__init__(proximal_name)
def setParameter(self, parameter, maxitr):
self.parameter = parameter
self.maxitr = maxitr
def computeCost(self, x):
return None
def _computeTVNorm(self, x):
x_norm = x**2
x_norm = af.sum(x_norm, dim = 3)**0.5
x_norm[x_norm<1.0] = 1.0
return x_norm
def computeProx(self, x):
if self.pure_real:
x = self._computeProxReal(af.real(x), self.realProjector) + 1.0j * 0.0
elif self.pure_imag:
x = 1.0j *self._computeProxReal(af.imag(x), self.imagProjector)
else:
x = self._computeProxReal(af.real(x), self.realProjector) \
+ 1.0j * self._computeProxReal(af.imag(x), self.imagProjector)
return x
def _filterD(self, x, axis):
        assert axis < 3, "This function only supports arrays of up to 3 dimensions!"
if self.order == 1:
if axis == 0:
Dx = x - af.shift(x, 1, 0, 0)
elif axis == 1:
Dx = x - af.shift(x, 0, 1, 0)
else:
Dx = x - af.shift(x, 0, 0, 1)
elif self.order == 2:
if axis == 0:
Dx = x - 2*af.shift(x, 1, 0, 0) + af.shift(x, 2, 0, 0)
elif axis == 1:
Dx = x - 2*af.shift(x, 0, 1, 0) + af.shift(x, 0, 2, 0)
else:
Dx = x - 2*af.shift(x, 0, 0, 1) + af.shift(x, 0, 0, 2)
elif self.order == 3:
if axis == 0:
Dx = x - 3*af.shift(x, 1, 0, 0) + 3*af.shift(x, 2, 0, 0) - af.shift(x, 3, 0, 0)
elif axis == 1:
Dx = x - 3*af.shift(x, 0, 1, 0) + 3*af.shift(x, 0, 2, 0) - af.shift(x, 0, 3, 0)
else:
Dx = x - 3*af.shift(x, 0, 0, 1) + 3*af.shift(x, 0, 0, 2) - af.shift(x, 0, 0, 3)
else:
            raise NotImplementedError("filter orders larger than 3 are not implemented!")
return Dx
def _filterDT(self, x):
if self.order == 1:
DTx = x[:, :, :, 0] - af.shift(x[ :, :, :, 0], -1, 0, 0) + \
x[:, :, :, 1] - af.shift(x[ :, :, :, 1], 0, -1, 0) + \
x[:, :, :, 2] - af.shift(x[ :, :, :, 2], 0, 0, -1)
elif self.order == 2:
DTx = x[:, :, :, 0] - 2*af.shift(x[ :, :, :, 0], -1, 0, 0) + af.shift(x[ :, :, :, 0], -2, 0, 0) + \
x[:, :, :, 1] - 2*af.shift(x[ :, :, :, 1], 0, -1, 0) + af.shift(x[ :, :, :, 1], 0, -2, 0) + \
x[:, :, :, 2] - 2*af.shift(x[ :, :, :, 2], 0, 0, -1) + af.shift(x[ :, :, :, 2], 0, 0, -2)
elif self.order == 3:
DTx = x[:, :, :, 0] - 3*af.shift(x[ :, :, :, 0], -1, 0, 0) + 3*af.shift(x[ :, :, :, 0], -2, 0, 0) - af.shift(x[ :, :, :, 0], -3, 0, 0) + \
x[:, :, :, 1] - 3*af.shift(x[ :, :, :, 1], 0, -1, 0) + 3*af.shift(x[ :, :, :, 1], 0, -2, 0) - af.shift(x[ :, :, :, 1], 0, -3, 0) + \
x[:, :, :, 2] - 3*af.shift(x[ :, :, :, 2], 0, 0, -1) + 3*af.shift(x[ :, :, :, 2], 0, 0, -2) - af.shift(x[ :, :, :, 2], 0, 0, -3)
else:
            raise NotImplementedError("filter orders larger than 3 are not implemented!")
return DTx
def _computeProxReal(self, x, projector):
t_k = 1.0
u_k = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype = af_float_datatype)
u_k1 = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype = af_float_datatype)
u_hat = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype = af_float_datatype)
grad_u_hat = af.constant(0.0, x.shape[0], x.shape[1], x.shape[2], dtype = af_float_datatype)
def _gradUpdate():
grad_u_hat = x - self.parameter * self._filterDT(u_hat)
return grad_u_hat
for iteration in range(self.maxitr):
if iteration > 0:
grad_u_hat = _gradUpdate()
else:
grad_u_hat[:, :, :] = x
grad_u_hat = projector(grad_u_hat)
u_k1[ :, :, :, 0] = u_hat[ :, :, :, 0] + (1.0/(12.0)**self.order/self.parameter) * self._filterD(grad_u_hat, axis=0)
u_k1[ :, :, :, 1] = u_hat[ :, :, :, 1] + (1.0/(12.0)**self.order/self.parameter) * self._filterD(grad_u_hat, axis=1)
u_k1[ :, :, :, 2] = u_hat[ :, :, :, 2] + (1.0/(12.0)**self.order/self.parameter) * self._filterD(grad_u_hat, axis=2)
u_k1_norm = self._computeTVNorm(u_k1)
u_k1[ :, :, :, 0] /= u_k1_norm
u_k1[ :, :, :, 1] /= u_k1_norm
u_k1[ :, :, :, 2] /= u_k1_norm
t_k1 = 0.5 * (1.0 + (1.0 + 4.0*t_k**2)**0.5)
beta = (t_k - 1.0)/t_k1
u_hat = (1.0 + beta)*u_k1 - beta*u_k
if iteration < self.maxitr - 1:
u_k = u_k1.copy()
return projector(_gradUpdate())
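# Note (added for illustration): the loop in _computeProxReal above is a
# FISTA-style accelerated projected-gradient iteration on the dual of the TV
# problem (cf. Beck & Teboulle's fast gradient projection method for TV
# denoising): u_k holds the dual variable, t_k the momentum coefficient, and
# the final primal estimate is recovered by one more gradient step followed by
# the range projector.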
class TotalVariationCPU(TotalVariationGPU):
def _computeTVNorm(self, x):
u_k1_norm = af.to_array(x)
u_k1_norm[:, :, :, :] *= u_k1_norm
u_k1_norm = af.sum(u_k1_norm, dim = 3)**0.5
u_k1_norm[u_k1_norm<1.0] = 1.0
return np.array(u_k1_norm)
def computeProx(self, x):
if self.pure_real:
x = self._computeProxReal(np.real(x), self.realProjector) + 1.0j * 0.0
elif self.pure_imag:
x = 1.0j *self._computeProxReal(np.imag(x), self.imagProjector)
else:
x = self._computeProxReal(np.real(x), self.realProjector) \
+ 1.0j * self._computeProxReal(np.imag(x), self.imagProjector)
return af.to_array(x)
def _computeProxReal(self, x, projector):
t_k = 1.0
        u_k = np.zeros(x.shape + (3,), dtype = np_float_datatype)
u_k1 = u_k.copy()
u_hat = u_k.copy()
def _gradUpdate():
u_hat_af = af.to_array(u_hat)
DTu_hat = u_hat_af[:, :, :, 0] - af.shift(u_hat_af[ :, :, :, 0], -1, 0, 0) + \
u_hat_af[:, :, :, 1] - af.shift(u_hat_af[ :, :, :, 1], 0, -1, 0) + \
u_hat_af[:, :, :, 2] - af.shift(u_hat_af[ :, :, :, 2], 0, 0, -1)
grad_u_hat = x - np.array(self.parameter * DTu_hat)
return grad_u_hat
for iteration in range(self.maxitr):
if iteration > 0:
grad_u_hat = _gradUpdate()
else:
grad_u_hat = x.copy()
grad_u_hat = projector(grad_u_hat)
u_k1[ :, :, :, 0] = u_hat[ :, :, :, 0] + (1.0/12.0/self.parameter) * (grad_u_hat-np.roll(grad_u_hat, 1, axis = 0))
u_k1[ :, :, :, 1] = u_hat[ :, :, :, 1] + (1.0/12.0/self.parameter) * (grad_u_hat-np.roll(grad_u_hat, 1, axis = 1))
u_k1[ :, :, :, 2] = u_hat[ :, :, :, 2] + (1.0/12.0/self.parameter) * (grad_u_hat-np.roll(grad_u_hat, 1, axis = 2))
u_k1_norm = self._computeTVNorm(u_k1)
u_k1[ :, :, :] /= u_k1_norm[:, :, :, np.newaxis]
t_k1 = 0.5 * (1.0 + (1.0 + 4.0*t_k**2)**0.5)
beta = (t_k - 1.0)/t_k1
u_hat = (1.0 + beta)*u_k1 - beta*u_k
if iteration < self.maxitr - 1:
u_k = u_k1.copy()
return projector(_gradUpdate())
class Positivity(ProximalOperator):
"""Enforce positivity constraint on a complex variable's real & imaginary part."""
def __init__(self, positivity_real, positivity_imag, proximal_name = "Positivity"):
super().__init__(proximal_name)
self.real = positivity_real
self.imag = positivity_imag
def computeCost(self, x):
return None
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = self._boundRealValue(af.real(x), 0, self.real) +\
1.0j * self._boundRealValue(af.imag(x), 0, self.imag)
else:
x = self._boundRealValue(np.real(x), 0, self.real) +\
1.0j * self._boundRealValue(np.imag(x), 0, self.imag)
return x
class Negativity(Positivity):
"""Enforce positivity constraint on a complex variable's real & imaginary part."""
def __init__(self, negativity_real, negativity_imag):
super().__init__(negativity_real, negativity_imag, "Negativity")
def computeProx(self, x):
return (-1.) * super().computeProx((-1.) * x)
class PureReal(ProximalOperator):
"""Enforce real constraint on a complex, imaginary part will be cleared"""
def __init__(self):
super().__init__("Pure real")
def computeCost(self, x):
return None
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = af.real(x) + 1j*0.0
else:
x = np.real(x) + 1j*0.0
return x
class Pureimag(ProximalOperator):
"""Enforce imaginary constraint on a complex, real part will be cleared"""
def __init__(self):
super().__init__("Pure imaginary")
def computeCost(self, x):
return None
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = 1j*af.imag(x)
else:
x = 1j*x.imag
return x
class Lasso(ProximalOperator):
"""||x||_1 regularizer, soft thresholding with certain parameter"""
def __init__(self, parameter):
super().__init__("LASSO")
self.setParameter(parameter)
def _softThreshold(self, x):
if type(x).__module__ == "arrayfire.array":
#POTENTIAL BUG: af.sign implementation does not agree with documentation
x = (af.sign(x)-0.5)*(-2.0) * (af.abs(x) - self.parameter) * (af.abs(x) > self.parameter)
else:
x = np.sign(x) * (np.abs(x) - self.parameter) * (np.abs(x) > self.parameter)
return x
def setParameter(self, parameter):
self.parameter = parameter
def computeCost(self, x):
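        # Note (added): this cost assumes an arrayfire input; a NumPy input
        # would need an equivalent NumPy-based L1 norm instead of af.norm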
return af.norm(af.moddims(x, np.prod(x.shape)), norm_type = af.NORM.VECTOR_1)
def computeProx(self, x):
if type(x).__module__ == "arrayfire.array":
x = self._softThreshold(af.real(x)) + 1.0j * self._softThreshold(af.imag(x))
else:
x = self._softThreshold(np.real(x)) + 1.0j * self._softThreshold(np.imag(x))
return x
#TODO: implement Tikhonov
class Tikhonov(ProximalOperator):
def __init__(self):
pass
def setParameter(self, parameter):
self.parameter = parameter
def computeCost(self, x):
pass
def computeProx(self, x):
return x
#TODO: implement pure amplitude constraint
class PureAmplitude(ProximalOperator):
def computeCost(self, x):
return None
def computeProx(self, x):
return x
#TODO: implement pure phase constraint
class PurePhase(ProximalOperator):
def computeCost(self, x):
return None
def computeProx(self, x):
return x
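# Hedged usage sketch (added for illustration; not part of the original module).
# It exercises the NumPy code path of the proximal operators through the
# top-level Regularizer interface documented above; the module's arrayfire and
# opticaltomography dependencies must still be importable.
if __name__ == "__main__":
    x = np.random.rand(8, 8, 8) + 1.0j * np.random.rand(8, 8, 8)
    reg = Regularizer(positivity_real=True, verbose=True)  # prints "Regularizer - Positivity"
    x_proj = reg.applyRegularizer(x)  # clips negative real parts to zero (a no-op for this positive sample)
    print("cost:", reg.computeCost(x_proj))  # Positivity defines no cost term, so this prints 0.0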
|
[
"arrayfire.to_array",
"arrayfire.abs",
"numpy.abs",
"arrayfire.sum",
"arrayfire.shift",
"arrayfire.imag",
"numpy.roll",
"numpy.zeros",
"numpy.prod",
"numpy.imag",
"numpy.array",
"numpy.real",
"numpy.sign",
"arrayfire.sign",
"arrayfire.real",
"arrayfire.constant"
] |
[((8794, 8879), 'arrayfire.constant', 'af.constant', (['(0.0)', 'x.shape[0]', 'x.shape[1]', 'x.shape[2]', '(3)'], {'dtype': 'af_float_datatype'}), '(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype=af_float_datatype\n )\n', (8805, 8879), True, 'import arrayfire as af\n'), ((8892, 8977), 'arrayfire.constant', 'af.constant', (['(0.0)', 'x.shape[0]', 'x.shape[1]', 'x.shape[2]', '(3)'], {'dtype': 'af_float_datatype'}), '(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype=af_float_datatype\n )\n', (8903, 8977), True, 'import arrayfire as af\n'), ((8990, 9075), 'arrayfire.constant', 'af.constant', (['(0.0)', 'x.shape[0]', 'x.shape[1]', 'x.shape[2]', '(3)'], {'dtype': 'af_float_datatype'}), '(0.0, x.shape[0], x.shape[1], x.shape[2], 3, dtype=af_float_datatype\n )\n', (9001, 9075), True, 'import arrayfire as af\n'), ((9088, 9165), 'arrayfire.constant', 'af.constant', (['(0.0)', 'x.shape[0]', 'x.shape[1]', 'x.shape[2]'], {'dtype': 'af_float_datatype'}), '(0.0, x.shape[0], x.shape[1], x.shape[2], dtype=af_float_datatype)\n', (9099, 9165), True, 'import arrayfire as af\n'), ((10341, 10355), 'arrayfire.to_array', 'af.to_array', (['x'], {}), '(x)\n', (10352, 10355), True, 'import arrayfire as af\n'), ((10498, 10517), 'numpy.array', 'np.array', (['u_k1_norm'], {}), '(u_k1_norm)\n', (10506, 10517), True, 'import numpy as np\n'), ((10884, 10898), 'arrayfire.to_array', 'af.to_array', (['x'], {}), '(x)\n', (10895, 10898), True, 'import arrayfire as af\n'), ((10977, 11026), 'numpy.zeros', 'np.zeros', (['(x.shape + (3,))'], {'dtype': 'np_float_datatype'}), '(x.shape + (3,), dtype=np_float_datatype)\n', (10985, 11026), True, 'import numpy as np\n'), ((6166, 6187), 'arrayfire.sum', 'af.sum', (['x_norm'], {'dim': '(3)'}), '(x_norm, dim=3)\n', (6172, 6187), True, 'import arrayfire as af\n'), ((10424, 10448), 'arrayfire.sum', 'af.sum', (['u_k1_norm'], {'dim': '(3)'}), '(u_k1_norm, dim=3)\n', (10430, 10448), True, 'import arrayfire as af\n'), ((11120, 11138), 'arrayfire.to_array', 'af.to_array', (['u_hat'], {}), '(u_hat)\n', (11131, 11138), True, 'import arrayfire as af\n'), ((7810, 7843), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 2]', '(0)', '(0)', '(-1)'], {}), '(x[:, :, :, 2], 0, 0, -1)\n', (7818, 7843), True, 'import arrayfire as af\n'), ((11348, 11388), 'arrayfire.shift', 'af.shift', (['u_hat_af[:, :, :, 2]', '(0)', '(0)', '(-1)'], {}), '(u_hat_af[:, :, :, 2], 0, 0, -1)\n', (11356, 11388), True, 'import arrayfire as af\n'), ((11410, 11444), 'numpy.array', 'np.array', (['(self.parameter * DTu_hat)'], {}), '(self.parameter * DTu_hat)\n', (11418, 11444), True, 'import numpy as np\n'), ((13669, 13679), 'arrayfire.real', 'af.real', (['x'], {}), '(x)\n', (13676, 13679), True, 'import arrayfire as af\n'), ((13707, 13717), 'numpy.real', 'np.real', (['x'], {}), '(x)\n', (13714, 13717), True, 'import numpy as np\n'), ((14037, 14047), 'arrayfire.imag', 'af.imag', (['x'], {}), '(x)\n', (14044, 14047), True, 'import arrayfire as af\n'), ((14752, 14768), 'numpy.prod', 'np.prod', (['x.shape'], {}), '(x.shape)\n', (14759, 14768), True, 'import numpy as np\n'), ((6321, 6331), 'arrayfire.real', 'af.real', (['x'], {}), '(x)\n', (6328, 6331), True, 'import arrayfire as af\n'), ((6768, 6788), 'arrayfire.shift', 'af.shift', (['x', '(1)', '(0)', '(0)'], {}), '(x, 1, 0, 0)\n', (6776, 6788), True, 'import arrayfire as af\n'), ((8136, 8169), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 2]', '(0)', '(0)', '(-2)'], {}), '(x[:, :, :, 2], 0, 0, -2)\n', (8144, 8169), True, 'import arrayfire as af\n'), ((10599, 10609), 'numpy.real', 
'np.real', (['x'], {}), '(x)\n', (10606, 10609), True, 'import numpy as np\n'), ((12808, 12818), 'arrayfire.real', 'af.real', (['x'], {}), '(x)\n', (12815, 12818), True, 'import arrayfire as af\n'), ((12949, 12959), 'numpy.real', 'np.real', (['x'], {}), '(x)\n', (12956, 12959), True, 'import numpy as np\n'), ((14498, 14507), 'arrayfire.abs', 'af.abs', (['x'], {}), '(x)\n', (14504, 14507), True, 'import arrayfire as af\n'), ((14541, 14551), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (14548, 14551), True, 'import numpy as np\n'), ((14586, 14595), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (14592, 14595), True, 'import numpy as np\n'), ((14903, 14913), 'arrayfire.real', 'af.real', (['x'], {}), '(x)\n', (14910, 14913), True, 'import arrayfire as af\n'), ((14994, 15004), 'numpy.real', 'np.real', (['x'], {}), '(x)\n', (15001, 15004), True, 'import numpy as np\n'), ((6424, 6434), 'arrayfire.imag', 'af.imag', (['x'], {}), '(x)\n', (6431, 6434), True, 'import arrayfire as af\n'), ((6493, 6503), 'arrayfire.real', 'af.real', (['x'], {}), '(x)\n', (6500, 6503), True, 'import arrayfire as af\n'), ((6826, 6846), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(1)', '(0)'], {}), '(x, 0, 1, 0)\n', (6834, 6846), True, 'import arrayfire as af\n'), ((6874, 6894), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(0)', '(1)'], {}), '(x, 0, 0, 1)\n', (6882, 6894), True, 'import arrayfire as af\n'), ((6980, 7000), 'arrayfire.shift', 'af.shift', (['x', '(2)', '(0)', '(0)'], {}), '(x, 2, 0, 0)\n', (6988, 7000), True, 'import arrayfire as af\n'), ((7740, 7773), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 1]', '(0)', '(-1)', '(0)'], {}), '(x[:, :, :, 1], 0, -1, 0)\n', (7748, 7773), True, 'import arrayfire as af\n'), ((8579, 8612), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 2]', '(0)', '(0)', '(-3)'], {}), '(x[:, :, :, 2], 0, 0, -3)\n', (8587, 8612), True, 'import arrayfire as af\n'), ((10702, 10712), 'numpy.imag', 'np.imag', (['x'], {}), '(x)\n', (10709, 10712), True, 'import numpy as np\n'), ((10771, 10781), 'numpy.real', 'np.real', (['x'], {}), '(x)\n', (10778, 10781), True, 'import numpy as np\n'), ((11263, 11303), 'arrayfire.shift', 'af.shift', (['u_hat_af[:, :, :, 1]', '(0)', '(-1)', '(0)'], {}), '(u_hat_af[:, :, :, 1], 0, -1, 0)\n', (11271, 11303), True, 'import arrayfire as af\n'), ((11727, 11757), 'numpy.roll', 'np.roll', (['grad_u_hat', '(1)'], {'axis': '(0)'}), '(grad_u_hat, 1, axis=0)\n', (11734, 11757), True, 'import numpy as np\n'), ((11846, 11876), 'numpy.roll', 'np.roll', (['grad_u_hat', '(1)'], {'axis': '(1)'}), '(grad_u_hat, 1, axis=1)\n', (11853, 11876), True, 'import numpy as np\n'), ((11965, 11995), 'numpy.roll', 'np.roll', (['grad_u_hat', '(1)'], {'axis': '(2)'}), '(grad_u_hat, 1, axis=2)\n', (11972, 11995), True, 'import numpy as np\n'), ((12887, 12897), 'arrayfire.imag', 'af.imag', (['x'], {}), '(x)\n', (12894, 12897), True, 'import arrayfire as af\n'), ((13028, 13038), 'numpy.imag', 'np.imag', (['x'], {}), '(x)\n', (13035, 13038), True, 'import numpy as np\n'), ((14467, 14476), 'arrayfire.abs', 'af.abs', (['x'], {}), '(x)\n', (14473, 14476), True, 'import arrayfire as af\n'), ((14555, 14564), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (14561, 14564), True, 'import numpy as np\n'), ((14944, 14954), 'arrayfire.imag', 'af.imag', (['x'], {}), '(x)\n', (14951, 14954), True, 'import arrayfire as af\n'), ((15035, 15045), 'numpy.imag', 'np.imag', (['x'], {}), '(x)\n', (15042, 15045), True, 'import numpy as np\n'), ((6565, 6575), 'arrayfire.imag', 'af.imag', (['x'], {}), '(x)\n', (6572, 6575), 
True, 'import arrayfire as af\n'), ((7063, 7083), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(2)', '(0)'], {}), '(x, 0, 2, 0)\n', (7071, 7083), True, 'import arrayfire as af\n'), ((7136, 7156), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(0)', '(2)'], {}), '(x, 0, 0, 2)\n', (7144, 7156), True, 'import arrayfire as af\n'), ((7266, 7286), 'arrayfire.shift', 'af.shift', (['x', '(3)', '(0)', '(0)'], {}), '(x, 3, 0, 0)\n', (7274, 7286), True, 'import arrayfire as af\n'), ((8099, 8132), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 2]', '(0)', '(0)', '(-1)'], {}), '(x[:, :, :, 2], 0, 0, -1)\n', (8107, 8132), True, 'import arrayfire as af\n'), ((10843, 10853), 'numpy.imag', 'np.imag', (['x'], {}), '(x)\n', (10850, 10853), True, 'import numpy as np\n'), ((14441, 14451), 'arrayfire.sign', 'af.sign', (['x'], {}), '(x)\n', (14448, 14451), True, 'import arrayfire as af\n'), ((6956, 6976), 'arrayfire.shift', 'af.shift', (['x', '(1)', '(0)', '(0)'], {}), '(x, 1, 0, 0)\n', (6964, 6976), True, 'import arrayfire as af\n'), ((7374, 7394), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(3)', '(0)'], {}), '(x, 0, 3, 0)\n', (7382, 7394), True, 'import arrayfire as af\n'), ((7472, 7492), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(0)', '(3)'], {}), '(x, 0, 0, 3)\n', (7480, 7492), True, 'import arrayfire as af\n'), ((7670, 7703), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 0]', '(-1)', '(0)', '(0)'], {}), '(x[:, :, :, 0], -1, 0, 0)\n', (7678, 7703), True, 'import arrayfire as af\n'), ((8036, 8069), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 1]', '(0)', '(-2)', '(0)'], {}), '(x[:, :, :, 1], 0, -2, 0)\n', (8044, 8069), True, 'import arrayfire as af\n'), ((8542, 8575), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 2]', '(0)', '(0)', '(-2)'], {}), '(x[:, :, :, 2], 0, 0, -2)\n', (8550, 8575), True, 'import arrayfire as af\n'), ((11178, 11218), 'arrayfire.shift', 'af.shift', (['u_hat_af[:, :, :, 0]', '(-1)', '(0)', '(0)'], {}), '(u_hat_af[:, :, :, 0], -1, 0, 0)\n', (11186, 11218), True, 'import arrayfire as af\n'), ((7039, 7059), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(1)', '(0)'], {}), '(x, 0, 1, 0)\n', (7047, 7059), True, 'import arrayfire as af\n'), ((7112, 7132), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(0)', '(1)'], {}), '(x, 0, 0, 1)\n', (7120, 7132), True, 'import arrayfire as af\n'), ((7243, 7263), 'arrayfire.shift', 'af.shift', (['x', '(2)', '(0)', '(0)'], {}), '(x, 2, 0, 0)\n', (7251, 7263), True, 'import arrayfire as af\n'), ((8503, 8536), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 2]', '(0)', '(0)', '(-1)'], {}), '(x[:, :, :, 2], 0, 0, -1)\n', (8511, 8536), True, 'import arrayfire as af\n'), ((7217, 7237), 'arrayfire.shift', 'af.shift', (['x', '(1)', '(0)', '(0)'], {}), '(x, 1, 0, 0)\n', (7225, 7237), True, 'import arrayfire as af\n'), ((7351, 7371), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(2)', '(0)'], {}), '(x, 0, 2, 0)\n', (7359, 7371), True, 'import arrayfire as af\n'), ((7449, 7469), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(0)', '(2)'], {}), '(x, 0, 0, 2)\n', (7457, 7469), True, 'import arrayfire as af\n'), ((7999, 8032), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 1]', '(0)', '(-1)', '(0)'], {}), '(x[:, :, :, 1], 0, -1, 0)\n', (8007, 8032), True, 'import arrayfire as af\n'), ((8440, 8473), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 1]', '(0)', '(-3)', '(0)'], {}), '(x[:, :, :, 1], 0, -3, 0)\n', (8448, 8473), True, 'import arrayfire as af\n'), ((7325, 7345), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(1)', '(0)'], {}), '(x, 0, 1, 0)\n', (7333, 7345), True, 
'import arrayfire as af\n'), ((7423, 7443), 'arrayfire.shift', 'af.shift', (['x', '(0)', '(0)', '(1)'], {}), '(x, 0, 0, 1)\n', (7431, 7443), True, 'import arrayfire as af\n'), ((7936, 7969), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 0]', '(-2)', '(0)', '(0)'], {}), '(x[:, :, :, 0], -2, 0, 0)\n', (7944, 7969), True, 'import arrayfire as af\n'), ((8403, 8436), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 1]', '(0)', '(-2)', '(0)'], {}), '(x[:, :, :, 1], 0, -2, 0)\n', (8411, 8436), True, 'import arrayfire as af\n'), ((7899, 7932), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 0]', '(-1)', '(0)', '(0)'], {}), '(x[:, :, :, 0], -1, 0, 0)\n', (7907, 7932), True, 'import arrayfire as af\n'), ((8364, 8397), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 1]', '(0)', '(-1)', '(0)'], {}), '(x[:, :, :, 1], 0, -1, 0)\n', (8372, 8397), True, 'import arrayfire as af\n'), ((8301, 8334), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 0]', '(-3)', '(0)', '(0)'], {}), '(x[:, :, :, 0], -3, 0, 0)\n', (8309, 8334), True, 'import arrayfire as af\n'), ((8264, 8297), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 0]', '(-2)', '(0)', '(0)'], {}), '(x[:, :, :, 0], -2, 0, 0)\n', (8272, 8297), True, 'import arrayfire as af\n'), ((8225, 8258), 'arrayfire.shift', 'af.shift', (['x[:, :, :, 0]', '(-1)', '(0)', '(0)'], {}), '(x[:, :, :, 0], -1, 0, 0)\n', (8233, 8258), True, 'import arrayfire as af\n')]
|
import os, tempfile, subprocess
from string import Template
from PuzzleLib import Config
from PuzzleLib.Compiler.JIT import getCacheDir, computeHash, FileLock
from PuzzleLib.Cuda.SourceModule import SourceModule, ElementwiseKernel, ElementHalf2Kernel, ReductionKernel
from PuzzleLib.Cuda.SourceModule import eltwiseTest, reductionTest
from PuzzleLib.Hip import Driver as HipDriver
hipWarpBit, hipBlockBit = 6, 8
hipWarpSize, hipBlockSize = 1 << hipWarpBit, 1 << hipBlockBit
class HipSourceModule(SourceModule):
Driver = HipDriver
runtimeHeader = """
#include <hip/hip_runtime.h>
#define __shfl_xor_sync(mask, value, laneMask, ...) __shfl_xor(value, laneMask, __VA_ARGS__)
#define __shfl_up_sync(mask, value, delta, ...) __shfl_up(value, delta, __VA_ARGS__)
"""
def __init__(self, source, options=None, includes=None, externC=False, verbose=True, debug=False, recompile=False,
name=None):
super().__init__(source, options, includes, externC, verbose, debug, name)
self.recompile = recompile
self.includes = [] if self.includes is None else self.includes
def build(self):
source = self.source.replace("cuda_fp16.h", "hip/hip_fp16.h")
source = ("%sextern \"C\"\n{\n%s\n}\n" if self.externC else "%s%s") % (self.runtimeHeader, source)
cachedir = getCacheDir(os.path.join(Config.libname, Config.Backend.hip.name))
with FileLock(cachedir):
try:
codename = self.tryBuild(source, cachedir)
except subprocess.CalledProcessError as e:
log = e.output.decode()
text = log if self.debug else "%s\nSource:\n%s" % (
log,
"\n".join("%-4s %s" % (i + 1, line) for i, line in enumerate(source.splitlines(keepends=False)))
)
raise self.Driver.RtcError(text)
with open(codename, mode="rb") as f:
hsaco = f.read()
self.cumod = self.Driver.Module(hsaco)
def tryBuild(self, source, cachedir):
options, includes = self.options, self.includes
hashsum = computeHash(source, *options, *includes)
codepath = os.path.join(cachedir, hashsum)
name, srcext = "module" if self.name is None else self.name, ".hip.cpp"
codename = os.path.join(codepath, "%s.code" % name)
sourcename = os.path.join(codepath, "%s%s" % (name, srcext))
if not os.path.exists(codename) or self.recompile:
os.makedirs(codepath, exist_ok=True)
args = ["hipcc", "--genco"] + options + ["-o", codename]
stderr = subprocess.STDOUT if self.verbose else subprocess.DEVNULL
Config.getLogger().debug("No cache found for HIP extension '%s', performing compilation ...", name)
if not self.debug:
f = tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", suffix=srcext, delete=False)
try:
with f:
f.write(source)
subprocess.check_output(args + [f.name], stderr=stderr)
finally:
os.remove(f.name)
else:
with open(sourcename, mode="w", encoding="utf-8") as f:
f.write(source)
subprocess.check_output(args + [sourcename], stderr=stderr)
else:
Config.getLogger().debug("Found cached compilation for HIP extension '%s', skipping compilation ...", name)
return codename
@classmethod
def getDefaultOptions(cls):
deviceIdx = cls.Driver.Device.getCurrent()
return ["--targets gfx%s" % cls.Driver.Device(deviceIdx).getArch()]
class HipEltwiseKernel(ElementwiseKernel):
Driver = HipDriver
SourceModule = HipSourceModule
warpBit, warpSize = hipWarpBit, hipWarpSize
blockBit, blockSize = hipBlockBit, hipBlockSize
class HipEltHalf2Kernel(ElementHalf2Kernel):
Driver = HipDriver
SourceModule = HipSourceModule
warpBit, warpSize = hipWarpBit, hipWarpSize
blockBit, blockSize = hipBlockBit, hipBlockSize
class HipReductionKernel(ReductionKernel):
Driver = HipDriver
SourceModule = HipSourceModule
warpBit, warpSize = hipWarpBit, hipWarpSize
blockBit, blockSize = hipBlockBit, hipBlockSize
reduceTmpl = Template("""
#undef READ_AND_MAP
#undef REDUCE
#define READ_AND_MAP(i) ($mapExpr)
#define REDUCE(a, b) ($reduceExpr)
extern "C" __global__ void $name($arguments, $T *partials, int size)
{
__shared__ $T sdata[$warpSize];
int tid = threadIdx.x;
int gid = tid + blockIdx.x * $NT;
$T acc = $neutral;
for (int i = gid; i < size; i += $NT * gridDim.x)
acc = REDUCE(acc, READ_AND_MAP(i));
for (int mask = $warpSize / 2; mask > 0; mask /= 2)
{
$T upval = __shfl_xor(acc, mask, $warpSize);
acc = REDUCE(acc, upval);
}
if (tid % $warpSize == 0)
sdata[tid / $warpSize] = acc;
__syncthreads();
int nwarps = $NT / $warpSize;
if (tid < $warpSize)
{
acc = (tid < nwarps) ? sdata[tid] : $neutral;
for (int mask = $warpSize / 2; mask > 0; mask /= 2)
{
$T upval = __shfl_xor(acc, mask, $warpSize);
acc = REDUCE(acc, upval);
}
}
if (tid == 0)
partials[blockIdx.x] = acc;
}
""")
def unittest():
from PuzzleLib.Hip import Backend
for deviceIdx in range(Backend.getDeviceCount()):
bnd = Backend.getBackend(deviceIdx)
eltwiseTest(bnd)
reductionTest(bnd)
if __name__ == "__main__":
unittest()
|
[
"PuzzleLib.Hip.Backend.getDeviceCount",
"tempfile.NamedTemporaryFile",
"os.remove",
"os.path.join",
"PuzzleLib.Cuda.SourceModule.eltwiseTest",
"PuzzleLib.Cuda.SourceModule.reductionTest",
"PuzzleLib.Compiler.JIT.FileLock",
"os.makedirs",
"subprocess.check_output",
"os.path.exists",
"PuzzleLib.Config.getLogger",
"string.Template",
"PuzzleLib.Compiler.JIT.computeHash",
"PuzzleLib.Hip.Backend.getBackend"
] |
[((3838, 4759), 'string.Template', 'Template', (['"""\n\n#undef READ_AND_MAP\n#undef REDUCE\n\n#define READ_AND_MAP(i) ($mapExpr)\n#define REDUCE(a, b) ($reduceExpr)\n\n\nextern "C" __global__ void $name($arguments, $T *partials, int size)\n{\n\t__shared__ $T sdata[$warpSize];\n\n\tint tid = threadIdx.x;\n\tint gid = tid + blockIdx.x * $NT;\n\n\t$T acc = $neutral;\n\n\tfor (int i = gid; i < size; i += $NT * gridDim.x)\n\t\tacc = REDUCE(acc, READ_AND_MAP(i));\n\n\tfor (int mask = $warpSize / 2; mask > 0; mask /= 2)\n\t{\n\t\t$T upval = __shfl_xor(acc, mask, $warpSize);\n\t\tacc = REDUCE(acc, upval);\n\t}\n\n\tif (tid % $warpSize == 0)\n\t\tsdata[tid / $warpSize] = acc;\n\n\t__syncthreads();\n\tint nwarps = $NT / $warpSize;\n\n\tif (tid < $warpSize)\n\t{\n\t\tacc = (tid < nwarps) ? sdata[tid] : $neutral;\n\n\t\tfor (int mask = $warpSize / 2; mask > 0; mask /= 2)\n\t\t{\n\t\t\t$T upval = __shfl_xor(acc, mask, $warpSize);\n\t\t\tacc = REDUCE(acc, upval);\n\t\t}\n\t}\n\n\tif (tid == 0)\n\t\tpartials[blockIdx.x] = acc;\n}\n\n"""'], {}), '(\n """\n\n#undef READ_AND_MAP\n#undef REDUCE\n\n#define READ_AND_MAP(i) ($mapExpr)\n#define REDUCE(a, b) ($reduceExpr)\n\n\nextern "C" __global__ void $name($arguments, $T *partials, int size)\n{\n\t__shared__ $T sdata[$warpSize];\n\n\tint tid = threadIdx.x;\n\tint gid = tid + blockIdx.x * $NT;\n\n\t$T acc = $neutral;\n\n\tfor (int i = gid; i < size; i += $NT * gridDim.x)\n\t\tacc = REDUCE(acc, READ_AND_MAP(i));\n\n\tfor (int mask = $warpSize / 2; mask > 0; mask /= 2)\n\t{\n\t\t$T upval = __shfl_xor(acc, mask, $warpSize);\n\t\tacc = REDUCE(acc, upval);\n\t}\n\n\tif (tid % $warpSize == 0)\n\t\tsdata[tid / $warpSize] = acc;\n\n\t__syncthreads();\n\tint nwarps = $NT / $warpSize;\n\n\tif (tid < $warpSize)\n\t{\n\t\tacc = (tid < nwarps) ? 
sdata[tid] : $neutral;\n\n\t\tfor (int mask = $warpSize / 2; mask > 0; mask /= 2)\n\t\t{\n\t\t\t$T upval = __shfl_xor(acc, mask, $warpSize);\n\t\t\tacc = REDUCE(acc, upval);\n\t\t}\n\t}\n\n\tif (tid == 0)\n\t\tpartials[blockIdx.x] = acc;\n}\n\n"""\n )\n', (3846, 4759), False, 'from string import Template\n'), ((1923, 1963), 'PuzzleLib.Compiler.JIT.computeHash', 'computeHash', (['source', '*options', '*includes'], {}), '(source, *options, *includes)\n', (1934, 1963), False, 'from PuzzleLib.Compiler.JIT import getCacheDir, computeHash, FileLock\n'), ((1978, 2009), 'os.path.join', 'os.path.join', (['cachedir', 'hashsum'], {}), '(cachedir, hashsum)\n', (1990, 2009), False, 'import os, tempfile, subprocess\n'), ((2098, 2138), 'os.path.join', 'os.path.join', (['codepath', "('%s.code' % name)"], {}), "(codepath, '%s.code' % name)\n", (2110, 2138), False, 'import os, tempfile, subprocess\n'), ((2154, 2201), 'os.path.join', 'os.path.join', (['codepath', "('%s%s' % (name, srcext))"], {}), "(codepath, '%s%s' % (name, srcext))\n", (2166, 2201), False, 'import os, tempfile, subprocess\n'), ((4828, 4852), 'PuzzleLib.Hip.Backend.getDeviceCount', 'Backend.getDeviceCount', ([], {}), '()\n', (4850, 4852), False, 'from PuzzleLib.Hip import Backend\n'), ((4863, 4892), 'PuzzleLib.Hip.Backend.getBackend', 'Backend.getBackend', (['deviceIdx'], {}), '(deviceIdx)\n', (4881, 4892), False, 'from PuzzleLib.Hip import Backend\n'), ((4896, 4912), 'PuzzleLib.Cuda.SourceModule.eltwiseTest', 'eltwiseTest', (['bnd'], {}), '(bnd)\n', (4907, 4912), False, 'from PuzzleLib.Cuda.SourceModule import eltwiseTest, reductionTest\n'), ((4915, 4933), 'PuzzleLib.Cuda.SourceModule.reductionTest', 'reductionTest', (['bnd'], {}), '(bnd)\n', (4928, 4933), False, 'from PuzzleLib.Cuda.SourceModule import eltwiseTest, reductionTest\n'), ((1290, 1343), 'os.path.join', 'os.path.join', (['Config.libname', 'Config.Backend.hip.name'], {}), '(Config.libname, Config.Backend.hip.name)\n', (1302, 1343), False, 'import os, tempfile, subprocess\n'), ((1353, 1371), 'PuzzleLib.Compiler.JIT.FileLock', 'FileLock', (['cachedir'], {}), '(cachedir)\n', (1361, 1371), False, 'from PuzzleLib.Compiler.JIT import getCacheDir, computeHash, FileLock\n'), ((2259, 2295), 'os.makedirs', 'os.makedirs', (['codepath'], {'exist_ok': '(True)'}), '(codepath, exist_ok=True)\n', (2270, 2295), False, 'import os, tempfile, subprocess\n'), ((2212, 2236), 'os.path.exists', 'os.path.exists', (['codename'], {}), '(codename)\n', (2226, 2236), False, 'import os, tempfile, subprocess\n'), ((2562, 2650), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'encoding': '"""utf-8"""', 'suffix': 'srcext', 'delete': '(False)'}), "(mode='w', encoding='utf-8', suffix=srcext,\n delete=False)\n", (2589, 2650), False, 'import os, tempfile, subprocess\n'), ((2886, 2945), 'subprocess.check_output', 'subprocess.check_output', (['(args + [sourcename])'], {'stderr': 'stderr'}), '(args + [sourcename], stderr=stderr)\n', (2909, 2945), False, 'import os, tempfile, subprocess\n'), ((2431, 2449), 'PuzzleLib.Config.getLogger', 'Config.getLogger', ([], {}), '()\n', (2447, 2449), False, 'from PuzzleLib import Config\n'), ((2697, 2752), 'subprocess.check_output', 'subprocess.check_output', (['(args + [f.name])'], {'stderr': 'stderr'}), '(args + [f.name], stderr=stderr)\n', (2720, 2752), False, 'import os, tempfile, subprocess\n'), ((2772, 2789), 'os.remove', 'os.remove', (['f.name'], {}), '(f.name)\n', (2781, 2789), False, 'import os, tempfile, subprocess\n'), ((2958, 2976), 
'PuzzleLib.Config.getLogger', 'Config.getLogger', ([], {}), '()\n', (2974, 2976), False, 'from PuzzleLib import Config\n')]
|
"""FindDockerStackFiles
Crawls the fetched application registry directory (from FetchAppRegistry)
and locates all docker-stack.yml files"""
__author__ = '<EMAIL>'
import os
from modules.steps.base_pipeline_step import BasePipelineStep
from modules.util import environment, data_defs
class FindDockerStackFiles(BasePipelineStep):
def __init__(self):
BasePipelineStep.__init__(self)
self.registry_root = None
def get_required_env_variables(self):
return [environment.REGISTRY_SUB_DIRECTORY]
def get_required_data_keys(self):
return []
def run_step(self, pipeline_data):
self.registry_root = environment.get_registry_path()
pipeline_data[data_defs.STACK_FILES] = self.walk_repository()
return pipeline_data
def walk_repository(self):
stack_files = []
for dirpath, _, files in os.walk(self.registry_root):
for file in files:
if file == 'docker-stack.yml':
stack_files.append(os.path.join(dirpath, file))
self.log.debug('Found %s docker stack files', len(stack_files))
return stack_files
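# Hedged usage sketch (illustrative; assumes the REGISTRY_SUB_DIRECTORY
# environment variable is set so environment.get_registry_path() can resolve
# the fetched registry directory):
#
#   step = FindDockerStackFiles()
#   pipeline_data = step.run_step({})
#   for stack_file in pipeline_data[data_defs.STACK_FILES]:
#       print(stack_file)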
|
[
"modules.steps.base_pipeline_step.BasePipelineStep.__init__",
"os.walk",
"os.path.join",
"modules.util.environment.get_registry_path"
] |
[((366, 397), 'modules.steps.base_pipeline_step.BasePipelineStep.__init__', 'BasePipelineStep.__init__', (['self'], {}), '(self)\n', (391, 397), False, 'from modules.steps.base_pipeline_step import BasePipelineStep\n'), ((653, 684), 'modules.util.environment.get_registry_path', 'environment.get_registry_path', ([], {}), '()\n', (682, 684), False, 'from modules.util import environment, data_defs\n'), ((874, 901), 'os.walk', 'os.walk', (['self.registry_root'], {}), '(self.registry_root)\n', (881, 901), False, 'import os\n'), ((1020, 1047), 'os.path.join', 'os.path.join', (['dirpath', 'file'], {}), '(dirpath, file)\n', (1032, 1047), False, 'import os\n')]
|
# <NAME>
# PandS project 2020
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# Import data as pandas dataframe
iris_data = pd.read_csv('iris.data', header=None)
# assign column headers
iris_data.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']
# A. Output a summary of each variable to a single txt file.
# Isolate columns according to data type
float_values = iris_data.iloc[:,0:4]
str_values = iris_data.iloc[:,4]
# Use the describe function to summarise the numeric data
float_summary = float_values.describe()
# Establish the 3 unique values of the species column.
# This creates an array holding each unique value.
str_summary = str_values.unique()
# Reshape the str_summary array into a column vector and convert it to a dataframe
str_summary = str_summary[:, None]
str_summary = pd.DataFrame({"Species": str_summary[:, 0]})
# Format string variable summary
# Add a column containing the quantity of each unique value
# (the iris dataset contains 50 samples per species)
quantity = ['50', '50', '50']
str_summary['Count'] = quantity
# Rename rows in str_summary
str_summary.index = ['Species_A', 'Species_B', 'Species_C']
# Format summary output and write to text file
with open("iris_summary.txt", "w") as f:
heading = "SUMMARY OF VARIABLES IN IRIS DATASET"
f.write(heading + "\n")
f.write("=" * len(heading) + "\n\n\n\n")
heading2 = "NUMERIC VARIABLE SUMMARY"
f.write(heading2 + "\n")
f.write("=" * len(heading2) + "\n")
f.write(float_summary.to_string() + "\n\n\n\n")
heading3 = "DEPENDENT VARIABLE SUMMARY"
f.write(heading3 + "\n")
f.write("=" * len(heading3) + "\n")
f.write(str_summary.to_string() + "\n\n\n\n\n\n\n")
# B. Save a histogram of each variable to png files
# Assign each column to a variable for easier manipulation
sep_len = iris_data['sepal_length']
sep_width = iris_data['sepal_width']
pet_len = iris_data['petal_length']
pet_width = iris_data['petal_width']
species = iris_data['species']
# Write a function which outputs a histogram for each dataset variable and saves
# it as a png file.
# First for numeric variables
def var_hist(var_data, fig_num, x_label, y_label, title, filepath):
plt.figure(fig_num)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
    plt.hist(var_data, rwidth=0.9)
plt.savefig(filepath)
plt.close() # Close figure so plot won't be displayed later
# Then for string variable
def var_hist2(var_data, fig_num, x_label, y_label, title, filepath):
plt.figure(fig_num)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.hist(var_data, bins=3, rwidth=0.9)
plt.xticks(np.arange(0,3))
plt.savefig(filepath)
plt.close()
# Call function for each variable
var_hist(sep_len, 1, 'sepal_length_cm', 'Frequency', 'Sepal Length', 'sepal_length.png')
var_hist(sep_width, 2, 'sepal_width_cm', 'Frequency', 'Sepal Width', 'sepal_width.png')
var_hist(pet_len, 3, 'petal_length_cm', 'Frequency', 'Petal Length', 'petal_length.png')
var_hist(pet_width, 4, 'petal_width_cm', 'Frequency', 'Petal Width', 'petal_width.png')
var_hist2(species, 5, 'species', 'Frequency', 'Iris Species', 'species.png')
# 4 axes on one figure for better visual comparison
fig, axs = plt.subplots(2, 2)
axs1 = axs[0, 0]
axs1.hist(sep_len, rwidth=0.9)
axs1.set_title('Sepal_Length_Cm')
axs1.set(ylabel='frequency')
axs2 = axs[0, 1]
axs2.hist(sep_width, rwidth=0.9)
axs2.set_title('Sepal_Width_Cm')
axs2.set(ylabel='frequency')
axs3 = axs[1, 0]
axs3.hist(pet_len, rwidth=0.9)
axs3.set_title('Petal_Length_Cm')
axs3.set(ylabel='frequency')
axs4 = axs[1, 1]
axs4.hist(pet_width, rwidth=0.9)
axs4.set_title('Petal_Width_Cm')
axs4.set(ylabel='frequency')
#plt.show()
plt.close()
# C. Output a scatter plot of each pair of variables
# Scatter plot with matplotlib (no colour separation)
plt.scatter(sep_len, sep_width)
plt.xlabel('sepal_length')
plt.ylabel('sepal_width')
#plt.show()
plt.close()
# Write a function which outputs a scatter plot of each pair of variables.
# Each category of the dependent variable (iris species) is distinguished by colour
def scatter(x, y):
    sns.set(style="darkgrid", font_scale=1.25)
    # Recent seaborn versions require x, y and data to be passed as keyword arguments
    sns.lmplot(x=x, y=y, data=iris_data, fit_reg=False, hue='species')
    plt.show()
    plt.close()
# Call function for each pair of variables
scatter('sepal_length', 'sepal_width')
scatter('sepal_length', 'petal_length')
scatter('sepal_length', 'petal_width')
scatter('sepal_width', 'petal_length')
scatter('sepal_width', 'petal_width')
scatter('petal_length', 'petal_width')
# Output pairplot using kde to represent marginal distribution
sns.set(style='ticks', font_scale=1.25, color_codes=True)
sns.pairplot(iris_data, hue='species', diag_kind='kde')
plt.show()
|
[
"pandas.DataFrame",
"matplotlib.pyplot.title",
"seaborn.set",
"seaborn.lmplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"pandas.read_csv",
"matplotlib.pyplot.close",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.figure",
"numpy.arange",
"seaborn.pairplot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((171, 208), 'pandas.read_csv', 'pd.read_csv', (['"""iris.data"""'], {'header': 'None'}), "('iris.data', header=None)\n", (182, 208), True, 'import pandas as pd\n'), ((849, 893), 'pandas.DataFrame', 'pd.DataFrame', (["{'Species': str_summary[:, 0]}"], {}), "({'Species': str_summary[:, 0]})\n", (861, 893), True, 'import pandas as pd\n'), ((3231, 3249), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (3243, 3249), True, 'import matplotlib.pyplot as plt\n'), ((3715, 3726), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3724, 3726), True, 'import matplotlib.pyplot as plt\n'), ((3839, 3870), 'matplotlib.pyplot.scatter', 'plt.scatter', (['sep_len', 'sep_width'], {}), '(sep_len, sep_width)\n', (3850, 3870), True, 'import matplotlib.pyplot as plt\n'), ((3871, 3897), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""sepal_length"""'], {}), "('sepal_length')\n", (3881, 3897), True, 'import matplotlib.pyplot as plt\n'), ((3898, 3923), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""sepal_width"""'], {}), "('sepal_width')\n", (3908, 3923), True, 'import matplotlib.pyplot as plt\n'), ((3936, 3947), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3945, 3947), True, 'import matplotlib.pyplot as plt\n'), ((4610, 4667), 'seaborn.set', 'sns.set', ([], {'style': '"""ticks"""', 'font_scale': '(1.25)', 'color_codes': '(True)'}), "(style='ticks', font_scale=1.25, color_codes=True)\n", (4617, 4667), True, 'import seaborn as sns\n'), ((4668, 4723), 'seaborn.pairplot', 'sns.pairplot', (['iris_data'], {'hue': '"""species"""', 'diag_kind': '"""kde"""'}), "(iris_data, hue='species', diag_kind='kde')\n", (4680, 4723), True, 'import seaborn as sns\n'), ((4724, 4734), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4732, 4734), True, 'import matplotlib.pyplot as plt\n'), ((2178, 2197), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {}), '(fig_num)\n', (2188, 2197), True, 'import matplotlib.pyplot as plt\n'), ((2202, 2221), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (2212, 2221), True, 'import matplotlib.pyplot as plt\n'), ((2226, 2245), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (2236, 2245), True, 'import matplotlib.pyplot as plt\n'), ((2250, 2266), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2259, 2266), True, 'import matplotlib.pyplot as plt\n'), ((2271, 2301), 'matplotlib.pyplot.hist', 'plt.hist', (['var_data'], {'rwidth': '(0.9)'}), '(var_data, rwidth=0.9)\n', (2279, 2301), True, 'import matplotlib.pyplot as plt\n'), ((2307, 2328), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filepath'], {}), '(filepath)\n', (2318, 2328), True, 'import matplotlib.pyplot as plt\n'), ((2333, 2344), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2342, 2344), True, 'import matplotlib.pyplot as plt\n'), ((2495, 2514), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {}), '(fig_num)\n', (2505, 2514), True, 'import matplotlib.pyplot as plt\n'), ((2519, 2538), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (2529, 2538), True, 'import matplotlib.pyplot as plt\n'), ((2543, 2562), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (2553, 2562), True, 'import matplotlib.pyplot as plt\n'), ((2567, 2583), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2576, 2583), True, 'import matplotlib.pyplot as plt\n'), ((2588, 2626), 'matplotlib.pyplot.hist', 'plt.hist', (['var_data'], {'bins': 
'(3)', 'rwidth': '(0.9)'}), '(var_data, bins=3, rwidth=0.9)\n', (2596, 2626), True, 'import matplotlib.pyplot as plt\n'), ((2662, 2683), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filepath'], {}), '(filepath)\n', (2673, 2683), True, 'import matplotlib.pyplot as plt\n'), ((2688, 2699), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2697, 2699), True, 'import matplotlib.pyplot as plt\n'), ((4126, 4168), 'seaborn.set', 'sns.set', ([], {'style': '"""darkgrid"""', 'font_scale': '(1.25)'}), "(style='darkgrid', font_scale=1.25)\n", (4133, 4168), True, 'import seaborn as sns\n'), ((4173, 4230), 'seaborn.lmplot', 'sns.lmplot', (['x', 'y', 'iris_data'], {'fit_reg': '(False)', 'hue': '"""species"""'}), "(x, y, iris_data, fit_reg=False, hue='species')\n", (4183, 4230), True, 'import seaborn as sns\n'), ((4235, 4245), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4243, 4245), True, 'import matplotlib.pyplot as plt\n'), ((4250, 4261), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4259, 4261), True, 'import matplotlib.pyplot as plt\n'), ((2642, 2657), 'numpy.arange', 'np.arange', (['(0)', '(3)'], {}), '(0, 3)\n', (2651, 2657), True, 'import numpy as np\n')]
|
# <NAME> - github.com/2b-t (2022)
# @file utilities_test.py
# @brief Different testing routines for utility functions for accuracy calculation and file import and export
import numpy as np
from parameterized import parameterized
from typing import Tuple
import unittest
from src.utilities import AccX, IO
class TestAccX(unittest.TestCase):
_shape = (10,20)
_disparities = [ ["disparity = 1", 1],
["disparity = 2", 2],
["disparity = 3", 3]
]
@parameterized.expand(_disparities)
def test_same_image(self, name: str, threshold_disparity: int) -> None:
# Parameterised unit test for testing if two identical images result in an accuracy measure of unity
# @param[in] name: The name of the parameterised test
# @param[in] threshold_disparity: The threshold disparity for the accuracy measure
mag = threshold_disparity*10
groundtruth_image = mag*np.ones(self._shape)
prediction_image = mag*np.ones(groundtruth_image.shape)
mask_image = np.ones(groundtruth_image.shape)
accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
self.assertAlmostEqual(accx, 1.0, places=7)
return
@parameterized.expand(_disparities)
def test_slightly_shifted_image(self, name: str, threshold_disparity: int) -> None:
# Parameterised unit test for testing if an image and its slightly shifted counterpart result in an accuracy measure of unity
# @param[in] name: The name of the parameterised test
# @param[in] threshold_disparity: The threshold disparity for the accuracy measure
mag = threshold_disparity*10
groundtruth_image = mag*np.ones(self._shape)
prediction_image = (mag+threshold_disparity-1)*np.ones(groundtruth_image.shape)
mask_image = np.ones(groundtruth_image.shape)
accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
self.assertAlmostEqual(accx, 1.0, places=7)
return
@parameterized.expand(_disparities)
def test_no_mask(self, name: str, threshold_disparity: int) -> None:
# Parameterised unit test for testing if two identical images with no given mask result in an accuracy measure of unity
# @param[in] name: The name of the parameterised test
# @param[in] threshold_disparity: The threshold disparity for the accuracy measure
mag = threshold_disparity*10
groundtruth_image = mag*np.ones(self._shape)
prediction_image = mag*np.ones(groundtruth_image.shape)
mask_image = None
accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
self.assertAlmostEqual(accx, 1.0, places=7)
return
@parameterized.expand(_disparities)
def test_inverse_image(self, name: str, threshold_disparity: int) -> None:
# Parameterised unit test for testing if two inverse images result in an accuracy measure of zero
# @param[in] name: The name of the parameterised test
# @param[in] threshold_disparity: The threshold disparity for the accuracy measure
mag = threshold_disparity*10
groundtruth_image = mag*np.ones(self._shape)
prediction_image = np.zeros(groundtruth_image.shape)
mask_image = np.ones(groundtruth_image.shape)
accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
self.assertAlmostEqual(accx, 0.0, places=7)
return
@parameterized.expand(_disparities)
def test_significantly_shifted_image(self, name: str, threshold_disparity: int) -> None:
# Parameterised unit test for testing if an image and its significantly shifted counterpart result in an accuracy measure of zero
# @param[in] name: The name of the parameterised test
# @param[in] threshold_disparity: The threshold disparity for the accuracy measure
mag = threshold_disparity*10
groundtruth_image = mag*np.ones(self._shape)
prediction_image = (mag+threshold_disparity+1)*np.ones(groundtruth_image.shape)
mask_image = np.ones(groundtruth_image.shape)
accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
self.assertAlmostEqual(accx, 0.0, places=7)
return
@parameterized.expand(_disparities)
def test_zero_mask(self, name: str, threshold_disparity: int) -> None:
        # Parameterised unit test for testing if two identical images with an all-zero mask result in an accuracy measure of zero
# @param[in] name: The name of the parameterised test
# @param[in] threshold_disparity: The threshold disparity for the accuracy measure
mag = threshold_disparity*10
groundtruth_image = mag*np.ones(self._shape)
prediction_image = groundtruth_image
mask_image = np.zeros(groundtruth_image.shape)
accx = AccX.compute(prediction_image, groundtruth_image, mask_image, threshold_disparity)
self.assertAlmostEqual(accx, 0.0, places=7)
return
class TestIO(unittest.TestCase):
_resolutions = [ ["resolution = (10, 20)", (10, 20)],
["resolution = (30, 4)", (30, 4)],
["resolution = (65, 24)", (65, 24)]
]
def test_import_image(self) -> None:
# TODO(tobit): Implement
pass
def test_export_image(self) -> None:
# TODO(tobit): Implement
pass
def test_str_comma(self) -> None:
        # Function for testing conversion of numbers to strings with a comma as decimal separator
self.assertEqual(IO._str_comma(10, 2), "10")
self.assertEqual(IO._str_comma(9.3, 2), "9,3")
self.assertEqual(IO._str_comma(1.234, 2), "1,23")
return
@parameterized.expand(_resolutions)
def test_normalise_positive_image_no_groundtruth(self, name: str, shape: Tuple[int, int]) -> None:
        # Function for testing that normalising a positive image with no ground-truth results in values inside [0, 1]
# @param[in] name: The name of the parameterised test
# @param[in] shape: The image resolution to be considered for the test
mag = 13
image = mag*np.ones(shape)
groundtruth_image = None
result = IO.normalise_image(image, groundtruth_image)
self.assertGreaterEqual(np.min(result), 0.0)
self.assertLessEqual(np.max(result), 1.0)
return
@parameterized.expand(_resolutions)
def test_normalise_positive_image_positive_groundtruth(self, name: str, shape: Tuple[int, int]) -> None:
        # Function for testing that normalising a positive image against a positive ground-truth results in values inside [0, 1]
# @param[in] name: The name of the parameterised test
# @param[in] shape: The image resolution to be considered for the test
mag = 13
image = mag*np.ones(shape)
groundtruth_image = 2*image
result = IO.normalise_image(image, groundtruth_image)
self.assertGreaterEqual(np.min(result), 0.0)
self.assertLessEqual(np.max(result), 1.0)
return
@parameterized.expand(_resolutions)
def test_normalise_negative_image_positive_groundtruth(self, name: str, shape: Tuple[int, int]) -> None:
        # Function for testing that normalising a negative image raises a ValueError
# @param[in] name: The name of the parameterised test
# @param[in] shape: The image resolution to be considered for the test
mag = 13
groundtruth_image = mag*np.ones(shape)
image = -2*groundtruth_image
self.assertRaises(ValueError, IO.normalise_image, image, groundtruth_image)
return
@parameterized.expand(_resolutions)
def test_normalise_positive_image_negative_groundtruth(self, name: str, shape: Tuple[int, int]) -> None:
        # Function for testing that normalising against a negative ground-truth raises a ValueError
# @param[in] name: The name of the parameterised test
# @param[in] shape: The image resolution to be considered for the test
mag = 13
image = mag*np.ones(shape)
groundtruth_image = -2*image
self.assertRaises(ValueError, IO.normalise_image, image, groundtruth_image)
return
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"src.utilities.IO._str_comma",
"src.utilities.AccX.compute",
"numpy.zeros",
"numpy.ones",
"src.utilities.IO.normalise_image",
"parameterized.parameterized.expand",
"numpy.min",
"numpy.max"
] |
[((509, 543), 'parameterized.parameterized.expand', 'parameterized.expand', (['_disparities'], {}), '(_disparities)\n', (529, 543), False, 'from parameterized import parameterized\n'), ((1222, 1256), 'parameterized.parameterized.expand', 'parameterized.expand', (['_disparities'], {}), '(_disparities)\n', (1242, 1256), False, 'from parameterized import parameterized\n'), ((1998, 2032), 'parameterized.parameterized.expand', 'parameterized.expand', (['_disparities'], {}), '(_disparities)\n', (2018, 2032), False, 'from parameterized import parameterized\n'), ((2699, 2733), 'parameterized.parameterized.expand', 'parameterized.expand', (['_disparities'], {}), '(_disparities)\n', (2719, 2733), False, 'from parameterized import parameterized\n'), ((3409, 3443), 'parameterized.parameterized.expand', 'parameterized.expand', (['_disparities'], {}), '(_disparities)\n', (3429, 3443), False, 'from parameterized import parameterized\n'), ((4192, 4226), 'parameterized.parameterized.expand', 'parameterized.expand', (['_disparities'], {}), '(_disparities)\n', (4212, 4226), False, 'from parameterized import parameterized\n'), ((5563, 5597), 'parameterized.parameterized.expand', 'parameterized.expand', (['_resolutions'], {}), '(_resolutions)\n', (5583, 5597), False, 'from parameterized import parameterized\n'), ((6195, 6229), 'parameterized.parameterized.expand', 'parameterized.expand', (['_resolutions'], {}), '(_resolutions)\n', (6215, 6229), False, 'from parameterized import parameterized\n'), ((6836, 6870), 'parameterized.parameterized.expand', 'parameterized.expand', (['_resolutions'], {}), '(_resolutions)\n', (6856, 6870), False, 'from parameterized import parameterized\n'), ((7394, 7428), 'parameterized.parameterized.expand', 'parameterized.expand', (['_resolutions'], {}), '(_resolutions)\n', (7414, 7428), False, 'from parameterized import parameterized\n'), ((7972, 7987), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7985, 7987), False, 'import unittest\n'), ((1032, 1064), 'numpy.ones', 'np.ones', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (1039, 1064), True, 'import numpy as np\n'), ((1076, 1162), 'src.utilities.AccX.compute', 'AccX.compute', (['prediction_image', 'groundtruth_image', 'mask_image', 'threshold_disparity'], {}), '(prediction_image, groundtruth_image, mask_image,\n threshold_disparity)\n', (1088, 1162), False, 'from src.utilities import AccX, IO\n'), ((1806, 1838), 'numpy.ones', 'np.ones', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (1813, 1838), True, 'import numpy as np\n'), ((1850, 1936), 'src.utilities.AccX.compute', 'AccX.compute', (['prediction_image', 'groundtruth_image', 'mask_image', 'threshold_disparity'], {}), '(prediction_image, groundtruth_image, mask_image,\n threshold_disparity)\n', (1862, 1936), False, 'from src.utilities import AccX, IO\n'), ((2553, 2639), 'src.utilities.AccX.compute', 'AccX.compute', (['prediction_image', 'groundtruth_image', 'mask_image', 'threshold_disparity'], {}), '(prediction_image, groundtruth_image, mask_image,\n threshold_disparity)\n', (2565, 2639), False, 'from src.utilities import AccX, IO\n'), ((3168, 3201), 'numpy.zeros', 'np.zeros', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (3176, 3201), True, 'import numpy as np\n'), ((3219, 3251), 'numpy.ones', 'np.ones', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (3226, 3251), True, 'import numpy as np\n'), ((3263, 3349), 'src.utilities.AccX.compute', 'AccX.compute', (['prediction_image', 
'groundtruth_image', 'mask_image', 'threshold_disparity'], {}), '(prediction_image, groundtruth_image, mask_image,\n threshold_disparity)\n', (3275, 3349), False, 'from src.utilities import AccX, IO\n'), ((4002, 4034), 'numpy.ones', 'np.ones', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (4009, 4034), True, 'import numpy as np\n'), ((4046, 4132), 'src.utilities.AccX.compute', 'AccX.compute', (['prediction_image', 'groundtruth_image', 'mask_image', 'threshold_disparity'], {}), '(prediction_image, groundtruth_image, mask_image,\n threshold_disparity)\n', (4058, 4132), False, 'from src.utilities import AccX, IO\n'), ((4711, 4744), 'numpy.zeros', 'np.zeros', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (4719, 4744), True, 'import numpy as np\n'), ((4756, 4842), 'src.utilities.AccX.compute', 'AccX.compute', (['prediction_image', 'groundtruth_image', 'mask_image', 'threshold_disparity'], {}), '(prediction_image, groundtruth_image, mask_image,\n threshold_disparity)\n', (4768, 4842), False, 'from src.utilities import AccX, IO\n'), ((6040, 6084), 'src.utilities.IO.normalise_image', 'IO.normalise_image', (['image', 'groundtruth_image'], {}), '(image, groundtruth_image)\n', (6058, 6084), False, 'from src.utilities import AccX, IO\n'), ((6681, 6725), 'src.utilities.IO.normalise_image', 'IO.normalise_image', (['image', 'groundtruth_image'], {}), '(image, groundtruth_image)\n', (6699, 6725), False, 'from src.utilities import AccX, IO\n'), ((934, 954), 'numpy.ones', 'np.ones', (['self._shape'], {}), '(self._shape)\n', (941, 954), True, 'import numpy as np\n'), ((982, 1014), 'numpy.ones', 'np.ones', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (989, 1014), True, 'import numpy as np\n'), ((1684, 1704), 'numpy.ones', 'np.ones', (['self._shape'], {}), '(self._shape)\n', (1691, 1704), True, 'import numpy as np\n'), ((1756, 1788), 'numpy.ones', 'np.ones', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (1763, 1788), True, 'import numpy as np\n'), ((2439, 2459), 'numpy.ones', 'np.ones', (['self._shape'], {}), '(self._shape)\n', (2446, 2459), True, 'import numpy as np\n'), ((2487, 2519), 'numpy.ones', 'np.ones', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (2494, 2519), True, 'import numpy as np\n'), ((3124, 3144), 'numpy.ones', 'np.ones', (['self._shape'], {}), '(self._shape)\n', (3131, 3144), True, 'import numpy as np\n'), ((3880, 3900), 'numpy.ones', 'np.ones', (['self._shape'], {}), '(self._shape)\n', (3887, 3900), True, 'import numpy as np\n'), ((3952, 3984), 'numpy.ones', 'np.ones', (['groundtruth_image.shape'], {}), '(groundtruth_image.shape)\n', (3959, 3984), True, 'import numpy as np\n'), ((4632, 4652), 'numpy.ones', 'np.ones', (['self._shape'], {}), '(self._shape)\n', (4639, 4652), True, 'import numpy as np\n'), ((5413, 5433), 'src.utilities.IO._str_comma', 'IO._str_comma', (['(10)', '(2)'], {}), '(10, 2)\n', (5426, 5433), False, 'from src.utilities import AccX, IO\n'), ((5462, 5483), 'src.utilities.IO._str_comma', 'IO._str_comma', (['(9.3)', '(2)'], {}), '(9.3, 2)\n', (5475, 5483), False, 'from src.utilities import AccX, IO\n'), ((5513, 5536), 'src.utilities.IO._str_comma', 'IO._str_comma', (['(1.234)', '(2)'], {}), '(1.234, 2)\n', (5526, 5536), False, 'from src.utilities import AccX, IO\n'), ((5983, 5997), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (5990, 5997), True, 'import numpy as np\n'), ((6113, 6127), 'numpy.min', 'np.min', (['result'], {}), '(result)\n', (6119, 6127), True, 
'import numpy as np\n'), ((6159, 6173), 'numpy.max', 'np.max', (['result'], {}), '(result)\n', (6165, 6173), True, 'import numpy as np\n'), ((6621, 6635), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (6628, 6635), True, 'import numpy as np\n'), ((6754, 6768), 'numpy.min', 'np.min', (['result'], {}), '(result)\n', (6760, 6768), True, 'import numpy as np\n'), ((6800, 6814), 'numpy.max', 'np.max', (['result'], {}), '(result)\n', (6806, 6814), True, 'import numpy as np\n'), ((7249, 7263), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (7256, 7263), True, 'import numpy as np\n'), ((7802, 7816), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (7809, 7816), True, 'import numpy as np\n')]
|
###
# Copyright Notice:
# Copyright 2016 Distributed Management Task Force, Inc. All rights reserved.
# License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/python-redfish-utility/blob/master/LICENSE.md
###
""" List Command for RDMC """
import redfish.ris
from optparse import OptionParser
from rdmc_base_classes import RdmcCommandBase
from rdmc_helper import ReturnCodes, InvalidCommandLineErrorOPTS,\
NoContentsFoundForOperationError
class ListCommand(RdmcCommandBase):
""" Constructor """
def __init__(self, rdmcObj):
RdmcCommandBase.__init__(self,\
name='list',\
usage='list [OPTIONS]\n\n\tDisplays the current values of the ' \
'properties within\n\ta selected type including'\
' reserved properties\n\texample: list\n\n\tNOTE: If ' \
'you wish not to get all the reserved properties\n\t ' \
' run the get command instead',\
summary='Displays the current value(s) of a' \
' property(ies) within a selected type including'\
' reserved properties.',\
aliases=['ls'],\
optparser=OptionParser())
self.definearguments(self.parser)
self._rdmc = rdmcObj
self.lobobj = rdmcObj.commandsDict["LoginCommand"](rdmcObj)
self.selobj = rdmcObj.commandsDict["SelectCommand"](rdmcObj)
self.getobj = rdmcObj.commandsDict["GetCommand"](rdmcObj)
def run(self, line):
""" Wrapper function for main list function
:param line: command line input
:type line: string.
"""
try:
(options, args) = self._parse_arglist(line)
except:
if ("-h" in line) or ("--help" in line):
return ReturnCodes.SUCCESS
else:
raise InvalidCommandLineErrorOPTS("")
self.listvalidation(options)
if args:
for arg in args:
newargs = list()
if "/" in arg:
newargs = arg.split("/")
arg = newargs[0]
if not self.getobj.getworkerfunction(arg, options, line,\
newargs=newargs, uselist=True):
raise NoContentsFoundForOperationError('No contents found '\
'for entry: %s\n' % arg)
else:
if not self.getobj.getworkerfunction(args, options, line, \
uselist=True):
raise NoContentsFoundForOperationError('No contents found.')
#Return code
return ReturnCodes.SUCCESS
def listvalidation(self, options):
""" List data validation function
:param options: command line options
:type options: list.
"""
inputline = list()
if self._rdmc.app.config._ac__format.lower() == 'json':
options.json = True
try:
self._rdmc.app.get_current_client()
except:
if options.user or options.password or options.url:
if options.url:
inputline.extend([options.url])
if options.user:
inputline.extend(["-u", options.user])
if options.password:
inputline.extend(["-p", options.password])
else:
if self._rdmc.app.config.get_url():
inputline.extend([self._rdmc.app.config.get_url()])
if self._rdmc.app.config.get_username():
inputline.extend(["-u", \
self._rdmc.app.config.get_username()])
if self._rdmc.app.config.get_password():
inputline.extend(["-p", \
self._rdmc.app.config.get_password()])
if len(inputline) and options.selector:
if options.filter:
inputline.extend(["--filter", options.filter])
if options.includelogs:
inputline.extend(["--includelogs"])
if options.path:
inputline.extend(["--path", options.path])
inputline.extend(["--selector", options.selector])
self.lobobj.loginfunction(inputline)
elif options.selector:
if options.filter:
inputline.extend(["--filter", options.filter])
if options.includelogs:
inputline.extend(["--includelogs"])
if options.path:
inputline.extend(["--path", options.path])
inputline.extend([options.selector])
self.selobj.selectfunction(inputline)
else:
try:
inputline = list()
selector = self._rdmc.app.get_selector()
if options.filter:
inputline.extend(["--filter", options.filter])
if options.includelogs:
inputline.extend(["--includelogs"])
if options.path:
inputline.extend(["--path", options.path])
inputline.extend([selector])
self.selobj.selectfunction(inputline)
except:
raise redfish.ris.NothingSelectedError
def definearguments(self, customparser):
""" Wrapper function for new command main function
:param customparser: command line input
:type customparser: parser.
"""
if not customparser:
return
customparser.add_option(
'--url',
dest='url',
help="Use the provided URL to login.",
default=None,
)
customparser.add_option(
'-u',
'--user',
dest='user',
help="If you are not logged in yet, including this flag along"\
" with the password and URL flags can be used to log into a"\
" server in the same command.""",
default=None,
)
customparser.add_option(
'-p',
'--password',
dest='password',
help="""Use the provided password to log in.""",
default=None,
)
customparser.add_option(
'--includelogs',
dest='includelogs',
action="store_true",
help="Optionally include logs in the data retrieval process.",
default=False,
)
customparser.add_option(
'--selector',
dest='selector',
help="Optionally include this flag to select a type to run"\
" the current command on. Use this flag when you wish to"\
" select a type without entering another command, or if you"\
" wish to work with a type that is different from the one"\
" you currently have selected.",
default=None,
)
customparser.add_option(
'--filter',
dest='filter',
help="Optionally set a filter value for a filter attribute."\
" This uses the provided filter for the currently selected"\
" type. Note: Use this flag to narrow down your results. For"\
" example, selecting a common type might return multiple"\
" objects that are all of that type. If you want to modify"\
" the properties of only one of those objects, use the filter"\
" flag to narrow down results based on properties."\
"\t\t\t\t\t Usage: --filter [ATTRIBUTE]=[VALUE]",
default=None,
)
customparser.add_option(
'-j',
'--json',
dest='json',
action="store_true",
help="Optionally include this flag if you wish to change the"\
" displayed output to JSON format. Preserving the JSON data"\
" structure makes the information easier to parse.",
default=False
)
customparser.add_option(
'--logout',
dest='logout',
action="store_true",
help="Optionally include the logout flag to log out of the"\
" server after this command is completed. Using this flag when"\
" not logged in will have no effect",
default=None,
)
customparser.add_option(
'--path',
dest='path',
help="Optionally set a starting point for data collection."\
" If you do not specify a starting point, the default path"\
" will be /redfish/v1/. Note: The path flag can only be specified"\
" at the time of login, so if you are already logged into the"\
" server, the path flag will not change the path. If you are"\
" entering a command that isn't the login command, but include"\
" your login information, you can still specify the path flag"\
" there. ",
default=None,
)
|
[
"rdmc_helper.InvalidCommandLineErrorOPTS",
"optparse.OptionParser",
"rdmc_helper.NoContentsFoundForOperationError"
] |
[((1272, 1286), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (1284, 1286), False, 'from optparse import OptionParser\n'), ((2748, 2802), 'rdmc_helper.NoContentsFoundForOperationError', 'NoContentsFoundForOperationError', (['"""No contents found."""'], {}), "('No contents found.')\n", (2780, 2802), False, 'from rdmc_helper import ReturnCodes, InvalidCommandLineErrorOPTS, NoContentsFoundForOperationError\n'), ((1961, 1992), 'rdmc_helper.InvalidCommandLineErrorOPTS', 'InvalidCommandLineErrorOPTS', (['""""""'], {}), "('')\n", (1988, 1992), False, 'from rdmc_helper import ReturnCodes, InvalidCommandLineErrorOPTS, NoContentsFoundForOperationError\n'), ((2420, 2495), 'rdmc_helper.NoContentsFoundForOperationError', 'NoContentsFoundForOperationError', (["('No contents found for entry: %s\\n' % arg)"], {}), "('No contents found for entry: %s\\n' % arg)\n", (2452, 2495), False, 'from rdmc_helper import ReturnCodes, InvalidCommandLineErrorOPTS, NoContentsFoundForOperationError\n')]
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="covid19_dashboard",
version="0.0.1",
author="<NAME>",
author_email="<EMAIL>",
description="A personalized dashboard which maps up to date covid data to a web template",
long_description="Using a webpage template this package creates a dashboard displaying up to date covid data from "
"an api, it also contains news articles obtained from a news api and you can remove articles and "
"schedule updates for yourself",
long_description_content_type="text/markdown",
url="",
classifiers=[
"Programming Language :: Python :: 3",
"License :: Freeware",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
)
|
[
"setuptools.find_packages"
] |
[((901, 938), 'setuptools.find_packages', 'setuptools.find_packages', ([], {'where': '"""src"""'}), "(where='src')\n", (925, 938), False, 'import setuptools\n')]
|
import Bio.SeqUtils.ProtParam
import os
import ASAP.FeatureExtraction as extract
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Chothia numbering definition for CDR regions
CHOTHIA_CDR = {'L': {'1': [24, 34], '2': [50, 56], '3': [89, 97]}, 'H':{'1': [26, 32], '2': [52, 56], '3': [95, 102]}}
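# e.g. under this Chothia definition, heavy-chain residue 27 falls in CDR-H1
# (26-32), residue 54 in CDR-H2 (52-56) and residue 99 in CDR-H3 (95-102);
# positions outside these windows belong to the framework regions FW1-FW4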
canonical_direct = '../data/pigs_canonical.txt'
SET_NAME = 'IGHV'
IF_ONLY_HEAVY = True
CNT_DB = 1
CNT_TARGET = 1
REFERENCE_PATH_TESTCASE = '../testCase/IGHV/reference-IGHV/'
TARGETING_PATH_TESTCASE = '../testCase/IGHV/targeting-MMP-IGHV/'
TARGET_DESIRE_SIZE = 134 #44 #IGHV
targeting_direct = TARGETING_PATH_TESTCASE
reference_direct = REFERENCE_PATH_TESTCASE
Amino, Num, Germ, DatasetName, DatasetSize = extract.ReadAminoNumGerm(targeting_direct, reference_direct)
seq_id = []
for i, name in enumerate(DatasetName):
# if i<2:
# continue
tmp= [[] for j in range(int(DatasetSize[i]))]
# for every seq in that dataset
for j in range(int(DatasetSize[i])):
seq_name = name + '_' + str(j)
seq_id.append(seq_name)
# raw sequence
def sequence_raw():
def getSequenceHL(sname):
SH = ''.join(Amino['H'][sname])
SL = ''
if not IF_ONLY_HEAVY:
SL = ''.join(Amino['L'][sname])
return SL, SH
else:
return [SH]
with open('../results/'+SET_NAME +'_Sequence.csv','w') as fi:
fi.write('sequence name, ')
if not IF_ONLY_HEAVY:
fi.write('light chain, ')
fi.write('heavy chain\n')
for sname in seq_id:
fi.write(sname + ',' + ','.join(getSequenceHL(sname))+ '\n')
# sequence with numbering
def sequence_num():
def getSequenceHL_num(sname):
NH = ','.join(Num['H'][sname])
SH = ','.join(Amino['H'][sname])
NL = ','.join(Num['L'][sname])
SL = ','.join(Amino['L'][sname])
return NH, SH, NL, SL
with open('./Sequence_numbered.csv','w') as fi:
for sname in seq_id:
NH, SH, NL, SL = getSequenceHL_num(sname)
fi.write(sname + ' light num,' + NL + '\n')
fi.write(sname + ' light seq,' + SL + '\n')
fi.write(sname + ' heavy num,' + NH + '\n')
fi.write(sname + ' heavy seq,' + SH + '\n')
# sequence with region
def sequence_region():
def getSequenceHL_region(sname):
NH = Num['H'][sname]
HFW1, HCDR1, HFW2, HCDR2, HFW3, HCDR3, HFW4 = '', '', '', '', '', '', ''
for i, number in enumerate(NH):
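            # numbering may carry Chothia insertion codes such as '100A';
            # strip the trailing letter before comparing as an integer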
if number[-1] >= 'A' and number[-1] <= 'Z':
num_i = int(number[:-1])
else:
num_i = int(number)
if num_i < CHOTHIA_CDR['H']['1'][0]:
HFW1 += Amino['H'][sname][i]
elif num_i <= CHOTHIA_CDR['H']['1'][1]:
HCDR1+= Amino['H'][sname][i]
elif num_i < CHOTHIA_CDR['H']['2'][0]:
HFW2 += Amino['H'][sname][i]
elif num_i <= CHOTHIA_CDR['H']['2'][1]:
HCDR2 += Amino['H'][sname][i]
elif num_i < CHOTHIA_CDR['H']['3'][0]:
HFW3 += Amino['H'][sname][i]
elif num_i <= CHOTHIA_CDR['H']['3'][1]:
HCDR3 += Amino['H'][sname][i]
else:
HFW4 += Amino['H'][sname][i]
if IF_ONLY_HEAVY:
return ''.join(HFW1), ''.join(HCDR1), ''.join(HFW2), ''.join(HCDR2), ''.join(HFW3), ''.join(HCDR3), ''.join(
HFW4)
else:
NL = Num['L'][sname]
LFW1, LCDR1, LFW2, LCDR2, LFW3, LCDR3, LFW4 = '', '', '', '', '', '', ''
for i, number in enumerate(NL):
if number[-1] >= 'A' and number[-1] <= 'Z':
num_i = int(number[:-1])
else:
num_i = int(number)
if num_i < CHOTHIA_CDR['L']['1'][0]:
LFW1 += Amino['L'][sname][i]
elif num_i <= CHOTHIA_CDR['L']['1'][1]:
LCDR1 += Amino['L'][sname][i]
elif num_i < CHOTHIA_CDR['L']['2'][0]:
LFW2 += Amino['L'][sname][i]
elif num_i <= CHOTHIA_CDR['L']['2'][1]:
LCDR2 += Amino['L'][sname][i]
elif num_i < CHOTHIA_CDR['L']['3'][0]:
LFW3 += Amino['L'][sname][i]
elif num_i <= CHOTHIA_CDR['L']['3'][1]:
LCDR3 += Amino['L'][sname][i]
else:
LFW4 += Amino['L'][sname][i]
return ''.join(LFW1), ''.join(LCDR1), ''.join(LFW2), ''.join(LCDR2), ''.join(LFW3), ''.join(LCDR3), ''.join(LFW4),\
''.join(HFW1), ''.join(HCDR1), ''.join(HFW2), ''.join(HCDR2), ''.join(HFW3), ''.join(HCDR3), ''.join(HFW4)
with open('../results/'+SET_NAME +'_Sequence_region.csv','w') as fi:
if IF_ONLY_HEAVY:
fi.write(
'sequence id, heavy chain FW1, heavy chain CDR1, heavy chain FW2, heavy chain CDR2, heavy chain FW3, heavy chain CDR3, heavy chain FW4\n')
else:
fi.write('sequence id, light chain FW1, light chain CDR1, light chain FW2, light chain CDR2, light chain FW3, light chain CDR3, light chain FW4, '+
'heavy chain FW1, heavy chain CDR1, heavy chain FW2, heavy chain CDR2, heavy chain FW3, heavy chain CDR3, heavy chain FW4\n')
for sname in seq_id:
fi.write(sname + ',' + ','.join(getSequenceHL_region(sname)) + '\n')
def feature_distribution():
from collections import Counter
    write_out = [[] for i in range(len(seq_id))]
    # populate the rows the same way feature() does; without this the
    # item[fi] lookups below would index into empty lists
    for i in range(len(seq_id)):
        write_out[i].append(seq_id[i])
        for idx, f in enumerate(AllFeatureVectors[i]):
            if f == 1:
                write_out[i].append(AllFeatureNames[idx])
for fi in range(1,12):
feat = []
for item in write_out:
feat.append(item[fi])
feat_count = Counter(feat)
sorted_count = sorted(feat_count.items(), key=lambda kv: kv[1], reverse=True)
if fi==11:
feat_type = sorted_count[0][0].split('_')[0]
else:
feat_type = sorted_count[0][0].split('_')[0] + sorted_count[0][0].split('_')[1]
with open('./Features_distribution_'+feat_type+'.csv','w') as fi:
for i in range(len(sorted_count)):
fi.write(sorted_count[i][0]+','+str(sorted_count[i][1])+'\n')
def feature():
write_out = [[] for i in range(len(seq_id))]
for i in range(len(seq_id)):
write_out[i].append(seq_id[i])
for idx, f in enumerate(AllFeatureVectors[i]):
if f == 1:
write_out[i].append(AllFeatureNames[idx])
with open('../results/'+SET_NAME +'_Features.csv', 'w') as fi:
fi.write('sequence id, ')
if not IF_ONLY_HEAVY:
fi.write('light chain V region, light chain J region, ')
fi.write('heavy chain V region, heavy chain J region, ')
if not IF_ONLY_HEAVY:
fi.write('Canonical L1, Canonical L2, Canonical L3, ')
fi.write('Canonical H1, Canonical H2, Canonical H3, ' )
fi.write('PI, frequent positional motif\n')
for i in range(len(write_out)):
fi.write(','.join(write_out[i]) + '\n')
def correlation_feature():
###### plot correlation matrix
data = pd.DataFrame(AllFeatureVectors, columns=AllFeatureNames)
# print(AllFeatureVectors.shape)
corr = data.corr()
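    # pandas .corr() yields the pairwise Pearson coefficients of the one-hot
    # feature columns; zero-variance (constant) columns come out as NaN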
    corr = np.array(corr)
with open('../results/Pearson_feature_correlation.csv', 'w') as fi:
fi.write('Feature value 1, Feature value 2, Pearson coefficient\n')
for i in range(len(AllFeatureNames)):
for j in range(i+1, len(AllFeatureNames)):
# if str(corr[i][j])=='nan':
# print('nan', AllFeatureNames[i], AllFeatureNames[j])
fi.write(AllFeatureNames[i]+ ','+AllFeatureNames[j]+','+ str(corr[i][j])+'\n')
# data.to_csv(r'../results/Feature_test.csv', header=True)
# fig = plt.figure(figsize=(100, 70))
# ax = fig.add_subplot(111)
# cax = ax.matshow(corr, cmap='seismic', vmin=-1, vmax =1)
# fig.colorbar(cax)
# ticks = np.arange(0, len(data.columns),1)
# ax.set_xticks(ticks)
# plt.xticks(rotation=90)
# ax.set_yticks(ticks)
# ax.set_xticklabels(data.columns)
# ax.set_yticklabels(data.columns)
# plt.savefig('../results/feature_correlation.png')
# corr = pd.DataFrame(corr, index=AllFeatureNames, columns=AllFeatureNames)
###### display pairwise correlation value
# au_corr = corr.where(np.triu(np.ones(corr.shape), k=1).astype(np.bool))
# au_corr = au_corr.stack().sort_values(ascending=False)
# au_corr = corr.unstack()
    # au_corr.columns = ['Feature 1', 'Feature 2', 'Pearson Correlation Value']
# au_corr = pd.DataFrame(au_corr.values, columns = ['Feature 1, Feature 2, Pearson Correlation Value'])
# au_corr.to_csv(r'../results/Pearson_feature_correlation.csv', header=True)
# print(len(au_corr))
# print(AllFeatureVectors[:, AllFeatureNames.index('Germ_LJ_IGKJ3*01')])
# print(AllFeatureVectors[:, AllFeatureNames.index('Canonical_L2_0')])
# def JaccardCoefficientAnalysis():
# df = pd.DataFrame(AllFeatureVectors, columns=AllFeatureNames)
#
# interest_feature=['Germ_HV_IGHV3-23*01', 'Canonical_H2_6', 'Germ_HJ_IGHJ4*02', 'Germ_HJ_IGHJ6*01', 'Germ_LV_IGKV1D-39*01',
# 'Canonical_H2_5', 'Germ_HJ_IGHJ4*01']
# jac_sim = np.eye(len(AllFeatureNames))
# for i in range(len(AllFeatureNames)):
# for j in range(i+1, len(AllFeatureNames)):
# if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
# continue
# a = AllFeatureVectors[:, i]
# b = AllFeatureVectors[:, j]
# aandb =0
# aorb = 0
# for k in range(len(a)):
# if a[k]==b[k] and a[k]==1:
# aandb +=1
# if a[k]==1 or b[k]==1:
# aorb +=1
# if aorb==0:
# jac_tmp=0
# else:
# jac_tmp = float(aandb)/aorb
# if AllFeatureNames[i] in interest_feature and AllFeatureNames[j] in interest_feature:
# print(AllFeatureNames[i], AllFeatureNames[j], jac_tmp)
#
# jac_sim[i][j]=jac_tmp
# jac_sim[j][i]=jac_tmp
#
#
# with open('../results/Jaccard_feature_coefficient.csv', 'w') as fi:
# fi.write('Feature value 1, Feature value 2, Jaccard coefficient\n')
# for i in range(len(AllFeatureNames)):
# for j in range(i+1, len(AllFeatureNames)):
# if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
# continue
# fi.write(AllFeatureNames[i]+ ','+AllFeatureNames[j]+','+ str(jac_sim[i][j])+'\n')
#
#
# fig = plt.figure(figsize=(100, 70))
# ax = fig.add_subplot(111)
# cax = ax.matshow(jac_sim, cmap='Blues', vmin=0, vmax =1)
# fig.colorbar(cax)
# ticks = np.arange(0, len(df.columns),1)
# ax.set_xticks(ticks)
# plt.xticks(rotation=90)
# ax.set_yticks(ticks)
# ax.set_xticklabels(df.columns)
# ax.set_yticklabels(df.columns)
# plt.savefig('../results/feature_coefficient.png')
#
# # print(AllFeatureVectors[:,AllFeatureNames.index('Germ_LJ_IGKJ3*01')])
# # print(AllFeatureVectors[:,AllFeatureNames.index('Canonical_L2_0*01')])
# # where(np.triu(np.ones(jac_sim.shape), k=1).astype(np.bool))
# # au_jac = jac_sim.where(np.triu(np.ones(jac_sim.shape), k=0).astype(np.bool))
# # au_jac = au_jac.stack().sort_values(ascending=False)
# # au_jac = jac_sim.unstack()
# # print(len(au_jac))
# # au_jac.to_csv(r'../results/Jaccard_feature_coefficient.csv', header=True)
def JaccardCoefficientAnalysis():
PDB_size = DatasetSize[0]
jac_sim_PDB = np.eye(len(AllFeatureNames))
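    # Jaccard similarity of two binary feature columns a and b:
    # J(a, b) = |a AND b| / |a OR b|, accumulated with explicit counters below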
for i in range(len(AllFeatureNames)):
for j in range(i+1, len(AllFeatureNames)):
if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
continue
a = AllFeatureVectors[:PDB_size, i]
b = AllFeatureVectors[:PDB_size, j]
aandb =0
aorb = 0
for k in range(len(a)):
if a[k]==b[k] and a[k]==1:
aandb +=1
if a[k]==1 or b[k]==1:
aorb +=1
if aorb==0:
jac_tmp=0
else:
jac_tmp = float(aandb)/aorb
# if AllFeatureNames[i] == 'Germ_HV_IGHV3-23*01' and AllFeatureNames[j] =='Canonical_H2_6':
# print(a, b, jac_tmp)
# if AllFeatureNames[i] in interest_feature and AllFeatureNames[j] in interest_feature:
# print(AllFeatureNames[i], AllFeatureNames[j], jac_tmp)
jac_sim_PDB[i][j]=jac_tmp
jac_sim_PDB[j][i]=jac_tmp
jac_sim_MMP = np.eye(len(AllFeatureNames))
for i in range(len(AllFeatureNames)):
for j in range(i+1, len(AllFeatureNames)):
if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
continue
a = AllFeatureVectors[PDB_size:, i]
b = AllFeatureVectors[PDB_size:, j]
aandb =0
aorb = 0
for k in range(len(a)):
if a[k]==b[k] and a[k]==1:
aandb +=1
if a[k]==1 or b[k]==1:
aorb +=1
if aorb==0:
jac_tmp=0
else:
jac_tmp = float(aandb)/aorb
# if AllFeatureNames[i] in interest_feature and AllFeatureNames[j] in interest_feature:
# print(AllFeatureNames[i], AllFeatureNames[j], jac_tmp)
jac_sim_MMP[i][j]=jac_tmp
jac_sim_MMP[j][i]=jac_tmp
with open('../results/'+SET_NAME+'_Jaccard Feature Coefficient.csv', 'w') as fi:
fi.write('Feature value 1, Feature value 2, Jaccard coefficient for reference set, Jaccard coefficient for MMP-targeting set\n')
for i in range(len(AllFeatureNames)):
for j in range(i+1, len(AllFeatureNames)):
if AllFeatureNames[i].startswith('Motif') or AllFeatureNames[j].startswith('Motif'):
continue
fi.write(AllFeatureNames[i]+ ','+AllFeatureNames[j]+','+ str(jac_sim_PDB[i][j])+','+ str(jac_sim_MMP[i][j])+'\n')
if __name__=='__main__':
sequence_raw()
sequence_region()
OneHotGerm, GermFeatureNames = extract.GetOneHotGerm(Germ, DatasetSize, DatasetName)
OneHotCanon, CanonFeatureNames = extract.GetOneHotCanon(canonical_direct, Amino, Num, DatasetSize, DatasetName)
CDRH3 = extract.GetCDRH3(Amino, Num)
OneHotPI, PIFeatureNames = extract.GetOneHotPI(CDRH3, DatasetSize, DatasetName)
MultiHotMotif, MotifFeatureNames = extract.MultiHotMotif(CDRH3, DatasetSize, DatasetName)
AllFeatureVectors, AllFeatureNames, _, _ = extract.GetFeatureVectors(OneHotGerm, GermFeatureNames, OneHotCanon, CanonFeatureNames, OneHotPI, PIFeatureNames, MultiHotMotif, MotifFeatureNames)
feature()
# correlation_feature()
JaccardCoefficientAnalysis()
|
[
"pandas.DataFrame",
"ASAP.FeatureExtraction.MultiHotMotif",
"ASAP.FeatureExtraction.GetOneHotGerm",
"ASAP.FeatureExtraction.GetOneHotCanon",
"ASAP.FeatureExtraction.GetFeatureVectors",
"numpy.array",
"ASAP.FeatureExtraction.GetCDRH3",
"collections.Counter",
"ASAP.FeatureExtraction.GetOneHotPI",
"ASAP.FeatureExtraction.ReadAminoNumGerm"
] |
[((728, 788), 'ASAP.FeatureExtraction.ReadAminoNumGerm', 'extract.ReadAminoNumGerm', (['targeting_direct', 'reference_direct'], {}), '(targeting_direct, reference_direct)\n', (752, 788), True, 'import ASAP.FeatureExtraction as extract\n'), ((7133, 7189), 'pandas.DataFrame', 'pd.DataFrame', (['AllFeatureVectors'], {'columns': 'AllFeatureNames'}), '(AllFeatureVectors, columns=AllFeatureNames)\n', (7145, 7189), True, 'import pandas as pd\n'), ((7284, 7298), 'numpy.array', 'np.array', (['corr'], {}), '(corr)\n', (7292, 7298), True, 'import numpy as np\n'), ((14457, 14510), 'ASAP.FeatureExtraction.GetOneHotGerm', 'extract.GetOneHotGerm', (['Germ', 'DatasetSize', 'DatasetName'], {}), '(Germ, DatasetSize, DatasetName)\n', (14478, 14510), True, 'import ASAP.FeatureExtraction as extract\n'), ((14548, 14626), 'ASAP.FeatureExtraction.GetOneHotCanon', 'extract.GetOneHotCanon', (['canonical_direct', 'Amino', 'Num', 'DatasetSize', 'DatasetName'], {}), '(canonical_direct, Amino, Num, DatasetSize, DatasetName)\n', (14570, 14626), True, 'import ASAP.FeatureExtraction as extract\n'), ((14639, 14667), 'ASAP.FeatureExtraction.GetCDRH3', 'extract.GetCDRH3', (['Amino', 'Num'], {}), '(Amino, Num)\n', (14655, 14667), True, 'import ASAP.FeatureExtraction as extract\n'), ((14699, 14751), 'ASAP.FeatureExtraction.GetOneHotPI', 'extract.GetOneHotPI', (['CDRH3', 'DatasetSize', 'DatasetName'], {}), '(CDRH3, DatasetSize, DatasetName)\n', (14718, 14751), True, 'import ASAP.FeatureExtraction as extract\n'), ((14791, 14845), 'ASAP.FeatureExtraction.MultiHotMotif', 'extract.MultiHotMotif', (['CDRH3', 'DatasetSize', 'DatasetName'], {}), '(CDRH3, DatasetSize, DatasetName)\n', (14812, 14845), True, 'import ASAP.FeatureExtraction as extract\n'), ((14893, 15048), 'ASAP.FeatureExtraction.GetFeatureVectors', 'extract.GetFeatureVectors', (['OneHotGerm', 'GermFeatureNames', 'OneHotCanon', 'CanonFeatureNames', 'OneHotPI', 'PIFeatureNames', 'MultiHotMotif', 'MotifFeatureNames'], {}), '(OneHotGerm, GermFeatureNames, OneHotCanon,\n CanonFeatureNames, OneHotPI, PIFeatureNames, MultiHotMotif,\n MotifFeatureNames)\n', (14918, 15048), True, 'import ASAP.FeatureExtraction as extract\n'), ((5731, 5744), 'collections.Counter', 'Counter', (['feat'], {}), '(feat)\n', (5738, 5744), False, 'from collections import Counter\n')]
|
'''
name: E#01
author: <NAME>
email: <EMAIL>
link: https://www.youtube.com/channel/UCNN3bpPlWWUkUMB7gjcUFlw
MIT License https://github.com/repen/E-parsers/blob/master/License
'''
import requests
from bs4 import BeautifulSoup
url = "http://light-science.ru/kosmos/vselennaya/top-10-samyh-bolshih-zvezd-vo-vselennoj.html"
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; \
x64; rv:47.0) Gecko/20100101 Firefox/48.0'}
response = requests.get(url, headers=header)
html = response.text
soup = BeautifulSoup(html, "html.parser")
container = soup.find("div", {"class":"td-post-content"})
elements = container.find_all("p")
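# the Russian header below translates to "top of the biggest stars:"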
string = "топ звезд самых больших: \n"
for element in elements:
if element.find("strong"):
string += "\t" + element.strong.text + "\n"
with open("data.txt", "w", encoding="utf8") as f:
f.write(string)
|
[
"bs4.BeautifulSoup",
"requests.get"
] |
[((475, 508), 'requests.get', 'requests.get', (['url'], {'headers': 'header'}), '(url, headers=header)\n', (487, 508), False, 'import requests\n'), ((539, 573), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (552, 573), False, 'from bs4 import BeautifulSoup\n')]
|
#!/usr/bin/env python3
from ontobio.sparql2ontology import *
from networkx.algorithms.dag import ancestors
import time
def r():
t1 = time.process_time()
get_edges('pato')
t2 = time.process_time()
print(t2-t1)
r()
r()
r()
"""
LRU is much faster, but does not persist. However, percache should be fast enough.
# percache
## ENVO
$ python ./obographs/bin/timeit.py
QUERYING:envo
1.103934
0.0032450000000001644
0.003185999999999911
$ python ./obographs/bin/timeit.py
0.018115000000000048
0.00362800000000002
0.003180000000000016
## GO
$ python ./obographs/bin/timeit.py
QUERYING:go
13.218031
0.04876699999999978
0.04904600000000059
$ python ./obographs/bin/timeit.py
0.05928599999999995
0.045568
0.045347000000000026
# lru
$ python ./obographs/bin/timeit.py
QUERYING:envo
1.0635080000000001
2.0000000000575113e-06
1.000000000139778e-06
$ python ./obographs/bin/timeit.py
QUERYING:go
13.225105000000001
2.000000000279556e-06
0.0
"""
|
[
"time.process_time"
] |
[((140, 159), 'time.process_time', 'time.process_time', ([], {}), '()\n', (157, 159), False, 'import time\n'), ((191, 210), 'time.process_time', 'time.process_time', ([], {}), '()\n', (208, 210), False, 'import time\n')]
|
import os
from distutils.dir_util import copy_tree
# import PyInstaller.__main__
pyinst_args = [
'-c',
'serve_up.py',
'--name=ServeUp',
'--onefile',
'--hidden-import=whitenoise',
'--hidden-import=whitenoise.middleware',
'--hidden-import=visitors.admin',
'--hidden-import=tables.admin',
'--hidden-import=orders.admin',
'--hidden-import=menu.admin',
'--clean',
]
# PyInstaller.__main__.run(pyinst_args) # running pyinstaller via this script in windows is super brittle
os.system('pyinstaller {}'.format(' '.join(pyinst_args))) # Just use the command line instead
dist_static_path = os.path.join('dist', 'static')
if not os.path.exists(dist_static_path):
os.mkdir(dist_static_path)
copy_tree('static', dist_static_path)
dist_templates_path = os.path.join('dist', 'templates')
if not os.path.exists(dist_templates_path):
os.mkdir(dist_templates_path)
copy_tree('templates', dist_templates_path)
|
[
"os.mkdir",
"os.path.join",
"os.path.exists",
"distutils.dir_util.copy_tree"
] |
[((652, 682), 'os.path.join', 'os.path.join', (['"""dist"""', '"""static"""'], {}), "('dist', 'static')\n", (664, 682), False, 'import os\n'), ((758, 795), 'distutils.dir_util.copy_tree', 'copy_tree', (['"""static"""', 'dist_static_path'], {}), "('static', dist_static_path)\n", (767, 795), False, 'from distutils.dir_util import copy_tree\n'), ((821, 854), 'os.path.join', 'os.path.join', (['"""dist"""', '"""templates"""'], {}), "('dist', 'templates')\n", (833, 854), False, 'import os\n'), ((936, 979), 'distutils.dir_util.copy_tree', 'copy_tree', (['"""templates"""', 'dist_templates_path'], {}), "('templates', dist_templates_path)\n", (945, 979), False, 'from distutils.dir_util import copy_tree\n'), ((691, 723), 'os.path.exists', 'os.path.exists', (['dist_static_path'], {}), '(dist_static_path)\n', (705, 723), False, 'import os\n'), ((730, 756), 'os.mkdir', 'os.mkdir', (['dist_static_path'], {}), '(dist_static_path)\n', (738, 756), False, 'import os\n'), ((863, 898), 'os.path.exists', 'os.path.exists', (['dist_templates_path'], {}), '(dist_templates_path)\n', (877, 898), False, 'import os\n'), ((905, 934), 'os.mkdir', 'os.mkdir', (['dist_templates_path'], {}), '(dist_templates_path)\n', (913, 934), False, 'import os\n')]
|
"""
Platform independent ssh port forwarding
Much code stolen from the paramiko example
"""
import select
try:
import SocketServer
except ImportError:
import socketserver as SocketServer
import paramiko
SSH_PORT = 22
DEFAULT_PORT = 5432
class ForwardServer (SocketServer.ThreadingTCPServer):
daemon_threads = True
allow_reuse_address = True
class Handler (SocketServer.BaseRequestHandler):
def handle(self):
try:
chan = self.ssh_transport.open_channel('direct-tcpip',
(self.chain_host,
self.chain_port),
self.request.getpeername())
except Exception as e:
print('Incoming request to %s:%d failed: %s' % (self.chain_host,
self.chain_port,
repr(e)))
return
if chan is None:
print('Incoming request to %s:%d was rejected by the SSH server.'
% (self.chain_host, self.chain_port))
return
print('Connected! Tunnel open %r -> %r -> %r' %
(self.request.getpeername(),
chan.getpeername(), (self.chain_host, self.chain_port)))
while True:
r, w, x = select.select([self.request, chan], [], [])
if self.request in r:
data = self.request.recv(1024)
if len(data) == 0:
break
chan.send(data)
if chan in r:
data = chan.recv(1024)
if len(data) == 0:
break
self.request.send(data)
peername = self.request.getpeername()
chan.close()
self.request.close()
print('Tunnel closed from %r' % (peername,))
def forward_tunnel(local_port, remote_host, remote_port, transport):
# this is a little convoluted, but lets me configure things for the Handler
# object. (SocketServer doesn't give Handlers any way to access the outer
# server normally.)
    class SubHandler(Handler):
        chain_host = remote_host
        chain_port = remote_port
        ssh_transport = transport
    ForwardServer(('', local_port), SubHandler).serve_forever()
def connect_ssh(server, login, password, port=SSH_PORT):
"""Return a paramiko.SSHClient on successfull connection, otherwise returns
None
"""
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
print('Connecting to ssh host %s:%d ...' % (server, port))
try:
client.connect(server, port, login, password=password)
print("Connection successful")
return client
except Exception as e:
print('*** Failed to connect to %s:%d: %r' % (server, port, e))
return None
def portforward(client, threadfinishedmutex,
remote_host,
local_port=DEFAULT_PORT,
remote_port=DEFAULT_PORT):
"""Neverending portforwarding thread. Locks threadfinishedmutex
on failure.
client has to be a connected paramiko.SSHClient."""
print('Now forwarding port %d to %s:%d ...' % (local_port, remote_host,
remote_port))
try:
forward_tunnel(local_port, remote_host, remote_port,
client.get_transport())
threadfinishedmutex.acquire()
except Exception as e:
threadfinishedmutex.acquire()
raise e
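# Hedged usage sketch (an addition, not part of the original module); the host
# names and credentials below are placeholders.
def _example_usage():
    import threading
    client = connect_ssh('gateway.example.com', 'user', 'secret')
    if client is None:
        return
    finished = threading.Lock()
    worker = threading.Thread(target=portforward,
                              args=(client, finished, 'db.internal'),
                              daemon=True)
    worker.start()
    # portforward acquires `finished` once the tunnel terminates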
|
[
"select.select",
"paramiko.WarningPolicy",
"paramiko.SSHClient"
] |
[((2552, 2572), 'paramiko.SSHClient', 'paramiko.SSHClient', ([], {}), '()\n', (2570, 2572), False, 'import paramiko\n'), ((2647, 2671), 'paramiko.WarningPolicy', 'paramiko.WarningPolicy', ([], {}), '()\n', (2669, 2671), False, 'import paramiko\n'), ((1401, 1444), 'select.select', 'select.select', (['[self.request, chan]', '[]', '[]'], {}), '([self.request, chan], [], [])\n', (1414, 1444), False, 'import select\n')]
|
import numpy as np
import tensorflow as tf
from rl.losses import QLearningLoss
from rl.algorithms import OnlineRLAlgorithm
from rl.runner import *
from rl.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
from rl import util
from deeplearning.layers import Adam, RunningNorm
from deeplearning.schedules import LinearSchedule
from deeplearning import logger
from collections import deque
import time
class QLearning(OnlineRLAlgorithm):
def defaults(self):
return {
'lr': 1e-4,
'momentum': 0.9,
'beta2': 0.999,
'clip_norm': 10.,
'gamma': 0.99,
'learning_starts': int(1e5),
'exploration_timesteps': int(1e6),
'final_eps': 0.02,
'target_update_freq': int(1e4),
'prioritized_replay': True,
'huber_loss': True,
'buffer_size': int(1e6),
'replay_alpha': 0.6,
'replay_beta': 0.4,
't_beta_max': int(1e7)
}
def __init__(self,
logdir,
env_fn,
model_fn,
nenv,
rollout_length=1,
batch_size=32,
callback=None,
**kwargs
):
defaults = self.defaults()
for k in kwargs:
assert k in defaults, "Unknown argument: {}".format(k)
defaults.update(kwargs)
super().__init__(logdir, env_fn, model_fn, nenv, rollout_length, batch_size, callback, runner_flags=[], **defaults)
self.target_sync = tf.group([tf.assign(v1,v2) for v1,v2 in zip(self.loss.qtarg.variables(), self.loss.qvals.variables())])
if self.args.prioritized_replay:
self.buffer = PrioritizedReplayBuffer(self.args.buffer_size, alpha=self.args.replay_alpha)
else:
self.buffer = ReplayBuffer(self.args.buffer_size)
# determine if the network has a RunningNorm submodule that needs to be updated.
submods = self.opt.find_submodules_by_instance(RunningNorm)
self.rn = submods[0] if len(submods) > 0 else None
self.losses = deque(maxlen=100)
self.nsteps = 0
self.last_target_sync = (self.t // self.args.target_update_freq) * self.args.target_update_freq
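        # linear schedules: PER importance-sampling beta anneals from
        # replay_beta up to 1.0 over t_beta_max steps; exploration epsilon
        # anneals from 1.0 down to final_eps over exploration_timesteps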
self.beta_schedule = LinearSchedule(self.args.t_beta_max, 1.0, self.args.replay_beta)
self.eps_schedule = LinearSchedule(int(self.args.exploration_timesteps), self.args.final_eps, 1.0)
self._time_start = time.time()
self._t_start = self.t
def _def_loss(self, model_fn, env):
target_network = model_fn(env)
target_network.build('target', self.nenv, self.batch_size, trainable=False)
# extra network for double dqn. Tie variables with network
return QLearningLoss('loss', model_fn(env), model_fn(env), target_network, gamma=self.args.gamma, use_huber_loss=self.args.huber_loss)
def _def_opt(self, loss):
return Adam(
'opt',
loss,
lr=self.args.lr,
beta1=self.args.momentum,
beta2=self.args.beta2,
clip_norm=self.args.clip_norm
)
def _before_step(self):
if self.t == 0 or self.t - self.last_target_sync > self.args.target_update_freq:
self.target_sync.run()
self.last_target_sync = self.t
self.actor.update_eps(self.eps_schedule.value(self.t))
def _process_rollout(self, rollout):
self._update_buffer(rollout)
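        # warm-up: keep collecting experience until the buffer holds
        # learning_starts transitions (or is completely full) before sampling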
while len(self.buffer) < self.args.learning_starts and len(self.buffer) != self.args.buffer_size:
self._update_buffer(self.runner.rollout())
self.t += self.timesteps_per_step
if self.args.prioritized_replay:
obs, acs, rews, next_obs, dones, weights, self._inds = self.buffer.sample(self.nenv * self.batch_size, self.beta_schedule.value(self.t))
inputs=[obs, next_obs, next_obs, rews, acs, dones, weights[...,None]]
else:
obs, acs, rews, next_obs, dones = self.buffer.sample(self.nenv * self.batch_size)
inputs=[obs, next_obs, next_obs, rews, acs, dones]
return inputs
def _update_buffer(self, rollout):
if self.rn is not None:
x = np.asarray(rollout.obs)
self._update_running_norm(x.reshape([-1] + list(x.shape[2:])))
for i,obs in enumerate(rollout.obs):
next_obs = rollout.end_ob if i == len(rollout.obs) - 1 else rollout.obs[i+1]
for j in range(self.nenv):
ob = obs[j]
next_ob = next_obs[j]
ac = rollout.actions[i][j]
r = rollout.rewards[i][j]
done = rollout.dones[i][j]
self.buffer.add(ob, ac, r, next_ob, done)
def _update_model(self, data):
outs = self.opt.run(inputs=data, state=[], state_out=False, update=True, td=True)
if self.args.prioritized_replay:
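            # PER priority update: |TD error| + small epsilon so that no
            # transition's sampling probability collapses to zero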
self.buffer.update_priorities(self._inds, priorities=np.abs(outs['td'][:,0]) + 1e-6)
self.losses.append(outs['out'])
return outs
def _after_step(self, rollout, data, outs):
self.nsteps += 1
if self.nsteps % 100 == 0:
logger.log("========================| Timestep: {} |========================".format(self.t))
meanloss = np.mean(np.array(self.losses), axis=0)
# Logging stats...
logger.logkv('Loss', meanloss)
logger.logkv('timesteps', self.t)
logger.logkv('serial timesteps', self.t / self.nenv)
logger.logkv('mean episode length', np.mean(self.runner.get_episode_lengths()))
logger.logkv('mean episode reward', np.mean(self.runner.get_episode_rewards()))
logger.logkv('fps', int((self.t - self._t_start) / (time.time() - self._time_start)))
logger.logkv('time_elapsed', time.time() - self._time_start)
            logger.logkv('exploration eps', self.actor.eps)
logger.dumpkvs()
def _update_running_norm(self, x):
mean = x.mean(axis=0)
var = x.var(axis=0)
count = x.shape[0]
self.rn.update(mean, var, count)
def update_lr(self, new_lr):
self.opt.update_lr(new_lr)
|
[
"numpy.abs",
"numpy.asarray",
"deeplearning.logger.dumpkvs",
"deeplearning.schedules.LinearSchedule",
"time.time",
"deeplearning.logger.logkv",
"deeplearning.layers.Adam",
"tensorflow.assign",
"numpy.array",
"rl.replay_buffer.PrioritizedReplayBuffer",
"rl.replay_buffer.ReplayBuffer",
"collections.deque"
] |
[((2065, 2082), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (2070, 2082), False, 'from collections import deque\n'), ((2240, 2304), 'deeplearning.schedules.LinearSchedule', 'LinearSchedule', (['self.args.t_beta_max', '(1.0)', 'self.args.replay_beta'], {}), '(self.args.t_beta_max, 1.0, self.args.replay_beta)\n', (2254, 2304), False, 'from deeplearning.schedules import LinearSchedule\n'), ((2439, 2450), 'time.time', 'time.time', ([], {}), '()\n', (2448, 2450), False, 'import time\n'), ((2902, 3021), 'deeplearning.layers.Adam', 'Adam', (['"""opt"""', 'loss'], {'lr': 'self.args.lr', 'beta1': 'self.args.momentum', 'beta2': 'self.args.beta2', 'clip_norm': 'self.args.clip_norm'}), "('opt', loss, lr=self.args.lr, beta1=self.args.momentum, beta2=self.\n args.beta2, clip_norm=self.args.clip_norm)\n", (2906, 3021), False, 'from deeplearning.layers import Adam, RunningNorm\n'), ((1673, 1749), 'rl.replay_buffer.PrioritizedReplayBuffer', 'PrioritizedReplayBuffer', (['self.args.buffer_size'], {'alpha': 'self.args.replay_alpha'}), '(self.args.buffer_size, alpha=self.args.replay_alpha)\n', (1696, 1749), False, 'from rl.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer\n'), ((1790, 1825), 'rl.replay_buffer.ReplayBuffer', 'ReplayBuffer', (['self.args.buffer_size'], {}), '(self.args.buffer_size)\n', (1802, 1825), False, 'from rl.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer\n'), ((4222, 4245), 'numpy.asarray', 'np.asarray', (['rollout.obs'], {}), '(rollout.obs)\n', (4232, 4245), True, 'import numpy as np\n'), ((5392, 5422), 'deeplearning.logger.logkv', 'logger.logkv', (['"""Loss"""', 'meanloss'], {}), "('Loss', meanloss)\n", (5404, 5422), False, 'from deeplearning import logger\n'), ((5435, 5468), 'deeplearning.logger.logkv', 'logger.logkv', (['"""timesteps"""', 'self.t'], {}), "('timesteps', self.t)\n", (5447, 5468), False, 'from deeplearning import logger\n'), ((5481, 5533), 'deeplearning.logger.logkv', 'logger.logkv', (['"""serial timesteps"""', '(self.t / self.nenv)'], {}), "('serial timesteps', self.t / self.nenv)\n", (5493, 5533), False, 'from deeplearning import logger\n'), ((5901, 5953), 'deeplearning.logger.logkv', 'logger.logkv', (['"""time spent exploring"""', 'self.actor.eps'], {}), "('time spent exploring', self.actor.eps)\n", (5913, 5953), False, 'from deeplearning import logger\n'), ((5966, 5982), 'deeplearning.logger.dumpkvs', 'logger.dumpkvs', ([], {}), '()\n', (5980, 5982), False, 'from deeplearning import logger\n'), ((1512, 1529), 'tensorflow.assign', 'tf.assign', (['v1', 'v2'], {}), '(v1, v2)\n', (1521, 1529), True, 'import tensorflow as tf\n'), ((5318, 5339), 'numpy.array', 'np.array', (['self.losses'], {}), '(self.losses)\n', (5326, 5339), True, 'import numpy as np\n'), ((5857, 5868), 'time.time', 'time.time', ([], {}), '()\n', (5866, 5868), False, 'import time\n'), ((4978, 5002), 'numpy.abs', 'np.abs', (["outs['td'][:, 0]"], {}), "(outs['td'][:, 0])\n", (4984, 5002), True, 'import numpy as np\n'), ((5782, 5793), 'time.time', 'time.time', ([], {}), '()\n', (5791, 5793), False, 'import time\n')]
|
from pprint import pprint
import httpretty
from httpretty import httprettified
import unittest
from checks import load_favicons
from checks.config import Config
@httprettified
class TestFavicons(unittest.TestCase):
def test_favicons(self):
# This site has a favicon
url1 = 'http://example1.com/favicon.ico'
httpretty.register_uri(httpretty.HEAD, url1,
body='',
adding_headers={
"Content-type": "image/x-ico",
})
# This site has no favicon
url2 = 'http://example2.com/favicon.ico'
httpretty.register_uri(httpretty.HEAD, url2,
status=404,
body='Not found',
adding_headers={
"Content-type": "text/plain",
})
config = Config(urls=['http://example1.com/path/', 'http://example2.com/'])
checker = load_favicons.Checker(config=config)
result = checker.run()
pprint(result)
self.assertEqual(result, {
'http://example1.com/path/': {
'url': 'http://example1.com/favicon.ico'
}
})
|
[
"checks.load_favicons.Checker",
"httpretty.register_uri",
"checks.config.Config",
"pprint.pprint"
] |
[((339, 445), 'httpretty.register_uri', 'httpretty.register_uri', (['httpretty.HEAD', 'url1'], {'body': '""""""', 'adding_headers': "{'Content-type': 'image/x-ico'}"}), "(httpretty.HEAD, url1, body='', adding_headers={\n 'Content-type': 'image/x-ico'})\n", (361, 445), False, 'import httpretty\n'), ((673, 798), 'httpretty.register_uri', 'httpretty.register_uri', (['httpretty.HEAD', 'url2'], {'status': '(404)', 'body': '"""Not found"""', 'adding_headers': "{'Content-type': 'text/plain'}"}), "(httpretty.HEAD, url2, status=404, body='Not found',\n adding_headers={'Content-type': 'text/plain'})\n", (695, 798), False, 'import httpretty\n'), ((976, 1042), 'checks.config.Config', 'Config', ([], {'urls': "['http://example1.com/path/', 'http://example2.com/']"}), "(urls=['http://example1.com/path/', 'http://example2.com/'])\n", (982, 1042), False, 'from checks.config import Config\n'), ((1061, 1097), 'checks.load_favicons.Checker', 'load_favicons.Checker', ([], {'config': 'config'}), '(config=config)\n', (1082, 1097), False, 'from checks import load_favicons\n'), ((1138, 1152), 'pprint.pprint', 'pprint', (['result'], {}), '(result)\n', (1144, 1152), False, 'from pprint import pprint\n')]
|
import gi
import numpy.testing
import pint
import pyRestTable
import pytest
gi.require_version("Hkl", "5.0")
# NOTE: MUST call gi.require_version() BEFORE import hkl
from hkl.calc import A_KEV
from hkl.diffract import Constraint
from hkl import SimulatedE4CV
class Fourc(SimulatedE4CV):
...
@pytest.fixture(scope="function")
def fourc():
fourc = Fourc("", name="fourc")
fourc.wait_for_connection()
fourc._update_calc_energy()
return fourc
def test_calc_energy_permit(fourc):
assert fourc._calc_energy_update_permitted
fourc.energy_update_calc_flag.put(False)
assert not fourc._calc_energy_update_permitted
nrg = fourc.calc.energy
fourc.energy.put(5.989) # BTW: Cr K absorption edge
numpy.testing.assert_almost_equal(fourc.energy.get(), 5.989)
numpy.testing.assert_almost_equal(fourc.calc.energy, nrg)
fourc._energy_changed()
numpy.testing.assert_almost_equal(fourc.calc.energy, nrg)
fourc._energy_changed(fourc.energy.get())
numpy.testing.assert_almost_equal(fourc.calc.energy, nrg)
fourc._energy_changed(5.989)
numpy.testing.assert_almost_equal(fourc.calc.energy, nrg)
fourc._update_calc_energy()
numpy.testing.assert_almost_equal(fourc.calc.energy, 5.989)
# test that value argument is ignored
fourc._update_calc_energy(A_KEV / 1)
numpy.testing.assert_almost_equal(fourc.calc.energy, 5.989)
def test_energy(fourc):
numpy.testing.assert_almost_equal(fourc.energy.get(), fourc.calc.energy)
for nrg in (8.0, 8.04, 9.0, 0.931):
fourc.energy.put(nrg)
numpy.testing.assert_almost_equal(fourc.energy.get(), nrg)
numpy.testing.assert_almost_equal(fourc.calc.energy, nrg)
numpy.testing.assert_almost_equal(fourc.calc.wavelength, A_KEV / nrg)
def test_energy_offset(fourc):
assert fourc.energy_offset.get() == 0
nrg = 8.0
fourc.energy.put(nrg)
numpy.testing.assert_almost_equal(fourc.energy.get(), nrg)
numpy.testing.assert_almost_equal(fourc.energy.get(), fourc.calc.energy)
for offset in (0.05, -0.1):
fourc.energy_offset.put(offset)
fourc.energy.put(nrg)
numpy.testing.assert_almost_equal(fourc.energy.get(), nrg)
numpy.testing.assert_almost_equal(fourc.energy.get() + offset, fourc.calc.energy)
def test_energy_offset_units(fourc):
assert fourc.energy_offset.get() == 0
assert fourc.energy_units.get() == "keV"
fourc.energy_units.put("eV")
assert fourc.energy_units.get() == "eV"
nrg = 931
fourc.energy.put(nrg)
numpy.testing.assert_almost_equal(fourc.energy.get(), nrg)
numpy.testing.assert_almost_equal(fourc.energy.get() / 1000, fourc.calc.energy)
for offset in (5, -6):
fourc.energy_offset.put(offset)
fourc.energy.put(nrg)
numpy.testing.assert_almost_equal(fourc.energy.get(), nrg)
numpy.testing.assert_almost_equal((fourc.energy.get() + offset) / 1000, fourc.calc.energy)
def test_energy_units_931eV(fourc):
assert fourc.energy_units.get() == "keV"
fourc.energy_units.put("eV")
assert fourc.energy_units.get() == "eV"
eV = 931
fourc.energy.put(eV)
numpy.testing.assert_almost_equal(fourc.energy.get(), eV)
numpy.testing.assert_almost_equal(fourc.calc.energy, eV / 1000)
def test_energy_units_issue79(fourc):
# issue #79
fourc.energy_units.put("eV")
fourc.energy_offset.put(0)
eV = 1746
fourc.energy.put(eV)
numpy.testing.assert_almost_equal(fourc.calc.energy, eV / 1000)
numpy.testing.assert_almost_equal(
# fmt: off
pint.Quantity(fourc.calc.energy, "keV").to(fourc.energy_units.get()).magnitude,
fourc.energy.get(),
# fmt: on
)
def test_energy_units_offset(fourc):
fourc.energy_units.put("keV")
fourc.energy.put(7.985)
fourc.energy_offset.put(0.015)
assert fourc.calc.energy == 8.0
assert round(fourc.energy.get(), 6) == 7.985
fourc.energy.put(8)
assert fourc.calc.energy == 8.015
assert round(fourc.energy.get(), 6) == 8
fourc.energy_offset.put(0.0)
assert fourc.calc.energy == 8.0
def test_energy_units_issue86(fourc):
# issue #86
# changing units or offset changes .energy, not .calc.energy
fourc.energy.put(8)
fourc.energy_offset.put(0.015)
fourc.energy_units.put("eV")
# test interim state when fourc.energy value has not changed but units have
assert round(fourc.calc.energy, 6) == 8.015e-3
assert round(fourc.energy.get(), 1) == 8
fourc.energy.put(8000)
assert round(fourc.calc.energy, 8) == 8.000015
assert round(fourc.energy.get(), 1) == 8000
fourc.energy_offset.put(15)
assert round(fourc.calc.energy, 8) == 8.015
assert round(fourc.energy.get(), 1) == 8000
fourc.energy.put(8000)
assert round(fourc.calc.energy, 8) == 8.015
assert round(fourc.energy.get(), 1) == 8000
def test_names(fourc):
assert fourc.geometry_name.get() == "E4CV"
assert fourc.class_name.get() == "Fourc"
def test_forward_solutions_table(fourc):
fourc.energy.put(A_KEV / 1.54)
# (100) has chi ~ 0 which poses occasional roundoff errors
# (sometimes -0.00000, sometimes 0.00000)
sol = fourc.forward(1, 0, 0)
assert pytest.approx(sol.omega, 1e-5) == -30
assert pytest.approx(sol.chi, 1e-5) == 0
assert pytest.approx(sol.phi, 1e-5) == -90
assert pytest.approx(sol.tth, 1e-5) == -60
fourc.apply_constraints({"tth": Constraint(0, 180, 0, True)})
tbl = fourc.forward_solutions_table(
# fmt: off
[
[1, 1, 0],
[1, 1, 1],
[100, 1, 1], # no solutions
]
# fmt: on
)
received = str(tbl).splitlines()
expected = [
"=========== ======== ===== ======== ==== =====",
"(hkl) solution omega chi phi tth ",
"=========== ======== ===== ======== ==== =====",
"[1, 1, 0] 0 45.0 45.0 90.0 90.0 ",
"[1, 1, 1] 0 60.0 35.26439 45.0 120.0",
"[100, 1, 1] none ",
"=========== ======== ===== ======== ==== =====",
]
for r, e in zip(received, expected):
assert r == e
def test_pa(fourc, capsys):
tbl = fourc.pa()
assert isinstance(tbl, pyRestTable.Table)
out, err = capsys.readouterr()
assert len(out) > 0
assert err == ""
out = [v.rstrip() for v in out.strip().splitlines()]
expected = [
"===================== ====================================================================",
"term value",
"===================== ====================================================================",
"diffractometer fourc",
"geometry E4CV",
"class Fourc",
"energy (keV) 8.00000",
"wavelength (angstrom) 1.54980",
"calc engine hkl",
"mode bissector",
"positions ===== =======",
" name value",
" ===== =======",
" omega 0.00000",
" chi 0.00000",
" phi 0.00000",
" tth 0.00000",
" ===== =======",
"constraints ===== ========= ========== ===== ====",
" axis low_limit high_limit value fit",
" ===== ========= ========== ===== ====",
" omega -180.0 180.0 0.0 True",
" chi -180.0 180.0 0.0 True",
" phi -180.0 180.0 0.0 True",
" tth -180.0 180.0 0.0 True",
" ===== ========= ========== ===== ====",
"sample: main ================ ===================================================",
" term value",
" ================ ===================================================",
" unit cell edges a=1.54, b=1.54, c=1.54",
" unit cell angles alpha=90.0, beta=90.0, gamma=90.0",
" [U] [[1. 0. 0.]",
" [0. 1. 0.]",
" [0. 0. 1.]]",
" [UB] [[ 4.07999046e+00 -2.49827363e-16 -2.49827363e-16]",
" [ 0.00000000e+00 4.07999046e+00 -2.49827363e-16]",
" [ 0.00000000e+00 0.00000000e+00 4.07999046e+00]]",
" ================ ===================================================",
"===================== ====================================================================",
]
assert len(out) == len(expected)
assert out == expected
def test_wh(fourc, capsys):
tbl = fourc.wh()
assert isinstance(tbl, pyRestTable.Table)
out, err = capsys.readouterr()
assert len(out) > 0
assert err == ""
out = [v.rstrip() for v in out.strip().splitlines()]
expected = [
"===================== ========= =========",
"term value axis_type",
"===================== ========= =========",
"diffractometer fourc",
"sample name main",
"energy (keV) 8.00000",
"wavelength (angstrom) 1.54980",
"calc engine hkl",
"mode bissector",
"h 0.0 pseudo",
"k 0.0 pseudo",
"l 0.0 pseudo",
"omega 0 real",
"chi 0 real",
"phi 0 real",
"tth 0 real",
"===================== ========= =========",
]
assert len(out) == len(expected)
assert out == expected
def test_show_constraints(fourc, capsys):
fourc.show_constraints()
out, err = capsys.readouterr()
assert len(out) > 0
assert err == ""
out = [v.rstrip() for v in out.strip().splitlines()]
expected = [
"===== ========= ========== ===== ====",
"axis low_limit high_limit value fit",
"===== ========= ========== ===== ====",
"omega -180.0 180.0 0.0 True",
"chi -180.0 180.0 0.0 True",
"phi -180.0 180.0 0.0 True",
"tth -180.0 180.0 0.0 True",
"===== ========= ========== ===== ====",
]
for r, e in zip(out, expected):
assert r.rstrip() == e.rstrip()
def test_apply_constraints(fourc):
fourc.energy.put(A_KEV / 1.54)
# fmt: off
fourc.apply_constraints(
{
"tth": Constraint(0, 180, 0, True),
"chi": Constraint(0, 180, 0, True),
}
)
# fmt: on
sol = fourc.forward(1, 0, 0)
assert pytest.approx(sol.omega, 1e-5) == 30
assert pytest.approx(sol.chi, 1e-5) == 0
assert pytest.approx(sol.phi, 1e-5) == 90
assert pytest.approx(sol.tth, 1e-5) == 60
def test_specify_engine():
import hkl
import numpy as np
from ophyd import Component as Cpt
from ophyd import PseudoSingle
from ophyd import SoftPositioner
class Q4C(hkl.E4CV):
q = Cpt(PseudoSingle, "")
omega = Cpt(SoftPositioner, limits=(-180, 180), init_pos=0)
chi = Cpt(SoftPositioner, limits=(-180, 180), init_pos=0)
phi = Cpt(SoftPositioner, limits=(-180, 180), init_pos=0)
tth = Cpt(SoftPositioner, limits=(-180, 180), init_pos=0)
q4c = Q4C("", name="q4c")
assert q4c.calc.engine.name == "hkl"
q4c = Q4C("", name="q4c", engine="q")
assert q4c.calc.engine.name == "q"
q = 1.0
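    # Added note: q = 4*pi*sin(theta)/wavelength with tth = 2*theta, so the
    # expected tth below is 2*arcsin(q*wavelength/(4*pi)), converted to degrees.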
angle = 2 * np.arcsin(q * q4c.calc.wavelength / 4 / np.pi) * 180 / np.pi
value = q4c.forward(q)
assert round(value.tth, 5) == round(angle, 5)
assert value.omega == 0.0
assert value.chi == 0.0
assert value.phi == 0.0
|
[
"gi.require_version",
"pytest.fixture",
"numpy.arcsin",
"ophyd.Component",
"hkl.diffract.Constraint",
"pytest.approx",
"pint.Quantity"
] |
[((77, 109), 'gi.require_version', 'gi.require_version', (['"""Hkl"""', '"""5.0"""'], {}), "('Hkl', '5.0')\n", (95, 109), False, 'import gi\n'), ((301, 333), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (315, 333), False, 'import pytest\n'), ((5226, 5257), 'pytest.approx', 'pytest.approx', (['sol.omega', '(1e-05)'], {}), '(sol.omega, 1e-05)\n', (5239, 5257), False, 'import pytest\n'), ((5275, 5304), 'pytest.approx', 'pytest.approx', (['sol.chi', '(1e-05)'], {}), '(sol.chi, 1e-05)\n', (5288, 5304), False, 'import pytest\n'), ((5320, 5349), 'pytest.approx', 'pytest.approx', (['sol.phi', '(1e-05)'], {}), '(sol.phi, 1e-05)\n', (5333, 5349), False, 'import pytest\n'), ((5367, 5396), 'pytest.approx', 'pytest.approx', (['sol.tth', '(1e-05)'], {}), '(sol.tth, 1e-05)\n', (5380, 5396), False, 'import pytest\n'), ((11196, 11227), 'pytest.approx', 'pytest.approx', (['sol.omega', '(1e-05)'], {}), '(sol.omega, 1e-05)\n', (11209, 11227), False, 'import pytest\n'), ((11244, 11273), 'pytest.approx', 'pytest.approx', (['sol.chi', '(1e-05)'], {}), '(sol.chi, 1e-05)\n', (11257, 11273), False, 'import pytest\n'), ((11289, 11318), 'pytest.approx', 'pytest.approx', (['sol.phi', '(1e-05)'], {}), '(sol.phi, 1e-05)\n', (11302, 11318), False, 'import pytest\n'), ((11335, 11364), 'pytest.approx', 'pytest.approx', (['sol.tth', '(1e-05)'], {}), '(sol.tth, 1e-05)\n', (11348, 11364), False, 'import pytest\n'), ((11586, 11607), 'ophyd.Component', 'Cpt', (['PseudoSingle', '""""""'], {}), "(PseudoSingle, '')\n", (11589, 11607), True, 'from ophyd import Component as Cpt\n'), ((11624, 11675), 'ophyd.Component', 'Cpt', (['SoftPositioner'], {'limits': '(-180, 180)', 'init_pos': '(0)'}), '(SoftPositioner, limits=(-180, 180), init_pos=0)\n', (11627, 11675), True, 'from ophyd import Component as Cpt\n'), ((11690, 11741), 'ophyd.Component', 'Cpt', (['SoftPositioner'], {'limits': '(-180, 180)', 'init_pos': '(0)'}), '(SoftPositioner, limits=(-180, 180), init_pos=0)\n', (11693, 11741), True, 'from ophyd import Component as Cpt\n'), ((11756, 11807), 'ophyd.Component', 'Cpt', (['SoftPositioner'], {'limits': '(-180, 180)', 'init_pos': '(0)'}), '(SoftPositioner, limits=(-180, 180), init_pos=0)\n', (11759, 11807), True, 'from ophyd import Component as Cpt\n'), ((11822, 11873), 'ophyd.Component', 'Cpt', (['SoftPositioner'], {'limits': '(-180, 180)', 'init_pos': '(0)'}), '(SoftPositioner, limits=(-180, 180), init_pos=0)\n', (11825, 11873), True, 'from ophyd import Component as Cpt\n'), ((5440, 5467), 'hkl.diffract.Constraint', 'Constraint', (['(0)', '(180)', '(0)', '(True)'], {}), '(0, 180, 0, True)\n', (5450, 5467), False, 'from hkl.diffract import Constraint\n'), ((11045, 11072), 'hkl.diffract.Constraint', 'Constraint', (['(0)', '(180)', '(0)', '(True)'], {}), '(0, 180, 0, True)\n', (11055, 11072), False, 'from hkl.diffract import Constraint\n'), ((11093, 11120), 'hkl.diffract.Constraint', 'Constraint', (['(0)', '(180)', '(0)', '(True)'], {}), '(0, 180, 0, True)\n', (11103, 11120), False, 'from hkl.diffract import Constraint\n'), ((12056, 12102), 'numpy.arcsin', 'np.arcsin', (['(q * q4c.calc.wavelength / 4 / np.pi)'], {}), '(q * q4c.calc.wavelength / 4 / np.pi)\n', (12065, 12102), True, 'import numpy as np\n'), ((3577, 3616), 'pint.Quantity', 'pint.Quantity', (['fourc.calc.energy', '"""keV"""'], {}), "(fourc.calc.energy, 'keV')\n", (3590, 3616), False, 'import pint\n')]
|
import tarfile
import zipfile
from utils import download_from_url
# =================================
# Script purpose:
# Download and unzip all raw files
# =================================
# Word frequency calculations from Beijing Language and Culture University
download_from_url(
"http://bcc.blcu.edu.cn/downloads/resources/BCC_LEX_Zh.zip",
"./data/raw/BCC_LEX_Zh.zip",
overwrite=False,
)
# Word frequency calculations for blogs, converted to UTF-8
download_from_url(
"https://www.plecoforums.com/download/blogs_wordfreq-release_utf-8-txt.2602/",
"./data/raw/blogs_wordfreq-release_utf-8.txt",
overwrite=False,
)
# CEDICT dictionary
download_from_url(
"https://www.mdbg.net/chinese/export/cedict/cedict_1_0_ts_utf-8_mdbg.zip",
"./data/raw/cedict_1_0_ts_utf-8_mdbg.zip",
overwrite=True,
)
# CJKVI character decompositions
download_from_url(
"https://raw.githubusercontent.com/cjkvi/cjkvi-ids/master/ids.txt",
"./data/raw/cjkvi_ids.txt",
overwrite=True,
)
# Word segmentation index for jieba
download_from_url(
"https://github.com/fxsjy/jieba/raw/master/extra_dict/dict.txt.big",
"./data/raw/dict.txt.big.txt",
overwrite=True,
)
# FastText CommonCrawl word vectors
download_from_url(
"https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.zh.300.bin.gz",
"./data/raw/cc.zh.300.bin.gz",
overwrite=True,
)
# Tencent word vectors
download_from_url(
"https://ai.tencent.com/ailab/nlp/en/data/Tencent_AILab_ChineseEmbedding.tar.gz",
"./data/raw/Tencent_AILab_ChineseEmbedding.tar.gz",
overwrite=True,
)
print("Unzipping BCC_LEX_Zh.zip... ", end="")
with zipfile.ZipFile("./data/raw/BCC_LEX_Zh.zip", "r") as zip_ref:
zip_ref.extractall("./data/raw/BCC_LEX_Zh")
print("ok")
print("Unzipping cedict_1_0_ts_utf-8_mdbg.zip... ", end="")
with zipfile.ZipFile("./data/raw/cedict_1_0_ts_utf-8_mdbg.zip", "r") as zip_ref:
zip_ref.extractall("./data/raw/cedict_1_0_ts_utf-8_mdbg")
print("ok")
print("Unzipping Tencent_AILab_ChineseEmbedding.zip... ", end="")
with zipfile.ZipFile("./data/raw/Tencent_AILab_ChineseEmbedding.zip", "r") as zip_ref:
zip_ref.extractall("./data/raw/Tencent_AILab_ChineseEmbedding")
print("ok")
|
[
"zipfile.ZipFile",
"utils.download_from_url"
] |
[((254, 382), 'utils.download_from_url', 'download_from_url', (['"""http://bcc.blcu.edu.cn/downloads/resources/BCC_LEX_Zh.zip"""', '"""./data/raw/BCC_LEX_Zh.zip"""'], {'overwrite': '(False)'}), "('http://bcc.blcu.edu.cn/downloads/resources/BCC_LEX_Zh.zip',\n './data/raw/BCC_LEX_Zh.zip', overwrite=False)\n", (271, 382), False, 'from utils import download_from_url\n'), ((455, 625), 'utils.download_from_url', 'download_from_url', (['"""https://www.plecoforums.com/download/blogs_wordfreq-release_utf-8-txt.2602/"""', '"""./data/raw/blogs_wordfreq-release_utf-8.txt"""'], {'overwrite': '(False)'}), "(\n 'https://www.plecoforums.com/download/blogs_wordfreq-release_utf-8-txt.2602/'\n , './data/raw/blogs_wordfreq-release_utf-8.txt', overwrite=False)\n", (472, 625), False, 'from utils import download_from_url\n'), ((652, 812), 'utils.download_from_url', 'download_from_url', (['"""https://www.mdbg.net/chinese/export/cedict/cedict_1_0_ts_utf-8_mdbg.zip"""', '"""./data/raw/cedict_1_0_ts_utf-8_mdbg.zip"""'], {'overwrite': '(True)'}), "(\n 'https://www.mdbg.net/chinese/export/cedict/cedict_1_0_ts_utf-8_mdbg.zip',\n './data/raw/cedict_1_0_ts_utf-8_mdbg.zip', overwrite=True)\n", (669, 812), False, 'from utils import download_from_url\n'), ((853, 991), 'utils.download_from_url', 'download_from_url', (['"""https://raw.githubusercontent.com/cjkvi/cjkvi-ids/master/ids.txt"""', '"""./data/raw/cjkvi_ids.txt"""'], {'overwrite': '(True)'}), "(\n 'https://raw.githubusercontent.com/cjkvi/cjkvi-ids/master/ids.txt',\n './data/raw/cjkvi_ids.txt', overwrite=True)\n", (870, 991), False, 'from utils import download_from_url\n'), ((1035, 1177), 'utils.download_from_url', 'download_from_url', (['"""https://github.com/fxsjy/jieba/raw/master/extra_dict/dict.txt.big"""', '"""./data/raw/dict.txt.big.txt"""'], {'overwrite': '(True)'}), "(\n 'https://github.com/fxsjy/jieba/raw/master/extra_dict/dict.txt.big',\n './data/raw/dict.txt.big.txt', overwrite=True)\n", (1052, 1177), False, 'from utils import download_from_url\n'), ((1221, 1368), 'utils.download_from_url', 'download_from_url', (['"""https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.zh.300.bin.gz"""', '"""./data/raw/cc.zh.300.bin.gz"""'], {'overwrite': '(True)'}), "(\n 'https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.zh.300.bin.gz',\n './data/raw/cc.zh.300.bin.gz', overwrite=True)\n", (1238, 1368), False, 'from utils import download_from_url\n'), ((1399, 1576), 'utils.download_from_url', 'download_from_url', (['"""https://ai.tencent.com/ailab/nlp/en/data/Tencent_AILab_ChineseEmbedding.tar.gz"""', '"""./data/raw/Tencent_AILab_ChineseEmbedding.tar.gz"""'], {'overwrite': '(True)'}), "(\n 'https://ai.tencent.com/ailab/nlp/en/data/Tencent_AILab_ChineseEmbedding.tar.gz'\n , './data/raw/Tencent_AILab_ChineseEmbedding.tar.gz', overwrite=True)\n", (1416, 1576), False, 'from utils import download_from_url\n'), ((1635, 1684), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""./data/raw/BCC_LEX_Zh.zip"""', '"""r"""'], {}), "('./data/raw/BCC_LEX_Zh.zip', 'r')\n", (1650, 1684), False, 'import zipfile\n'), ((1828, 1891), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""./data/raw/cedict_1_0_ts_utf-8_mdbg.zip"""', '"""r"""'], {}), "('./data/raw/cedict_1_0_ts_utf-8_mdbg.zip', 'r')\n", (1843, 1891), False, 'import zipfile\n'), ((2056, 2125), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""./data/raw/Tencent_AILab_ChineseEmbedding.zip"""', '"""r"""'], {}), "('./data/raw/Tencent_AILab_ChineseEmbedding.zip', 'r')\n", (2071, 2125), False, 'import zipfile\n')]
|
import os
import json
from copy import deepcopy
from collections import defaultdict
import ir_datasets
from capreolus import ModuleBase
from capreolus.utils.caching import cached_file, TargetFileExists
from capreolus.utils.trec import write_qrels, load_qrels, load_trec_topics
from capreolus.utils.loginit import get_logger
logger = get_logger(__name__)
def validate(build_f):
def validate_folds_file(self):
if not hasattr(self, "fold_file"):
logger.warning(f"Folds file is not found for Module {self.module_name}")
return
if self.fold_file.suffix != ".json":
raise ValueError(f"Expect folds file to be in .json format.")
raw_folds = json.load(open(self.fold_file))
# we actually don't need to verify the name of folds right?
for fold_name, fold_sets in raw_folds.items():
if set(fold_sets) != {"train_qids", "predict"}:
raise ValueError(f"Expect each fold to contain ['train_qids', 'predict'] fields.")
if set(fold_sets["predict"]) != {"dev", "test"}:
raise ValueError(f"Expect each fold to contain ['dev', 'test'] fields under 'predict'.")
logger.info("Folds file validation finishes.")
def validate_qrels_file(self):
if not hasattr(self, "qrel_file"):
logger.warning(f"Qrel file is not found for Module {self.module_name}")
return
n_dup, qrels = 0, defaultdict(dict)
with open(self.qrel_file) as f:
for line in f:
qid, _, docid, label = line.strip().split()
if docid in qrels[qid]:
n_dup += 1
if int(label) != qrels[qid][docid]:
raise ValueError(f"Found conflicting label in {self.qrel_file} for query {qid} and document {docid}.")
qrels[qid][docid] = int(label)
if n_dup > 0:
qrel_file_no_ext, ext = os.path.splitext(self.qrel_file)
dup_qrel_file = qrel_file_no_ext + "-contain-dup-entries" + ext
os.rename(self.qrel_file, dup_qrel_file)
write_qrels(qrels, self.qrel_file)
logger.warning(
f"Removed {n_dup} entries from the file {self.qrel_file}. The original version could be found in {dup_qrel_file}."
)
logger.info("Qrel file validation finishes.")
def validate_query_alignment(self):
topic_qids = set(self.topics[self.query_type])
qrels_qids = set(self.qrels)
for fold_name, fold_sets in self.folds.items():
# check if there are overlap between training, dev, and test set
train_qids, dev_qids, test_qids = (
set(fold_sets["train_qids"]),
set(fold_sets["predict"]["dev"]),
set(fold_sets["predict"]["test"]),
)
if len(train_qids & dev_qids) > 0:
logger.warning(
f"Found {len(train_qids & dev_qids)} overlap queries between training and dev set in fold {fold_name}."
)
if len(train_qids & test_qids) > 0:
logger.warning(
f"Found {len(train_qids & dev_qids)} overlap queries between training and dev set in fold {fold_name}."
)
if len(dev_qids & test_qids) > 0:
logger.warning(
f"Found {len(train_qids & dev_qids)} overlap queries between training and dev set in fold {fold_name}."
)
# check if the topics, qrels, and folds file share a reasonable set (if not all) of queries
folds_qids = train_qids | dev_qids | test_qids
n_overlap = len(set(topic_qids) & set(qrels_qids) & set(folds_qids))
if not len(topic_qids) == len(qrels_qids) == len(folds_qids) == n_overlap:
logger.warning(
f"Number of queries are not aligned across topics, qrels and folds in fold {fold_name}: {len(topic_qids)} queries in topics file, {len(qrels_qids)} queries in qrels file, {len(folds_qids)} queries in folds file; {n_overlap} overlap queries found among the three."
)
# check if any topic in folds cannot be found in topics file
for set_name, set_qids in zip(["training", "dev", "test"], [train_qids, dev_qids, test_qids]):
if len(set_qids - topic_qids) > 0:
raise ValueError(
f"{len(set_qids - topic_qids)} queries in {set_name} set of fold {fold_name} cannot be found in topic file."
)
logger.info("Query Alignment validation finishes.")
def _validate(self):
"""Rewrite the files that contain invalid (duplicate) entries, and remove the currently loaded variables"""
build_f(self)
validate_folds_file(self)
validate_qrels_file(self)
validate_query_alignment(self)
return _validate
class Benchmark(ModuleBase):
"""Base class for Benchmark modules. The purpose of a Benchmark is to provide the data needed to run an experiment, such as queries, folds, and relevance judgments.
Modules should provide:
- a ``topics`` dict mapping query ids (*qids*) to *queries*
- a ``qrels`` dict mapping *qids* to *docids* and *relevance labels*
- a ``folds`` dict mapping a fold name to *training*, *dev* (validation), and *testing* qids
- if these can be loaded from files in standard formats, they can be specified by setting the ``topic_file``, ``qrel_file``, and ``fold_file``, respectively, rather than by setting the above attributes directly
"""
module_type = "benchmark"
qrel_file = None
topic_file = None
fold_file = None
query_type = None
relevance_level = 1
""" Documents with a relevance label >= relevance_level will be considered relevant.
This corresponds to trec_eval's --level_for_rel (and is passed to pytrec_eval as relevance_level). """
use_train_as_dev = True
""" Whether to use training set as validate set when there is no training needed,
e.g. for traditional IR algorithms like BM25 """
@property
def qrels(self):
if not hasattr(self, "_qrels"):
self._qrels = load_qrels(self.qrel_file)
return self._qrels
@property
def topics(self):
if not hasattr(self, "_topics"):
self._topics = load_trec_topics(self.topic_file)
return self._topics
@property
def folds(self):
if not hasattr(self, "_folds"):
self._folds = json.load(open(self.fold_file, "rt"), parse_int=str)
return self._folds
@property
def non_nn_dev(self):
dev_per_fold = {fold_name: deepcopy(folds["predict"]["dev"]) for fold_name, folds in self.folds.items()}
if self.use_train_as_dev:
for fold_name, folds in self.folds.items():
dev_per_fold[fold_name].extend(folds["train_qids"])
return dev_per_fold
def get_topics_file(self, query_sets=None):
"""Returns path to a topics file in TSV format containing queries from query_sets.
query_sets may contain any combination of 'train', 'dev', and 'test'.
All are returned if query_sets is None."""
if query_sets:
query_sets = set(query_sets)
invalid = query_sets - {"train", "test", "dev"}
if invalid:
raise ValueError(f"query_sets contains invalid fold names: {invalid}")
query_sets = "_".join(sorted(query_sets))
valid_qids = set()
if "train" in query_sets:
valid_qids.update(self.folds["train_qids"])
if "dev" in query_sets:
valid_qids.update(self.folds["predict"]["dev"])
if "test" in query_sets:
valid_qids.update(self.folds["predict"]["test"])
else:
query_sets = "all"
valid_qids = None
fn = self.get_cache_path() / f"topics-{query_sets}.tsv"
try:
with cached_file(fn) as tmp_fn:
with open(tmp_fn, "wt") as outf:
for qid, query in self.topics[self.query_type].items():
if query_sets == "all" or qid in valid_qids:
print(f"{qid}\t{query}", file=outf)
        except TargetFileExists:
            pass
return fn
@validate
def build(self):
return
class IRDBenchmark(Benchmark):
ird_dataset_names = []
@property
def qrels(self):
if not hasattr(self, "_qrels"):
self._qrels = self.ird_load_qrels()
return self._qrels
@property
def topics(self):
if not hasattr(self, "_topics"):
self._topics = self.ird_load_topics()
return self._topics
def ird_load_qrels(self):
qrels = {}
for name in self.ird_dataset_names:
dataset = ir_datasets.load(name)
for qrel in dataset.qrels_iter():
qrels.setdefault(qrel.query_id, {})
qrels[qrel.query_id][qrel.doc_id] = max(qrel.relevance, qrels[qrel.query_id].get(qrel.doc_id, -1))
return qrels
def ird_load_topics(self):
topics = {}
field = "description" if self.query_type == "desc" else self.query_type
for name in self.ird_dataset_names:
dataset = ir_datasets.load(name)
for query in dataset.queries_iter():
topics[query.query_id] = getattr(query, field).replace("\n", " ")
return {self.query_type: topics}
from profane import import_all_modules
from .dummy import DummyBenchmark
import_all_modules(__file__, __package__)
|
[
"capreolus.utils.trec.write_qrels",
"copy.deepcopy",
"capreolus.utils.loginit.get_logger",
"os.rename",
"collections.defaultdict",
"ir_datasets.load",
"os.path.splitext",
"capreolus.utils.trec.load_trec_topics",
"capreolus.utils.caching.cached_file",
"capreolus.utils.trec.load_qrels",
"profane.import_all_modules"
] |
[((337, 357), 'capreolus.utils.loginit.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (347, 357), False, 'from capreolus.utils.loginit import get_logger\n'), ((9714, 9755), 'profane.import_all_modules', 'import_all_modules', (['__file__', '__package__'], {}), '(__file__, __package__)\n', (9732, 9755), False, 'from profane import import_all_modules\n'), ((1452, 1469), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1463, 1469), False, 'from collections import defaultdict\n'), ((1957, 1989), 'os.path.splitext', 'os.path.splitext', (['self.qrel_file'], {}), '(self.qrel_file)\n', (1973, 1989), False, 'import os\n'), ((2078, 2118), 'os.rename', 'os.rename', (['self.qrel_file', 'dup_qrel_file'], {}), '(self.qrel_file, dup_qrel_file)\n', (2087, 2118), False, 'import os\n'), ((2131, 2165), 'capreolus.utils.trec.write_qrels', 'write_qrels', (['qrels', 'self.qrel_file'], {}), '(qrels, self.qrel_file)\n', (2142, 2165), False, 'from capreolus.utils.trec import write_qrels, load_qrels, load_trec_topics\n'), ((6286, 6312), 'capreolus.utils.trec.load_qrels', 'load_qrels', (['self.qrel_file'], {}), '(self.qrel_file)\n', (6296, 6312), False, 'from capreolus.utils.trec import write_qrels, load_qrels, load_trec_topics\n'), ((6445, 6478), 'capreolus.utils.trec.load_trec_topics', 'load_trec_topics', (['self.topic_file'], {}), '(self.topic_file)\n', (6461, 6478), False, 'from capreolus.utils.trec import write_qrels, load_qrels, load_trec_topics\n'), ((6765, 6798), 'copy.deepcopy', 'deepcopy', (["folds['predict']['dev']"], {}), "(folds['predict']['dev'])\n", (6773, 6798), False, 'from copy import deepcopy\n'), ((8984, 9006), 'ir_datasets.load', 'ir_datasets.load', (['name'], {}), '(name)\n', (9000, 9006), False, 'import ir_datasets\n'), ((9441, 9463), 'ir_datasets.load', 'ir_datasets.load', (['name'], {}), '(name)\n', (9457, 9463), False, 'import ir_datasets\n'), ((8091, 8106), 'capreolus.utils.caching.cached_file', 'cached_file', (['fn'], {}), '(fn)\n', (8102, 8106), False, 'from capreolus.utils.caching import cached_file, TargetFileExists\n')]
|
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
"""
The paper specifies applying channel attention (CA) first, then spatial attention (SA).
Defined here:
channel attention output.shape: [b, 1, 1, filters]
spatial attention output.shape: [b, h, w, 1]
"""
def regularized_padded_conv(*args, **kwargs):
""" 定义一个3x3卷积!kernel_initializer='he_normal','glorot_normal'"""
return layers.Conv2D(
*args, **kwargs,
padding='same',
use_bias=False,
kernel_initializer='he_normal',
# kernel_regularizer=keras.regularizers.l2(5e-4)
)
def channel_attention_dense(inputs, filters, ratio=16):
avg_out = layers.GlobalAveragePooling2D()(inputs)
max_out = layers.GlobalMaxPool2D()(inputs)
out = tf.stack([avg_out, max_out], axis=1)
out = layers.Dense(filters // ratio,
kernel_initializer='he_normal',
# kernel_regularizer=keras.regularizers.l2(5e-4),
use_bias=True,
bias_initializer='zeros',
activation='relu'
)(out)
out = layers.Dense(filters,
kernel_initializer='he_normal',
# kernel_regularizer=keras.regularizers.l2(5e-4),
use_bias=True,
bias_initializer='zeros'
)(out)
out = tf.reduce_sum(out, axis=1)
out = layers.Activation('sigmoid')(out)
out = layers.Reshape((1, 1, out.shape[1]))(out)
return out
def channel_attention_conv(inputs, filters, ratio=16):
"""将全连接层替换为卷积层: channel attention 输出: [B, 1, 1, filters]"""
avg_out = layers.GlobalAveragePooling2D()(inputs)
max_out = layers.GlobalMaxPool2D()(inputs)
avg_out = layers.Reshape((1, 1, avg_out.shape[1]))(avg_out)
max_out = layers.Reshape((1, 1, max_out.shape[1]))(max_out)
out = layers.Concatenate(axis=3)([avg_out, max_out]) # [batch_size, 1, 1, dims+dims]
pool_out = [avg_out, max_out]
conv_out = []
for i in range(2):
out = layers.Conv2D(filters // ratio,
kernel_size=1, strides=1,
padding='same',
# kernel_regularizer=keras.regularizers.l2(5e-4),
use_bias=True, activation=tf.nn.relu
)(pool_out[i])
out = layers.Conv2D(filters,
kernel_size=1, strides=1, padding='same',
# kernel_regularizer=keras.regularizers.l2(5e-4),
use_bias=True
)(out)
conv_out.append(out)
    conv_out = conv_out[0] + conv_out[1]
    out = layers.Reshape((1, 1, filters))(conv_out)  # fixed: reshape the summed attention, not the stale loop variable
    out = layers.Activation('sigmoid')(out)
return out
class ChannelAttentionConv(layers.Layer):
def __init__(self, out_filters, ratio=16):
super(ChannelAttentionConv, self).__init__()
self.avg = layers.GlobalAveragePooling2D()
self.max = layers.GlobalMaxPooling2D()
self.conv1 = layers.Conv2D(
out_filters // ratio, kernel_size=1, strides=1, padding='same',
# kernel_regularizer=keras.regularizers.l2(5e-4),
use_bias=True, activation=tf.nn.relu)
self.conv2 = layers.Conv2D(
out_filters, kernel_size=1, strides=1, padding='same',
# kernel_regularizer=keras.regularizers.l2(5e-4),
use_bias=True)
def build(self, input_shape):
filter_size = input_shape[1]
input_filters = input_shape[-1]
self.conv_filter_size = layers.Conv2D(
input_filters, kernel_size=filter_size, strides=1, padding='valid',
# kernel_regularizer=keras.regularizers.l2(5e-4),
use_bias=True)
return
def call(self, inputs):
avg = self.avg(inputs)
max = self.max(inputs)
        avg = layers.Reshape((1, 1, avg.shape[1]))(avg)  # shape (None, 1, 1, features)
        max = layers.Reshape((1, 1, max.shape[1]))(max)  # shape (None, 1, 1, features)
avg_out = self.conv2(self.conv1(avg))
max_out = self.conv2(self.conv1(max))
out = avg_out + max_out
out = tf.nn.sigmoid(out)
return out
class ChannelAttentionDense(layers.Layer):
"""channel attention 自定义类"""
def __init__(self, in_planes, ratio=16):
super(ChannelAttentionDense, self).__init__()
self.avg = layers.GlobalAveragePooling2D()
self.max = layers.GlobalMaxPooling2D()
self.fc1 = layers.Dense(in_planes // ratio,
kernel_initializer='he_normal',
# kernel_regularizer=keras.regularizers.l2(5e-4),
use_bias=True,
bias_initializer='zeros',
activation='relu')
self.fc2 = layers.Dense(in_planes,
kernel_initializer='he_normal',
# kernel_regularizer=keras.regularizers.l2(5e-4),
use_bias=True,
bias_initializer='zeros')
def build(self, input_shape):
pass
def call(self, inputs):
avg_out = self.fc2(self.fc1(self.avg(inputs)))
max_out = self.fc2(self.fc1(self.max(inputs)))
out = avg_out + max_out
out = tf.nn.sigmoid(out)
out = layers.Reshape((1, 1, out.shape[1]))(out)
return out
class SpatialAttention(layers.Layer):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
self.conv1 = layers.Conv2D(
filters=1, kernel_size=kernel_size, strides=1, activation='sigmoid',
padding='same', use_bias=False, kernel_initializer='he_normal',
# kernel_regularizer=keras.regularizers.l2(5e-4)
)
def call(self, inputs):
avg_out = tf.reduce_mean(inputs, axis=3) # [b, h, w, 1]
max_out = tf.reduce_max(inputs, axis=3) # [b, h, w, 1]
        out = tf.stack([avg_out, max_out], axis=-1)  # create a new axis and concatenate along it: [b, h, w, 2]
out = self.conv1(out) # [b, h, w, 1]
return out
def test_model(input_shape):
inputs = layers.Input(input_shape)
out = SpatialAttention()(inputs)
return tf.keras.Model(inputs, out)
if __name__ == '__main__':
model_ = test_model((32, 32, 64))
model_.summary()
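    # Illustrative CBAM-style usage (added; not in the original file): per the
    # module docstring above, apply channel attention first, then spatial attention.
    x = tf.random.normal((2, 32, 32, 64))  # hypothetical feature map [b, h, w, c]
    x = x * ChannelAttentionConv(64)(x)    # [b, 1, 1, c] gate broadcasts over h, w
    x = x * SpatialAttention()(x)          # [b, h, w, 1] gate broadcasts over channels
    print(x.shape)                         # (2, 32, 32, 64)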
|
[
"tensorflow.reduce_sum",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.GlobalMaxPooling2D",
"tensorflow.reduce_mean",
"tensorflow.stack",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.reduce_max",
"tensorflow.nn.sigmoid",
"tensorflow.keras.layers.GlobalMaxPool2D"
] |
[((390, 488), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['*args'], {'padding': '"""same"""', 'use_bias': '(False)', 'kernel_initializer': '"""he_normal"""'}), "(*args, **kwargs, padding='same', use_bias=False,\n kernel_initializer='he_normal')\n", (403, 488), False, 'from tensorflow.keras import layers\n'), ((762, 798), 'tensorflow.stack', 'tf.stack', (['[avg_out, max_out]'], {'axis': '(1)'}), '([avg_out, max_out], axis=1)\n', (770, 798), True, 'import tensorflow as tf\n'), ((1426, 1452), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['out'], {'axis': '(1)'}), '(out, axis=1)\n', (1439, 1452), True, 'import tensorflow as tf\n'), ((6406, 6431), 'tensorflow.keras.layers.Input', 'layers.Input', (['input_shape'], {}), '(input_shape)\n', (6418, 6431), False, 'from tensorflow.keras import layers\n'), ((6482, 6509), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'out'], {}), '(inputs, out)\n', (6496, 6509), True, 'import tensorflow as tf\n'), ((663, 694), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'layers.GlobalAveragePooling2D', ([], {}), '()\n', (692, 694), False, 'from tensorflow.keras import layers\n'), ((718, 742), 'tensorflow.keras.layers.GlobalMaxPool2D', 'layers.GlobalMaxPool2D', ([], {}), '()\n', (740, 742), False, 'from tensorflow.keras import layers\n'), ((810, 937), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(filters // ratio)'], {'kernel_initializer': '"""he_normal"""', 'use_bias': '(True)', 'bias_initializer': '"""zeros"""', 'activation': '"""relu"""'}), "(filters // ratio, kernel_initializer='he_normal', use_bias=\n True, bias_initializer='zeros', activation='relu')\n", (822, 937), False, 'from tensorflow.keras import layers\n'), ((1144, 1242), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['filters'], {'kernel_initializer': '"""he_normal"""', 'use_bias': '(True)', 'bias_initializer': '"""zeros"""'}), "(filters, kernel_initializer='he_normal', use_bias=True,\n bias_initializer='zeros')\n", (1156, 1242), False, 'from tensorflow.keras import layers\n'), ((1464, 1492), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (1481, 1492), False, 'from tensorflow.keras import layers\n'), ((1509, 1545), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(1, 1, out.shape[1])'], {}), '((1, 1, out.shape[1]))\n', (1523, 1545), False, 'from tensorflow.keras import layers\n'), ((1707, 1738), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'layers.GlobalAveragePooling2D', ([], {}), '()\n', (1736, 1738), False, 'from tensorflow.keras import layers\n'), ((1762, 1786), 'tensorflow.keras.layers.GlobalMaxPool2D', 'layers.GlobalMaxPool2D', ([], {}), '()\n', (1784, 1786), False, 'from tensorflow.keras import layers\n'), ((1810, 1850), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(1, 1, avg_out.shape[1])'], {}), '((1, 1, avg_out.shape[1]))\n', (1824, 1850), False, 'from tensorflow.keras import layers\n'), ((1875, 1915), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(1, 1, max_out.shape[1])'], {}), '((1, 1, max_out.shape[1]))\n', (1889, 1915), False, 'from tensorflow.keras import layers\n'), ((1936, 1962), 'tensorflow.keras.layers.Concatenate', 'layers.Concatenate', ([], {'axis': '(3)'}), '(axis=3)\n', (1954, 1962), False, 'from tensorflow.keras import layers\n'), ((2782, 2813), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(1, 1, filters)'], {}), '((1, 1, filters))\n', (2796, 2813), False, 'from tensorflow.keras import layers\n'), ((2830, 2858), 'tensorflow.keras.layers.Activation', 
'layers.Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (2847, 2858), False, 'from tensorflow.keras import layers\n'), ((3049, 3080), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'layers.GlobalAveragePooling2D', ([], {}), '()\n', (3078, 3080), False, 'from tensorflow.keras import layers\n'), ((3101, 3128), 'tensorflow.keras.layers.GlobalMaxPooling2D', 'layers.GlobalMaxPooling2D', ([], {}), '()\n', (3126, 3128), False, 'from tensorflow.keras import layers\n'), ((3151, 3271), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(out_filters // ratio)'], {'kernel_size': '(1)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)', 'activation': 'tf.nn.relu'}), "(out_filters // ratio, kernel_size=1, strides=1, padding=\n 'same', use_bias=True, activation=tf.nn.relu)\n", (3164, 3271), False, 'from tensorflow.keras import layers\n'), ((3379, 3466), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['out_filters'], {'kernel_size': '(1)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)'}), "(out_filters, kernel_size=1, strides=1, padding='same',\n use_bias=True)\n", (3392, 3466), False, 'from tensorflow.keras import layers\n'), ((3702, 3803), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['input_filters'], {'kernel_size': 'filter_size', 'strides': '(1)', 'padding': '"""valid"""', 'use_bias': '(True)'}), "(input_filters, kernel_size=filter_size, strides=1, padding=\n 'valid', use_bias=True)\n", (3715, 3803), False, 'from tensorflow.keras import layers\n'), ((4316, 4334), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['out'], {}), '(out)\n', (4329, 4334), True, 'import tensorflow as tf\n'), ((4562, 4593), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'layers.GlobalAveragePooling2D', ([], {}), '()\n', (4591, 4593), False, 'from tensorflow.keras import layers\n'), ((4614, 4641), 'tensorflow.keras.layers.GlobalMaxPooling2D', 'layers.GlobalMaxPooling2D', ([], {}), '()\n', (4639, 4641), False, 'from tensorflow.keras import layers\n'), ((4662, 4791), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(in_planes // ratio)'], {'kernel_initializer': '"""he_normal"""', 'use_bias': '(True)', 'bias_initializer': '"""zeros"""', 'activation': '"""relu"""'}), "(in_planes // ratio, kernel_initializer='he_normal', use_bias=\n True, bias_initializer='zeros', activation='relu')\n", (4674, 4791), False, 'from tensorflow.keras import layers\n'), ((5022, 5122), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['in_planes'], {'kernel_initializer': '"""he_normal"""', 'use_bias': '(True)', 'bias_initializer': '"""zeros"""'}), "(in_planes, kernel_initializer='he_normal', use_bias=True,\n bias_initializer='zeros')\n", (5034, 5122), False, 'from tensorflow.keras import layers\n'), ((5543, 5561), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['out'], {}), '(out)\n', (5556, 5561), True, 'import tensorflow as tf\n'), ((5794, 5945), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(1)', 'kernel_size': 'kernel_size', 'strides': '(1)', 'activation': '"""sigmoid"""', 'padding': '"""same"""', 'use_bias': '(False)', 'kernel_initializer': '"""he_normal"""'}), "(filters=1, kernel_size=kernel_size, strides=1, activation=\n 'sigmoid', padding='same', use_bias=False, kernel_initializer='he_normal')\n", (5807, 5945), False, 'from tensorflow.keras import layers\n'), ((6091, 6121), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['inputs'], {'axis': '(3)'}), '(inputs, axis=3)\n', (6105, 6121), True, 'import tensorflow as tf\n'), ((6157, 6186), 'tensorflow.reduce_max', 
'tf.reduce_max', (['inputs'], {'axis': '(3)'}), '(inputs, axis=3)\n', (6170, 6186), True, 'import tensorflow as tf\n'), ((6218, 6255), 'tensorflow.stack', 'tf.stack', (['[avg_out, max_out]'], {'axis': '(-1)'}), '([avg_out, max_out], axis=-1)\n', (6226, 6255), True, 'import tensorflow as tf\n'), ((2111, 2226), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(filters // ratio)'], {'kernel_size': '(1)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)', 'activation': 'tf.nn.relu'}), "(filters // ratio, kernel_size=1, strides=1, padding='same',\n use_bias=True, activation=tf.nn.relu)\n", (2124, 2226), False, 'from tensorflow.keras import layers\n'), ((2447, 2526), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['filters'], {'kernel_size': '(1)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(True)'}), "(filters, kernel_size=1, strides=1, padding='same', use_bias=True)\n", (2460, 2526), False, 'from tensorflow.keras import layers\n'), ((4015, 4051), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(1, 1, avg.shape[1])'], {}), '((1, 1, avg.shape[1]))\n', (4029, 4051), False, 'from tensorflow.keras import layers\n'), ((4102, 4138), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(1, 1, max.shape[1])'], {}), '((1, 1, max.shape[1]))\n', (4116, 4138), False, 'from tensorflow.keras import layers\n'), ((5577, 5613), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(1, 1, out.shape[1])'], {}), '((1, 1, out.shape[1]))\n', (5591, 5613), False, 'from tensorflow.keras import layers\n')]
|
#! /usr/bin/env python
from __future__ import print_function
import rospy
import actionlib
import sys
import time
from std_msgs.msg import Float32
from selfie_msgs.msg import PolygonArray
import selfie_msgs.msg
def intersection_client():
client = actionlib.SimpleActionClient('intersection', selfie_msgs.msg.intersectionAction)
client.wait_for_server()
goal = selfie_msgs.msg.intersectionGoal()
print("Sending goal")
client.send_goal(goal)
    distance_pub = rospy.Publisher('/intersection_distance', Float32, queue_size=10)
    distance = Float32(data=5)
time.sleep(0.5)
print("Sending mock (far) distance to intersection.")
distance_pub.publish(distance)
polygons = PolygonArray()
pub = rospy.Publisher('/obstacles', PolygonArray, queue_size=10)
time.sleep(0.5)
print("."),
pub.publish(polygons)
time.sleep(0.8)
print("."),
pub.publish(polygons)
    distance.data = 0.05
distance_pub.publish(distance)
time.sleep(0.8)
print("."),
pub.publish(polygons)
time.sleep(1)
print("."),
pub.publish(polygons)
time.sleep(1)
print("."),
pub.publish(polygons)
time.sleep(1)
print("."),
pub.publish(polygons)
print('mock obstacles sent')
client.wait_for_result()
print("Result achieved")
return client.get_result()
if __name__ == '__main__':
try:
rospy.init_node('intersection_mock_client_py')
result = intersection_client()
except rospy.ROSInterruptException:
print("program interrupted before completion", file=sys.stderr)
|
[
"actionlib.SimpleActionClient",
"rospy.Publisher",
"time.sleep",
"rospy.init_node",
"selfie_msgs.msg.PolygonArray",
"std_msgs.msg.Float32"
] |
[((244, 329), 'actionlib.SimpleActionClient', 'actionlib.SimpleActionClient', (['"""intersection"""', 'selfie_msgs.msg.intersectionAction'], {}), "('intersection', selfie_msgs.msg.intersectionAction\n )\n", (272, 329), False, 'import actionlib\n'), ((470, 535), 'rospy.Publisher', 'rospy.Publisher', (['"""/intersection_distance"""', 'Float32'], {'queue_size': '(10)'}), "('/intersection_distance', Float32, queue_size=10)\n", (485, 535), False, 'import rospy\n'), ((549, 564), 'std_msgs.msg.Float32', 'Float32', ([], {'data': '(5)'}), '(data=5)\n', (556, 564), False, 'from std_msgs.msg import Float32\n'), ((569, 584), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (579, 584), False, 'import time\n'), ((693, 707), 'selfie_msgs.msg.PolygonArray', 'PolygonArray', ([], {}), '()\n', (705, 707), False, 'from selfie_msgs.msg import PolygonArray\n'), ((718, 776), 'rospy.Publisher', 'rospy.Publisher', (['"""/obstacles"""', 'PolygonArray'], {'queue_size': '(10)'}), "('/obstacles', PolygonArray, queue_size=10)\n", (733, 776), False, 'import rospy\n'), ((781, 796), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (791, 796), False, 'import time\n'), ((843, 858), 'time.sleep', 'time.sleep', (['(0.8)'], {}), '(0.8)\n', (853, 858), False, 'import time\n'), ((963, 978), 'time.sleep', 'time.sleep', (['(0.8)'], {}), '(0.8)\n', (973, 978), False, 'import time\n'), ((1025, 1038), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1035, 1038), False, 'import time\n'), ((1085, 1098), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1095, 1098), False, 'import time\n'), ((1145, 1158), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1155, 1158), False, 'import time\n'), ((1369, 1415), 'rospy.init_node', 'rospy.init_node', (['"""intersection_mock_client_py"""'], {}), "('intersection_mock_client_py')\n", (1384, 1415), False, 'import rospy\n')]
|
from itertools import groupby
import numpy as np
def best_path(mat: np.ndarray, labels: str) -> str:
"""Best path (greedy) decoder.
Take best-scoring character per time-step, then remove repeated characters and CTC blank characters.
See dissertation of Graves, p63.
Args:
mat: Output of neural network of shape TxC.
labels: The set of characters the neural network can recognize, excluding the CTC-blank.
Returns:
The decoded text.
"""
# get char indices along best path
best_path_indices = np.argmax(mat, axis=1)
# collapse best path (using itertools.groupby), map to chars, join char list to string
blank_idx = len(labels)
best_chars_collapsed = [labels[k] for k, _ in groupby(best_path_indices) if k != blank_idx]
res = ''.join(best_chars_collapsed)
return res
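# Illustrative usage (added; not part of the original module): a toy 4-timestep,
# 3-class matrix over labels "ab" plus the CTC blank at index 2. Best indices
# per step are [0, 0, 2, 1]; collapsing repeats and dropping blanks gives "ab".
if __name__ == '__main__':
    toy_mat = np.array([[0.8, 0.1, 0.1],   # 'a'
                        [0.7, 0.2, 0.1],   # 'a' (repeat, collapsed)
                        [0.1, 0.2, 0.7],   # CTC blank (removed)
                        [0.2, 0.7, 0.1]])  # 'b'
    assert best_path(toy_mat, 'ab') == 'ab'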
|
[
"itertools.groupby",
"numpy.argmax"
] |
[((554, 576), 'numpy.argmax', 'np.argmax', (['mat'], {'axis': '(1)'}), '(mat, axis=1)\n', (563, 576), True, 'import numpy as np\n'), ((747, 773), 'itertools.groupby', 'groupby', (['best_path_indices'], {}), '(best_path_indices)\n', (754, 773), False, 'from itertools import groupby\n')]
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.md') as f:
readme = f.read()
setup(
name="event-reminder",
version="1.0.0",
description="Show messages at a specific date with crontab-like scheduling expressions.",
author="ukitinu",
author_email="<EMAIL>",
url="https://github.com/ukitinu/event-reminder",
packages=['eventreminder', 'eventreminder.tests'],
license="MIT",
long_description=readme,
long_description_content_type='text/markdown',
keywords='crontab birthday',
include_package_data=True,
)
|
[
"distutils.core.setup"
] |
[((148, 596), 'distutils.core.setup', 'setup', ([], {'name': '"""event-reminder"""', 'version': '"""1.0.0"""', 'description': '"""Show messages at a specific date with crontab-like scheduling expressions."""', 'author': '"""ukitinu"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/ukitinu/event-reminder"""', 'packages': "['eventreminder', 'eventreminder.tests']", 'license': '"""MIT"""', 'long_description': 'readme', 'long_description_content_type': '"""text/markdown"""', 'keywords': '"""crontab birthday"""', 'include_package_data': '(True)'}), "(name='event-reminder', version='1.0.0', description=\n 'Show messages at a specific date with crontab-like scheduling expressions.'\n , author='ukitinu', author_email='<EMAIL>', url=\n 'https://github.com/ukitinu/event-reminder', packages=['eventreminder',\n 'eventreminder.tests'], license='MIT', long_description=readme,\n long_description_content_type='text/markdown', keywords=\n 'crontab birthday', include_package_data=True)\n", (153, 596), False, 'from distutils.core import setup\n')]
|
import smtplib
import json
import keyring
from datetime import date
from email.message import EmailMessage
def send_emails(posts):
# get login and service from cfg
# then get pass from keyring
with open('config.json', 'r') as f:
config = json.load(f)
service = config["MAIL"]["service"]
login = config["MAIL"]["login"]
password = keyring.get_password(service, login)
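    # Added note: this assumes the password was stored beforehand, e.g. with
    # keyring.set_password(service, login, "<password>") in a one-off session.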
# format mail body
mail_body = ""
curr_date = date.today()
for i in posts:
mail_body += i['date'] + "\n" + i['thread'] + "\n" + i['link'] + "\n\n"
# init EmailMessage
msg = EmailMessage()
msg.set_content(
f"There are {len(posts)} threads with new posts! \n They are as follows:\n {mail_body}"
)
msg['From'] = login
msg['To'] = config['MAIL']['recipients']
msg['Subject'] = f'Scraper\'s new mail - {curr_date}'
try:
smtp_server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
smtp_server.ehlo()
smtp_server.login(login, password)
smtp_server.send_message(msg)
smtp_server.close()
except Exception as e:
print(e)
|
[
"json.load",
"smtplib.SMTP_SSL",
"email.message.EmailMessage",
"datetime.date.today",
"keyring.get_password"
] |
[((365, 401), 'keyring.get_password', 'keyring.get_password', (['service', 'login'], {}), '(service, login)\n', (385, 401), False, 'import keyring\n'), ((461, 473), 'datetime.date.today', 'date.today', ([], {}), '()\n', (471, 473), False, 'from datetime import date\n'), ((609, 623), 'email.message.EmailMessage', 'EmailMessage', ([], {}), '()\n', (621, 623), False, 'from email.message import EmailMessage\n'), ((260, 272), 'json.load', 'json.load', (['f'], {}), '(f)\n', (269, 272), False, 'import json\n'), ((912, 951), 'smtplib.SMTP_SSL', 'smtplib.SMTP_SSL', (['"""smtp.gmail.com"""', '(465)'], {}), "('smtp.gmail.com', 465)\n", (928, 951), False, 'import smtplib\n')]
|
import argparse
import sys
from pygments import highlight
from pygments.formatters import Terminal256Formatter
from fluent.pygments.lexer import FluentLexer
def main():
parser = argparse.ArgumentParser()
parser.add_argument('path')
args = parser.parse_args()
with open(args.path) as fh:
code = fh.read()
highlight(code, FluentLexer(), Terminal256Formatter(), sys.stdout)
if __name__ == '__main__':
main()
|
[
"fluent.pygments.lexer.FluentLexer",
"argparse.ArgumentParser",
"pygments.formatters.Terminal256Formatter"
] |
[((185, 210), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (208, 210), False, 'import argparse\n'), ((351, 364), 'fluent.pygments.lexer.FluentLexer', 'FluentLexer', ([], {}), '()\n', (362, 364), False, 'from fluent.pygments.lexer import FluentLexer\n'), ((366, 388), 'pygments.formatters.Terminal256Formatter', 'Terminal256Formatter', ([], {}), '()\n', (386, 388), False, 'from pygments.formatters import Terminal256Formatter\n')]
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: dota_match_metadata.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import List
import betterproto
from .base_gcmessages import CsoEconItem
from .dota_gcmessages_common import CMsgDotaMatch, CMsgMatchTips
from .dota_gcmessages_common_match_management import CLobbyTimedRewardDetails, CMsgMatchMatchmakingStats, CMvpData
from .dota_shared_enums import EdotammrBoostType, EEvent
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataFile(betterproto.Message):
version: int = betterproto.int32_field(1)
match_id: int = betterproto.uint64_field(2)
metadata: "CdotaMatchMetadata" = betterproto.message_field(3)
private_metadata: bytes = betterproto.bytes_field(5)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadata(betterproto.Message):
teams: List["CdotaMatchMetadataTeam"] = betterproto.message_field(1)
item_rewards: List["CLobbyTimedRewardDetails"] = betterproto.message_field(2)
lobby_id: int = betterproto.fixed64_field(3)
report_until_time: int = betterproto.fixed64_field(4)
event_game_custom_table: bytes = betterproto.bytes_field(5)
primary_event_id: int = betterproto.uint32_field(6)
match_tips: List["CMsgMatchTips"] = betterproto.message_field(7)
matchmaking_stats: "CMsgMatchMatchmakingStats" = betterproto.message_field(8)
mvp_data: "CMvpData" = betterproto.message_field(9)
guild_challenge_progress: List["CdotaMatchMetadataGuildChallengeProgress"] = betterproto.message_field(10)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeam(betterproto.Message):
dota_team: int = betterproto.uint32_field(1)
players: List["CdotaMatchMetadataTeamPlayer"] = betterproto.message_field(2)
graph_experience: List[float] = betterproto.float_field(3)
graph_gold_earned: List[float] = betterproto.float_field(4)
graph_net_worth: List[float] = betterproto.float_field(5)
cm_first_pick: bool = betterproto.bool_field(6)
cm_captain_player_id: int = betterproto.uint32_field(7)
cm_bans: List[int] = betterproto.uint32_field(8)
cm_picks: List[int] = betterproto.uint32_field(9)
cm_penalty: int = betterproto.uint32_field(10)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamPlayerKill(betterproto.Message):
victim_slot: int = betterproto.uint32_field(1)
count: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamItemPurchase(betterproto.Message):
item_id: int = betterproto.uint32_field(1)
purchase_time: int = betterproto.int32_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamInventorySnapshot(betterproto.Message):
item_id: List[int] = betterproto.uint32_field(1)
game_time: int = betterproto.int32_field(2)
kills: int = betterproto.uint32_field(3)
deaths: int = betterproto.uint32_field(4)
assists: int = betterproto.uint32_field(5)
level: int = betterproto.uint32_field(6)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamAutoStyleCriteria(betterproto.Message):
name_token: int = betterproto.uint32_field(1)
value: float = betterproto.float_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamStrangeGemProgress(betterproto.Message):
kill_eater_type: int = betterproto.uint32_field(1)
gem_item_def_index: int = betterproto.uint32_field(2)
required_hero_id: int = betterproto.uint32_field(3)
starting_value: int = betterproto.uint32_field(4)
ending_value: int = betterproto.uint32_field(5)
owner_item_def_index: int = betterproto.uint32_field(6)
owner_item_id: int = betterproto.uint64_field(7)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamVictoryPrediction(betterproto.Message):
item_id: int = betterproto.uint64_field(1)
item_def_index: int = betterproto.uint32_field(2)
starting_value: int = betterproto.uint32_field(3)
is_victory: bool = betterproto.bool_field(4)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamSubChallenge(betterproto.Message):
slot_id: int = betterproto.uint32_field(1)
start_value: int = betterproto.uint32_field(2)
end_value: int = betterproto.uint32_field(3)
completed: bool = betterproto.bool_field(4)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamCavernChallengeResult(betterproto.Message):
completed_path_id: int = betterproto.uint32_field(1)
claimed_room_id: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamActionGrant(betterproto.Message):
action_id: int = betterproto.uint32_field(1)
quantity: int = betterproto.uint32_field(2)
audit: int = betterproto.uint32_field(3)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamCandyGrant(betterproto.Message):
points: int = betterproto.uint32_field(1)
reason: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamEventData(betterproto.Message):
event_id: int = betterproto.uint32_field(1)
event_points: int = betterproto.uint32_field(2)
challenge_instance_id: int = betterproto.uint32_field(3)
challenge_quest_id: int = betterproto.uint32_field(4)
challenge_quest_challenge_id: int = betterproto.uint32_field(5)
challenge_completed: bool = betterproto.bool_field(6)
challenge_rank_completed: int = betterproto.uint32_field(7)
challenge_rank_previously_completed: int = betterproto.uint32_field(8)
event_owned: bool = betterproto.bool_field(9)
sub_challenges_with_progress: List["CdotaMatchMetadataTeamSubChallenge"] = betterproto.message_field(10)
wager_winnings: int = betterproto.uint32_field(11)
cavern_challenge_active: bool = betterproto.bool_field(12)
cavern_challenge_winnings: int = betterproto.uint32_field(13)
amount_wagered: int = betterproto.uint32_field(14)
periodic_point_adjustments: int = betterproto.uint32_field(16)
cavern_challenge_map_results: List["CdotaMatchMetadataTeamCavernChallengeResult"] = betterproto.message_field(17)
cavern_challenge_plus_shard_winnings: int = betterproto.uint32_field(18)
actions_granted: List["CdotaMatchMetadataTeamActionGrant"] = betterproto.message_field(19)
cavern_crawl_map_variant: int = betterproto.uint32_field(20)
team_wager_bonus_pct: int = betterproto.uint32_field(21)
wager_streak_pct: int = betterproto.uint32_field(22)
candy_points_granted: List["CdotaMatchMetadataTeamCandyGrant"] = betterproto.message_field(23)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamGauntletProgress(betterproto.Message):
gauntlet_tier: int = betterproto.uint32_field(2)
gauntlet_wins: int = betterproto.uint32_field(3)
gauntlet_losses: int = betterproto.uint32_field(4)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamPlayer(betterproto.Message):
account_id: int = betterproto.uint32_field(1)
ability_upgrades: List[int] = betterproto.uint32_field(2)
player_slot: int = betterproto.uint32_field(3)
equipped_econ_items: List["CsoEconItem"] = betterproto.message_field(4)
kills: List["CdotaMatchMetadataTeamPlayerKill"] = betterproto.message_field(5)
items: List["CdotaMatchMetadataTeamItemPurchase"] = betterproto.message_field(6)
avg_kills_x16: int = betterproto.uint32_field(7)
avg_deaths_x16: int = betterproto.uint32_field(8)
avg_assists_x16: int = betterproto.uint32_field(9)
avg_gpm_x16: int = betterproto.uint32_field(10)
avg_xpm_x16: int = betterproto.uint32_field(11)
best_kills_x16: int = betterproto.uint32_field(12)
best_assists_x16: int = betterproto.uint32_field(13)
best_gpm_x16: int = betterproto.uint32_field(14)
best_xpm_x16: int = betterproto.uint32_field(15)
win_streak: int = betterproto.uint32_field(16)
best_win_streak: int = betterproto.uint32_field(17)
fight_score: float = betterproto.float_field(18)
farm_score: float = betterproto.float_field(19)
support_score: float = betterproto.float_field(20)
push_score: float = betterproto.float_field(21)
level_up_times: List[int] = betterproto.uint32_field(22)
graph_net_worth: List[float] = betterproto.float_field(23)
inventory_snapshot: List["CdotaMatchMetadataTeamInventorySnapshot"] = betterproto.message_field(24)
avg_stats_calibrated: bool = betterproto.bool_field(25)
auto_style_criteria: List["CdotaMatchMetadataTeamAutoStyleCriteria"] = betterproto.message_field(26)
event_data: List["CdotaMatchMetadataTeamEventData"] = betterproto.message_field(29)
strange_gem_progress: List["CdotaMatchMetadataTeamStrangeGemProgress"] = betterproto.message_field(30)
hero_xp: int = betterproto.uint32_field(31)
camps_stacked: int = betterproto.uint32_field(32)
victory_prediction: List["CdotaMatchMetadataTeamVictoryPrediction"] = betterproto.message_field(33)
lane_selection_flags: int = betterproto.uint32_field(34)
rampages: int = betterproto.uint32_field(35)
triple_kills: int = betterproto.uint32_field(36)
aegis_snatched: int = betterproto.uint32_field(37)
rapiers_purchased: int = betterproto.uint32_field(38)
couriers_killed: int = betterproto.uint32_field(39)
net_worth_rank: int = betterproto.uint32_field(40)
support_gold_spent: int = betterproto.uint32_field(41)
observer_wards_placed: int = betterproto.uint32_field(42)
sentry_wards_placed: int = betterproto.uint32_field(43)
wards_dewarded: int = betterproto.uint32_field(44)
stun_duration: float = betterproto.float_field(45)
rank_mmr_boost_type: "EdotammrBoostType" = betterproto.enum_field(46)
gauntlet_progress: "CdotaMatchMetadataTeamGauntletProgress" = betterproto.message_field(47)
contract_progress: List["CdotaMatchMetadataTeamPlayerContractProgress"] = betterproto.message_field(48)
guild_ids: List[int] = betterproto.uint32_field(49)
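
# Added helper sketch: the `_x16` suffix on the averaged stats suggests
# fixed-point values scaled by 16 (an assumption from the naming, not
# confirmed by the source); a hypothetical accessor converting one back.
def avg_kills(player: "CdotaMatchMetadataTeamPlayer") -> float:
    return player.avg_kills_x16 / 16.0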
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataTeamPlayerContractProgress(betterproto.Message):
guild_id: int = betterproto.uint32_field(1)
event_id: int = betterproto.uint32_field(2)
challenge_instance_id: int = betterproto.uint32_field(3)
challenge_parameter: int = betterproto.uint32_field(4)
contract_stars: int = betterproto.uint32_field(5)
contract_slot: int = betterproto.uint32_field(6)
completed: bool = betterproto.bool_field(7)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataGuildChallengeProgress(betterproto.Message):
guild_id: int = betterproto.uint32_field(1)
event_id: "EEvent" = betterproto.enum_field(2)
challenge_instance_id: int = betterproto.uint32_field(3)
challenge_parameter: int = betterproto.uint32_field(4)
challenge_timestamp: int = betterproto.uint32_field(5)
challenge_progress_at_start: int = betterproto.uint32_field(6)
challenge_progress_accumulated: int = betterproto.uint32_field(7)
individual_progress: List["CdotaMatchMetadataGuildChallengeProgressIndividualProgress"] = betterproto.message_field(
8
)
@dataclass(eq=False, repr=False)
class CdotaMatchMetadataGuildChallengeProgressIndividualProgress(betterproto.Message):
account_id: int = betterproto.uint32_field(1)
progress: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadata(betterproto.Message):
teams: List["CdotaMatchPrivateMetadataTeam"] = betterproto.message_field(1)
graph_win_probability: List[float] = betterproto.float_field(2)
string_names: List["CdotaMatchPrivateMetadataStringName"] = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataStringName(betterproto.Message):
id: int = betterproto.uint32_field(1)
name: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeam(betterproto.Message):
dota_team: int = betterproto.uint32_field(1)
players: List["CdotaMatchPrivateMetadataTeamPlayer"] = betterproto.message_field(2)
buildings: List["CdotaMatchPrivateMetadataTeamBuilding"] = betterproto.message_field(3)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayer(betterproto.Message):
account_id: int = betterproto.uint32_field(1)
player_slot: int = betterproto.uint32_field(2)
position_stream: bytes = betterproto.bytes_field(3)
combat_segments: List["CdotaMatchPrivateMetadataTeamPlayerCombatSegment"] = betterproto.message_field(4)
damage_unit_names: List[str] = betterproto.string_field(5)
buff_records: List["CdotaMatchPrivateMetadataTeamPlayerBuffRecord"] = betterproto.message_field(6)
graph_kills: List[float] = betterproto.float_field(7)
graph_deaths: List[float] = betterproto.float_field(8)
graph_assists: List[float] = betterproto.float_field(9)
graph_lasthits: List[float] = betterproto.float_field(10)
graph_denies: List[float] = betterproto.float_field(11)
gold_received: "CdotaMatchPrivateMetadataTeamPlayerGoldReceived" = betterproto.message_field(12)
xp_received: "CdotaMatchPrivateMetadataTeamPlayerXpReceived" = betterproto.message_field(13)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayerCombatSegment(betterproto.Message):
game_time: int = betterproto.int32_field(1)
damage_by_ability: List[
"CdotaMatchPrivateMetadataTeamPlayerCombatSegmentDamageByAbility"
] = betterproto.message_field(2)
healing_by_ability: List[
"CdotaMatchPrivateMetadataTeamPlayerCombatSegmentHealingByAbility"
] = betterproto.message_field(3)
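
# (Added comment) Combat data is bucketed per game_time segment, then per
# source ability, then per hero target -- mirrored for damage and healing
# in the *DamageByAbility*/*HealingByAbility* messages below.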
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayerCombatSegmentDamageByAbility(betterproto.Message):
source_unit_index: int = betterproto.uint32_field(3)
ability_id: int = betterproto.uint32_field(1)
by_hero_targets: List[
"CdotaMatchPrivateMetadataTeamPlayerCombatSegmentDamageByAbilityByHeroTarget"
] = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayerCombatSegmentDamageByAbilityByHeroTarget(betterproto.Message):
hero_id: int = betterproto.uint32_field(1)
damage: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayerCombatSegmentHealingByAbility(betterproto.Message):
source_unit_index: int = betterproto.uint32_field(3)
ability_id: int = betterproto.uint32_field(1)
by_hero_targets: List[
"CdotaMatchPrivateMetadataTeamPlayerCombatSegmentHealingByAbilityByHeroTarget"
] = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayerCombatSegmentHealingByAbilityByHeroTarget(betterproto.Message):
hero_id: int = betterproto.uint32_field(1)
healing: int = betterproto.uint32_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayerBuffRecord(betterproto.Message):
buff_ability_id: int = betterproto.uint32_field(1)
buff_modifier_name: str = betterproto.string_field(3)
by_hero_targets: List["CdotaMatchPrivateMetadataTeamPlayerBuffRecordByHeroTarget"] = betterproto.message_field(2)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayerBuffRecordByHeroTarget(betterproto.Message):
hero_id: int = betterproto.uint32_field(1)
elapsed_duration: float = betterproto.float_field(2)
is_hidden: bool = betterproto.bool_field(3)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayerGoldReceived(betterproto.Message):
creep: int = betterproto.uint32_field(1)
heroes: int = betterproto.uint32_field(2)
bounty_runes: int = betterproto.uint32_field(3)
passive: int = betterproto.uint32_field(4)
buildings: int = betterproto.uint32_field(5)
abilities: int = betterproto.uint32_field(6)
wards: int = betterproto.uint32_field(7)
other: int = betterproto.uint32_field(8)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamPlayerXpReceived(betterproto.Message):
creep: int = betterproto.uint32_field(1)
heroes: int = betterproto.uint32_field(2)
roshan: int = betterproto.uint32_field(3)
tome_of_knowledge: int = betterproto.uint32_field(4)
outpost: int = betterproto.uint32_field(5)
other: int = betterproto.uint32_field(6)
@dataclass(eq=False, repr=False)
class CdotaMatchPrivateMetadataTeamBuilding(betterproto.Message):
unit_name: str = betterproto.string_field(1)
position_quant_x: int = betterproto.uint32_field(2)
position_quant_y: int = betterproto.uint32_field(3)
death_time: float = betterproto.float_field(4)
@dataclass(eq=False, repr=False)
class CMsgDotadpcMatch(betterproto.Message):
match: "CMsgDotaMatch" = betterproto.message_field(1)
metadata: "CdotaMatchMetadata" = betterproto.message_field(2)
|
[
"betterproto.int32_field",
"betterproto.float_field",
"betterproto.bool_field",
"betterproto.string_field",
"betterproto.uint64_field",
"betterproto.message_field",
"betterproto.uint32_field",
"betterproto.fixed64_field",
"betterproto.bytes_field",
"dataclasses.dataclass",
"betterproto.enum_field"
] |
[((486, 517), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (495, 517), False, 'from dataclasses import dataclass\n'), ((789, 820), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (798, 820), False, 'from dataclasses import dataclass\n'), ((1571, 1602), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (1580, 1602), False, 'from dataclasses import dataclass\n'), ((2246, 2277), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (2255, 2277), False, 'from dataclasses import dataclass\n'), ((2438, 2469), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (2447, 2469), False, 'from dataclasses import dataclass\n'), ((2635, 2666), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (2644, 2666), False, 'from dataclasses import dataclass\n'), ((3022, 3053), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (3031, 3053), False, 'from dataclasses import dataclass\n'), ((3221, 3252), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (3230, 3252), False, 'from dataclasses import dataclass\n'), ((3713, 3744), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (3722, 3744), False, 'from dataclasses import dataclass\n'), ((4020, 4051), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (4029, 4051), False, 'from dataclasses import dataclass\n'), ((4313, 4344), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (4322, 4344), False, 'from dataclasses import dataclass\n'), ((4532, 4563), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (4541, 4563), False, 'from dataclasses import dataclass\n'), ((4771, 4802), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (4780, 4802), False, 'from dataclasses import dataclass\n'), ((4959, 4990), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (4968, 4990), False, 'from dataclasses import dataclass\n'), ((6575, 6606), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (6584, 6606), False, 'from dataclasses import dataclass\n'), ((6838, 6869), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (6847, 6869), False, 'from dataclasses import dataclass\n'), ((9946, 9977), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (9955, 9977), False, 'from dataclasses import dataclass\n'), ((10425, 10456), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (10434, 10456), False, 'from dataclasses import dataclass\n'), ((11081, 11112), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (11090, 11112), False, 'from dataclasses import dataclass\n'), ((11301, 11332), 
'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (11310, 11332), False, 'from dataclasses import dataclass\n'), ((11631, 11662), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (11640, 11662), False, 'from dataclasses import dataclass\n'), ((11816, 11847), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (11825, 11847), False, 'from dataclasses import dataclass\n'), ((12138, 12169), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (12147, 12169), False, 'from dataclasses import dataclass\n'), ((13166, 13197), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (13175, 13197), False, 'from dataclasses import dataclass\n'), ((13608, 13639), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (13617, 13639), False, 'from dataclasses import dataclass\n'), ((13992, 14023), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (14001, 14023), False, 'from dataclasses import dataclass\n'), ((14224, 14255), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (14233, 14255), False, 'from dataclasses import dataclass\n'), ((14610, 14641), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (14619, 14641), False, 'from dataclasses import dataclass\n'), ((14844, 14875), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (14853, 14875), False, 'from dataclasses import dataclass\n'), ((15184, 15215), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (15193, 15215), False, 'from dataclasses import dataclass\n'), ((15457, 15488), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (15466, 15488), False, 'from dataclasses import dataclass\n'), ((15946, 15977), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (15955, 15977), False, 'from dataclasses import dataclass\n'), ((16341, 16372), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (16350, 16372), False, 'from dataclasses import dataclass\n'), ((16654, 16685), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)', 'repr': '(False)'}), '(eq=False, repr=False)\n', (16663, 16685), False, 'from dataclasses import dataclass\n'), ((588, 614), 'betterproto.int32_field', 'betterproto.int32_field', (['(1)'], {}), '(1)\n', (611, 614), False, 'import betterproto\n'), ((635, 662), 'betterproto.uint64_field', 'betterproto.uint64_field', (['(2)'], {}), '(2)\n', (659, 662), False, 'import betterproto\n'), ((700, 728), 'betterproto.message_field', 'betterproto.message_field', (['(3)'], {}), '(3)\n', (725, 728), False, 'import betterproto\n'), ((759, 785), 'betterproto.bytes_field', 'betterproto.bytes_field', (['(5)'], {}), '(5)\n', (782, 785), False, 'import betterproto\n'), ((912, 940), 'betterproto.message_field', 'betterproto.message_field', (['(1)'], {}), '(1)\n', (937, 940), False, 'import betterproto\n'), ((994, 1022), 
'betterproto.message_field', 'betterproto.message_field', (['(2)'], {}), '(2)\n', (1019, 1022), False, 'import betterproto\n'), ((1043, 1071), 'betterproto.fixed64_field', 'betterproto.fixed64_field', (['(3)'], {}), '(3)\n', (1068, 1071), False, 'import betterproto\n'), ((1101, 1129), 'betterproto.fixed64_field', 'betterproto.fixed64_field', (['(4)'], {}), '(4)\n', (1126, 1129), False, 'import betterproto\n'), ((1167, 1193), 'betterproto.bytes_field', 'betterproto.bytes_field', (['(5)'], {}), '(5)\n', (1190, 1193), False, 'import betterproto\n'), ((1222, 1249), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(6)'], {}), '(6)\n', (1246, 1249), False, 'import betterproto\n'), ((1290, 1318), 'betterproto.message_field', 'betterproto.message_field', (['(7)'], {}), '(7)\n', (1315, 1318), False, 'import betterproto\n'), ((1372, 1400), 'betterproto.message_field', 'betterproto.message_field', (['(8)'], {}), '(8)\n', (1397, 1400), False, 'import betterproto\n'), ((1428, 1456), 'betterproto.message_field', 'betterproto.message_field', (['(9)'], {}), '(9)\n', (1453, 1456), False, 'import betterproto\n'), ((1538, 1567), 'betterproto.message_field', 'betterproto.message_field', (['(10)'], {}), '(10)\n', (1563, 1567), False, 'import betterproto\n'), ((1675, 1702), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (1699, 1702), False, 'import betterproto\n'), ((1755, 1783), 'betterproto.message_field', 'betterproto.message_field', (['(2)'], {}), '(2)\n', (1780, 1783), False, 'import betterproto\n'), ((1820, 1846), 'betterproto.float_field', 'betterproto.float_field', (['(3)'], {}), '(3)\n', (1843, 1846), False, 'import betterproto\n'), ((1884, 1910), 'betterproto.float_field', 'betterproto.float_field', (['(4)'], {}), '(4)\n', (1907, 1910), False, 'import betterproto\n'), ((1946, 1972), 'betterproto.float_field', 'betterproto.float_field', (['(5)'], {}), '(5)\n', (1969, 1972), False, 'import betterproto\n'), ((1999, 2024), 'betterproto.bool_field', 'betterproto.bool_field', (['(6)'], {}), '(6)\n', (2021, 2024), False, 'import betterproto\n'), ((2057, 2084), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(7)'], {}), '(7)\n', (2081, 2084), False, 'import betterproto\n'), ((2110, 2137), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(8)'], {}), '(8)\n', (2134, 2137), False, 'import betterproto\n'), ((2164, 2191), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(9)'], {}), '(9)\n', (2188, 2191), False, 'import betterproto\n'), ((2214, 2242), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(10)'], {}), '(10)\n', (2238, 2242), False, 'import betterproto\n'), ((2362, 2389), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (2386, 2389), False, 'import betterproto\n'), ((2407, 2434), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(2)'], {}), '(2)\n', (2431, 2434), False, 'import betterproto\n'), ((2552, 2579), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (2576, 2579), False, 'import betterproto\n'), ((2605, 2631), 'betterproto.int32_field', 'betterproto.int32_field', (['(2)'], {}), '(2)\n', (2628, 2631), False, 'import betterproto\n'), ((2760, 2787), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (2784, 2787), False, 'import betterproto\n'), ((2809, 2835), 'betterproto.int32_field', 'betterproto.int32_field', (['(2)'], {}), '(2)\n', (2832, 2835), False, 'import betterproto\n'), ((2853, 2880), 
'betterproto.uint32_field', 'betterproto.uint32_field', (['(3)'], {}), '(3)\n', (2877, 2880), False, 'import betterproto\n'), ((2899, 2926), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(4)'], {}), '(4)\n', (2923, 2926), False, 'import betterproto\n'), ((2946, 2973), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(5)'], {}), '(5)\n', (2970, 2973), False, 'import betterproto\n'), ((2991, 3018), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(6)'], {}), '(6)\n', (3015, 3018), False, 'import betterproto\n'), ((3144, 3171), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (3168, 3171), False, 'import betterproto\n'), ((3191, 3217), 'betterproto.float_field', 'betterproto.float_field', (['(2)'], {}), '(2)\n', (3214, 3217), False, 'import betterproto\n'), ((3349, 3376), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (3373, 3376), False, 'import betterproto\n'), ((3407, 3434), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(2)'], {}), '(2)\n', (3431, 3434), False, 'import betterproto\n'), ((3463, 3490), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(3)'], {}), '(3)\n', (3487, 3490), False, 'import betterproto\n'), ((3517, 3544), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(4)'], {}), '(4)\n', (3541, 3544), False, 'import betterproto\n'), ((3569, 3596), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(5)'], {}), '(5)\n', (3593, 3596), False, 'import betterproto\n'), ((3629, 3656), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(6)'], {}), '(6)\n', (3653, 3656), False, 'import betterproto\n'), ((3682, 3709), 'betterproto.uint64_field', 'betterproto.uint64_field', (['(7)'], {}), '(7)\n', (3706, 3709), False, 'import betterproto\n'), ((3832, 3859), 'betterproto.uint64_field', 'betterproto.uint64_field', (['(1)'], {}), '(1)\n', (3856, 3859), False, 'import betterproto\n'), ((3886, 3913), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(2)'], {}), '(2)\n', (3910, 3913), False, 'import betterproto\n'), ((3940, 3967), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(3)'], {}), '(3)\n', (3964, 3967), False, 'import betterproto\n'), ((3991, 4016), 'betterproto.bool_field', 'betterproto.bool_field', (['(4)'], {}), '(4)\n', (4013, 4016), False, 'import betterproto\n'), ((4134, 4161), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (4158, 4161), False, 'import betterproto\n'), ((4185, 4212), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(2)'], {}), '(2)\n', (4209, 4212), False, 'import betterproto\n'), ((4234, 4261), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(3)'], {}), '(3)\n', (4258, 4261), False, 'import betterproto\n'), ((4284, 4309), 'betterproto.bool_field', 'betterproto.bool_field', (['(4)'], {}), '(4)\n', (4306, 4309), False, 'import betterproto\n'), ((4446, 4473), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (4470, 4473), False, 'import betterproto\n'), ((4501, 4528), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(2)'], {}), '(2)\n', (4525, 4528), False, 'import betterproto\n'), ((4647, 4674), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (4671, 4674), False, 'import betterproto\n'), ((4695, 4722), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(2)'], {}), '(2)\n', (4719, 4722), False, 'import betterproto\n'), ((4740, 4767), 'betterproto.uint32_field', 
'betterproto.uint32_field', (['(3)'], {}), '(3)\n', (4764, 4767), False, 'import betterproto\n'), ((4882, 4909), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (4906, 4909), False, 'import betterproto\n'), ((4928, 4955), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(2)'], {}), '(2)\n', (4952, 4955), False, 'import betterproto\n'), ((5071, 5098), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (5095, 5098), False, 'import betterproto\n'), ((5123, 5150), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(2)'], {}), '(2)\n', (5147, 5150), False, 'import betterproto\n'), ((5184, 5211), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(3)'], {}), '(3)\n', (5208, 5211), False, 'import betterproto\n'), ((5242, 5269), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(4)'], {}), '(4)\n', (5266, 5269), False, 'import betterproto\n'), ((5310, 5337), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(5)'], {}), '(5)\n', (5334, 5337), False, 'import betterproto\n'), ((5370, 5395), 'betterproto.bool_field', 'betterproto.bool_field', (['(6)'], {}), '(6)\n', (5392, 5395), False, 'import betterproto\n'), ((5432, 5459), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(7)'], {}), '(7)\n', (5456, 5459), False, 'import betterproto\n'), ((5507, 5534), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(8)'], {}), '(8)\n', (5531, 5534), False, 'import betterproto\n'), ((5559, 5584), 'betterproto.bool_field', 'betterproto.bool_field', (['(9)'], {}), '(9)\n', (5581, 5584), False, 'import betterproto\n'), ((5664, 5693), 'betterproto.message_field', 'betterproto.message_field', (['(10)'], {}), '(10)\n', (5689, 5693), False, 'import betterproto\n'), ((5720, 5748), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(11)'], {}), '(11)\n', (5744, 5748), False, 'import betterproto\n'), ((5785, 5811), 'betterproto.bool_field', 'betterproto.bool_field', (['(12)'], {}), '(12)\n', (5807, 5811), False, 'import betterproto\n'), ((5849, 5877), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(13)'], {}), '(13)\n', (5873, 5877), False, 'import betterproto\n'), ((5904, 5932), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(14)'], {}), '(14)\n', (5928, 5932), False, 'import betterproto\n'), ((5971, 5999), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(16)'], {}), '(16)\n', (5995, 5999), False, 'import betterproto\n'), ((6088, 6117), 'betterproto.message_field', 'betterproto.message_field', (['(17)'], {}), '(17)\n', (6113, 6117), False, 'import betterproto\n'), ((6166, 6194), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(18)'], {}), '(18)\n', (6190, 6194), False, 'import betterproto\n'), ((6260, 6289), 'betterproto.message_field', 'betterproto.message_field', (['(19)'], {}), '(19)\n', (6285, 6289), False, 'import betterproto\n'), ((6326, 6354), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(20)'], {}), '(20)\n', (6350, 6354), False, 'import betterproto\n'), ((6387, 6415), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(21)'], {}), '(21)\n', (6411, 6415), False, 'import betterproto\n'), ((6444, 6472), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(22)'], {}), '(22)\n', (6468, 6472), False, 'import betterproto\n'), ((6542, 6571), 'betterproto.message_field', 'betterproto.message_field', (['(23)'], {}), '(23)\n', (6567, 6571), False, 'import betterproto\n'), ((6699, 6726), 'betterproto.uint32_field', 
'betterproto.uint32_field', (['(2)'], {}), '(2)\n', (6723, 6726), False, 'import betterproto\n'), ((6752, 6779), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(3)'], {}), '(3)\n', (6776, 6779), False, 'import betterproto\n'), ((6807, 6834), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(4)'], {}), '(4)\n', (6831, 6834), False, 'import betterproto\n'), ((6949, 6976), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (6973, 6976), False, 'import betterproto\n'), ((7011, 7038), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(2)'], {}), '(2)\n', (7035, 7038), False, 'import betterproto\n'), ((7062, 7089), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(3)'], {}), '(3)\n', (7086, 7089), False, 'import betterproto\n'), ((7137, 7165), 'betterproto.message_field', 'betterproto.message_field', (['(4)'], {}), '(4)\n', (7162, 7165), False, 'import betterproto\n'), ((7220, 7248), 'betterproto.message_field', 'betterproto.message_field', (['(5)'], {}), '(5)\n', (7245, 7248), False, 'import betterproto\n'), ((7305, 7333), 'betterproto.message_field', 'betterproto.message_field', (['(6)'], {}), '(6)\n', (7330, 7333), False, 'import betterproto\n'), ((7359, 7386), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(7)'], {}), '(7)\n', (7383, 7386), False, 'import betterproto\n'), ((7413, 7440), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(8)'], {}), '(8)\n', (7437, 7440), False, 'import betterproto\n'), ((7468, 7495), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(9)'], {}), '(9)\n', (7492, 7495), False, 'import betterproto\n'), ((7519, 7547), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(10)'], {}), '(10)\n', (7543, 7547), False, 'import betterproto\n'), ((7571, 7599), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(11)'], {}), '(11)\n', (7595, 7599), False, 'import betterproto\n'), ((7626, 7654), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(12)'], {}), '(12)\n', (7650, 7654), False, 'import betterproto\n'), ((7683, 7711), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(13)'], {}), '(13)\n', (7707, 7711), False, 'import betterproto\n'), ((7736, 7764), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(14)'], {}), '(14)\n', (7760, 7764), False, 'import betterproto\n'), ((7789, 7817), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(15)'], {}), '(15)\n', (7813, 7817), False, 'import betterproto\n'), ((7840, 7868), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(16)'], {}), '(16)\n', (7864, 7868), False, 'import betterproto\n'), ((7896, 7924), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(17)'], {}), '(17)\n', (7920, 7924), False, 'import betterproto\n'), ((7950, 7977), 'betterproto.float_field', 'betterproto.float_field', (['(18)'], {}), '(18)\n', (7973, 7977), False, 'import betterproto\n'), ((8002, 8029), 'betterproto.float_field', 'betterproto.float_field', (['(19)'], {}), '(19)\n', (8025, 8029), False, 'import betterproto\n'), ((8057, 8084), 'betterproto.float_field', 'betterproto.float_field', (['(20)'], {}), '(20)\n', (8080, 8084), False, 'import betterproto\n'), ((8109, 8136), 'betterproto.float_field', 'betterproto.float_field', (['(21)'], {}), '(21)\n', (8132, 8136), False, 'import betterproto\n'), ((8169, 8197), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(22)'], {}), '(22)\n', (8193, 8197), False, 'import betterproto\n'), ((8233, 8260), 'betterproto.float_field', 
'betterproto.float_field', (['(23)'], {}), '(23)\n', (8256, 8260), False, 'import betterproto\n'), ((8335, 8364), 'betterproto.message_field', 'betterproto.message_field', (['(24)'], {}), '(24)\n', (8360, 8364), False, 'import betterproto\n'), ((8398, 8424), 'betterproto.bool_field', 'betterproto.bool_field', (['(25)'], {}), '(25)\n', (8420, 8424), False, 'import betterproto\n'), ((8500, 8529), 'betterproto.message_field', 'betterproto.message_field', (['(26)'], {}), '(26)\n', (8525, 8529), False, 'import betterproto\n'), ((8588, 8617), 'betterproto.message_field', 'betterproto.message_field', (['(29)'], {}), '(29)\n', (8613, 8617), False, 'import betterproto\n'), ((8695, 8724), 'betterproto.message_field', 'betterproto.message_field', (['(30)'], {}), '(30)\n', (8720, 8724), False, 'import betterproto\n'), ((8744, 8772), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(31)'], {}), '(31)\n', (8768, 8772), False, 'import betterproto\n'), ((8798, 8826), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(32)'], {}), '(32)\n', (8822, 8826), False, 'import betterproto\n'), ((8901, 8930), 'betterproto.message_field', 'betterproto.message_field', (['(33)'], {}), '(33)\n', (8926, 8930), False, 'import betterproto\n'), ((8963, 8991), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(34)'], {}), '(34)\n', (8987, 8991), False, 'import betterproto\n'), ((9012, 9040), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(35)'], {}), '(35)\n', (9036, 9040), False, 'import betterproto\n'), ((9065, 9093), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(36)'], {}), '(36)\n', (9089, 9093), False, 'import betterproto\n'), ((9120, 9148), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(37)'], {}), '(37)\n', (9144, 9148), False, 'import betterproto\n'), ((9178, 9206), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(38)'], {}), '(38)\n', (9202, 9206), False, 'import betterproto\n'), ((9234, 9262), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(39)'], {}), '(39)\n', (9258, 9262), False, 'import betterproto\n'), ((9289, 9317), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(40)'], {}), '(40)\n', (9313, 9317), False, 'import betterproto\n'), ((9348, 9376), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(41)'], {}), '(41)\n', (9372, 9376), False, 'import betterproto\n'), ((9410, 9438), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(42)'], {}), '(42)\n', (9434, 9438), False, 'import betterproto\n'), ((9470, 9498), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(43)'], {}), '(43)\n', (9494, 9498), False, 'import betterproto\n'), ((9525, 9553), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(44)'], {}), '(44)\n', (9549, 9553), False, 'import betterproto\n'), ((9581, 9608), 'betterproto.float_field', 'betterproto.float_field', (['(45)'], {}), '(45)\n', (9604, 9608), False, 'import betterproto\n'), ((9656, 9682), 'betterproto.enum_field', 'betterproto.enum_field', (['(46)'], {}), '(46)\n', (9678, 9682), False, 'import betterproto\n'), ((9749, 9778), 'betterproto.message_field', 'betterproto.message_field', (['(47)'], {}), '(47)\n', (9774, 9778), False, 'import betterproto\n'), ((9857, 9886), 'betterproto.message_field', 'betterproto.message_field', (['(48)'], {}), '(48)\n', (9882, 9886), False, 'import betterproto\n'), ((9914, 9942), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(49)'], {}), '(49)\n', (9938, 9942), False, 'import betterproto\n'), ((10071, 10098), 
'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (10095, 10098), False, 'import betterproto\n'), ((10119, 10146), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(2)'], {}), '(2)\n', (10143, 10146), False, 'import betterproto\n'), ((10180, 10207), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(3)'], {}), '(3)\n', (10204, 10207), False, 'import betterproto\n'), ((10239, 10266), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(4)'], {}), '(4)\n', (10263, 10266), False, 'import betterproto\n'), ((10293, 10320), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(5)'], {}), '(5)\n', (10317, 10320), False, 'import betterproto\n'), ((10346, 10373), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(6)'], {}), '(6)\n', (10370, 10373), False, 'import betterproto\n'), ((10396, 10421), 'betterproto.bool_field', 'betterproto.bool_field', (['(7)'], {}), '(7)\n', (10418, 10421), False, 'import betterproto\n'), ((10546, 10573), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (10570, 10573), False, 'import betterproto\n'), ((10599, 10624), 'betterproto.enum_field', 'betterproto.enum_field', (['(2)'], {}), '(2)\n', (10621, 10624), False, 'import betterproto\n'), ((10658, 10685), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(3)'], {}), '(3)\n', (10682, 10685), False, 'import betterproto\n'), ((10717, 10744), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(4)'], {}), '(4)\n', (10741, 10744), False, 'import betterproto\n'), ((10776, 10803), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(5)'], {}), '(5)\n', (10800, 10803), False, 'import betterproto\n'), ((10843, 10870), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(6)'], {}), '(6)\n', (10867, 10870), False, 'import betterproto\n'), ((10913, 10940), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(7)'], {}), '(7)\n', (10937, 10940), False, 'import betterproto\n'), ((11035, 11063), 'betterproto.message_field', 'betterproto.message_field', (['(8)'], {}), '(8)\n', (11060, 11063), False, 'import betterproto\n'), ((11222, 11249), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (11246, 11249), False, 'import betterproto\n'), ((11270, 11297), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(2)'], {}), '(2)\n', (11294, 11297), False, 'import betterproto\n'), ((11438, 11466), 'betterproto.message_field', 'betterproto.message_field', (['(1)'], {}), '(1)\n', (11463, 11466), False, 'import betterproto\n'), ((11508, 11534), 'betterproto.float_field', 'betterproto.float_field', (['(2)'], {}), '(2)\n', (11531, 11534), False, 'import betterproto\n'), ((11599, 11627), 'betterproto.message_field', 'betterproto.message_field', (['(3)'], {}), '(3)\n', (11624, 11627), False, 'import betterproto\n'), ((11741, 11768), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (11765, 11768), False, 'import betterproto\n'), ((11785, 11812), 'betterproto.string_field', 'betterproto.string_field', (['(2)'], {}), '(2)\n', (11809, 11812), False, 'import betterproto\n'), ((11927, 11954), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (11951, 11954), False, 'import betterproto\n'), ((12014, 12042), 'betterproto.message_field', 'betterproto.message_field', (['(2)'], {}), '(2)\n', (12039, 12042), False, 'import betterproto\n'), ((12106, 12134), 'betterproto.message_field', 'betterproto.message_field', (['(3)'], {}), 
'(3)\n', (12131, 12134), False, 'import betterproto\n'), ((12256, 12283), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (12280, 12283), False, 'import betterproto\n'), ((12307, 12334), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(2)'], {}), '(2)\n', (12331, 12334), False, 'import betterproto\n'), ((12364, 12390), 'betterproto.bytes_field', 'betterproto.bytes_field', (['(3)'], {}), '(3)\n', (12387, 12390), False, 'import betterproto\n'), ((12471, 12499), 'betterproto.message_field', 'betterproto.message_field', (['(4)'], {}), '(4)\n', (12496, 12499), False, 'import betterproto\n'), ((12535, 12562), 'betterproto.string_field', 'betterproto.string_field', (['(5)'], {}), '(5)\n', (12559, 12562), False, 'import betterproto\n'), ((12637, 12665), 'betterproto.message_field', 'betterproto.message_field', (['(6)'], {}), '(6)\n', (12662, 12665), False, 'import betterproto\n'), ((12697, 12723), 'betterproto.float_field', 'betterproto.float_field', (['(7)'], {}), '(7)\n', (12720, 12723), False, 'import betterproto\n'), ((12756, 12782), 'betterproto.float_field', 'betterproto.float_field', (['(8)'], {}), '(8)\n', (12779, 12782), False, 'import betterproto\n'), ((12816, 12842), 'betterproto.float_field', 'betterproto.float_field', (['(9)'], {}), '(9)\n', (12839, 12842), False, 'import betterproto\n'), ((12877, 12904), 'betterproto.float_field', 'betterproto.float_field', (['(10)'], {}), '(10)\n', (12900, 12904), False, 'import betterproto\n'), ((12937, 12964), 'betterproto.float_field', 'betterproto.float_field', (['(11)'], {}), '(11)\n', (12960, 12964), False, 'import betterproto\n'), ((13036, 13065), 'betterproto.message_field', 'betterproto.message_field', (['(12)'], {}), '(12)\n', (13061, 13065), False, 'import betterproto\n'), ((13133, 13162), 'betterproto.message_field', 'betterproto.message_field', (['(13)'], {}), '(13)\n', (13158, 13162), False, 'import betterproto\n'), ((13296, 13322), 'betterproto.int32_field', 'betterproto.int32_field', (['(1)'], {}), '(1)\n', (13319, 13322), False, 'import betterproto\n'), ((13434, 13462), 'betterproto.message_field', 'betterproto.message_field', (['(2)'], {}), '(2)\n', (13459, 13462), False, 'import betterproto\n'), ((13576, 13604), 'betterproto.message_field', 'betterproto.message_field', (['(3)'], {}), '(3)\n', (13601, 13604), False, 'import betterproto\n'), ((13761, 13788), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(3)'], {}), '(3)\n', (13785, 13788), False, 'import betterproto\n'), ((13811, 13838), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (13835, 13838), False, 'import betterproto\n'), ((13960, 13988), 'betterproto.message_field', 'betterproto.message_field', (['(2)'], {}), '(2)\n', (13985, 13988), False, 'import betterproto\n'), ((14147, 14174), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (14171, 14174), False, 'import betterproto\n'), ((14193, 14220), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(2)'], {}), '(2)\n', (14217, 14220), False, 'import betterproto\n'), ((14378, 14405), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(3)'], {}), '(3)\n', (14402, 14405), False, 'import betterproto\n'), ((14428, 14455), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (14452, 14455), False, 'import betterproto\n'), ((14578, 14606), 'betterproto.message_field', 'betterproto.message_field', (['(2)'], {}), '(2)\n', (14603, 14606), False, 'import betterproto\n'), 
((14766, 14793), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (14790, 14793), False, 'import betterproto\n'), ((14813, 14840), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(2)'], {}), '(2)\n', (14837, 14840), False, 'import betterproto\n'), ((14977, 15004), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (15001, 15004), False, 'import betterproto\n'), ((15035, 15062), 'betterproto.string_field', 'betterproto.string_field', (['(3)'], {}), '(3)\n', (15059, 15062), False, 'import betterproto\n'), ((15152, 15180), 'betterproto.message_field', 'betterproto.message_field', (['(2)'], {}), '(2)\n', (15177, 15180), False, 'import betterproto\n'), ((15321, 15348), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (15345, 15348), False, 'import betterproto\n'), ((15379, 15405), 'betterproto.float_field', 'betterproto.float_field', (['(2)'], {}), '(2)\n', (15402, 15405), False, 'import betterproto\n'), ((15428, 15453), 'betterproto.bool_field', 'betterproto.bool_field', (['(3)'], {}), '(3)\n', (15450, 15453), False, 'import betterproto\n'), ((15582, 15609), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (15606, 15609), False, 'import betterproto\n'), ((15628, 15655), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(2)'], {}), '(2)\n', (15652, 15655), False, 'import betterproto\n'), ((15680, 15707), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(3)'], {}), '(3)\n', (15704, 15707), False, 'import betterproto\n'), ((15727, 15754), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(4)'], {}), '(4)\n', (15751, 15754), False, 'import betterproto\n'), ((15776, 15803), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(5)'], {}), '(5)\n', (15800, 15803), False, 'import betterproto\n'), ((15825, 15852), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(6)'], {}), '(6)\n', (15849, 15852), False, 'import betterproto\n'), ((15870, 15897), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(7)'], {}), '(7)\n', (15894, 15897), False, 'import betterproto\n'), ((15915, 15942), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(8)'], {}), '(8)\n', (15939, 15942), False, 'import betterproto\n'), ((16069, 16096), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(1)'], {}), '(1)\n', (16093, 16096), False, 'import betterproto\n'), ((16115, 16142), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(2)'], {}), '(2)\n', (16139, 16142), False, 'import betterproto\n'), ((16161, 16188), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(3)'], {}), '(3)\n', (16185, 16188), False, 'import betterproto\n'), ((16218, 16245), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(4)'], {}), '(4)\n', (16242, 16245), False, 'import betterproto\n'), ((16265, 16292), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(5)'], {}), '(5)\n', (16289, 16292), False, 'import betterproto\n'), ((16310, 16337), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(6)'], {}), '(6)\n', (16334, 16337), False, 'import betterproto\n'), ((16460, 16487), 'betterproto.string_field', 'betterproto.string_field', (['(1)'], {}), '(1)\n', (16484, 16487), False, 'import betterproto\n'), ((16516, 16543), 'betterproto.uint32_field', 'betterproto.uint32_field', (['(2)'], {}), '(2)\n', (16540, 16543), False, 'import betterproto\n'), ((16572, 16599), 'betterproto.uint32_field', 'betterproto.uint32_field', 
(['(3)'], {}), '(3)\n', (16596, 16599), False, 'import betterproto\n'), ((16624, 16650), 'betterproto.float_field', 'betterproto.float_field', (['(4)'], {}), '(4)\n', (16647, 16650), False, 'import betterproto\n'), ((16760, 16788), 'betterproto.message_field', 'betterproto.message_field', (['(1)'], {}), '(1)\n', (16785, 16788), False, 'import betterproto\n'), ((16826, 16854), 'betterproto.message_field', 'betterproto.message_field', (['(2)'], {}), '(2)\n', (16851, 16854), False, 'import betterproto\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-11-05 16:19
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
('users', '0002_auto_20171105_1034'),
    ]

    operations = [
migrations.CreateModel(
name='Application',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('SSLC', 'SSLC'), ('+2', '+2'), ('EMB', 'Embassy Attestation'), ('BNK1', 'Bank Loan - 1 Year'), ('BNK4', 'Bank Loan - 4 Years'), ('CHAR', 'Character Certificate'), ('NRSD', 'Non Receipt of Stipend'), ('NRLP', 'Non Receipt of Laptop'), ('NRSP', 'Non Receipt of Scholarship'), ('OTH', 'Other')], max_length=4)),
('other', models.CharField(blank=True, max_length=100)),
('applicant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Hierarchy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('application_type', models.CharField(choices=[('SSLC', 'SSLC'), ('+2', '+2'), ('EMB', 'Embassy Attestation'), ('BNK1', 'Bank Loan - 1 Year'), ('BNK4', 'Bank Loan - 4 Years'), ('CHAR', 'Character Certificate'), ('NRSD', 'Non Receipt of Stipend'), ('NRLP', 'Non Receipt of Laptop'), ('NRSP', 'Non Receipt of Scholarship'), ('OTH', 'Other')], max_length=4)),
('sl_no', models.IntegerField()),
('user', models.CharField(choices=[('st', 'STUDENT'), ('tu', 'TUTOR'), ('ho', 'HOD'), ('of', 'OFFICE STAFF')], max_length=2)),
],
),
]
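
# Added note (illustrative, not generated by Django): both CharFields repeat
# the same `choices` list; in the app's models.py this would normally be one
# shared module-level constant referenced by both models, e.g.:
#     APPLICATION_TYPES = [('SSLC', 'SSLC'), ('+2', '+2'), ('OTH', 'Other')]  # truncated
#     type = models.CharField(choices=APPLICATION_TYPES, max_length=4)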
|
[
"django.db.models.CharField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.AutoField"
] |
[((464, 557), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (480, 557), False, 'from django.db import migrations, models\n'), ((581, 930), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('SSLC', 'SSLC'), ('+2', '+2'), ('EMB', 'Embassy Attestation'), ('BNK1',\n 'Bank Loan - 1 Year'), ('BNK4', 'Bank Loan - 4 Years'), ('CHAR',\n 'Character Certificate'), ('NRSD', 'Non Receipt of Stipend'), ('NRLP',\n 'Non Receipt of Laptop'), ('NRSP', 'Non Receipt of Scholarship'), (\n 'OTH', 'Other')]", 'max_length': '(4)'}), "(choices=[('SSLC', 'SSLC'), ('+2', '+2'), ('EMB',\n 'Embassy Attestation'), ('BNK1', 'Bank Loan - 1 Year'), ('BNK4',\n 'Bank Loan - 4 Years'), ('CHAR', 'Character Certificate'), ('NRSD',\n 'Non Receipt of Stipend'), ('NRLP', 'Non Receipt of Laptop'), ('NRSP',\n 'Non Receipt of Scholarship'), ('OTH', 'Other')], max_length=4)\n", (597, 930), False, 'from django.db import migrations, models\n'), ((943, 987), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)'}), '(blank=True, max_length=100)\n', (959, 987), False, 'from django.db import migrations, models\n'), ((1020, 1116), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (1037, 1116), False, 'from django.db import migrations, models\n'), ((1246, 1339), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1262, 1339), False, 'from django.db import migrations, models\n'), ((1375, 1724), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('SSLC', 'SSLC'), ('+2', '+2'), ('EMB', 'Embassy Attestation'), ('BNK1',\n 'Bank Loan - 1 Year'), ('BNK4', 'Bank Loan - 4 Years'), ('CHAR',\n 'Character Certificate'), ('NRSD', 'Non Receipt of Stipend'), ('NRLP',\n 'Non Receipt of Laptop'), ('NRSP', 'Non Receipt of Scholarship'), (\n 'OTH', 'Other')]", 'max_length': '(4)'}), "(choices=[('SSLC', 'SSLC'), ('+2', '+2'), ('EMB',\n 'Embassy Attestation'), ('BNK1', 'Bank Loan - 1 Year'), ('BNK4',\n 'Bank Loan - 4 Years'), ('CHAR', 'Character Certificate'), ('NRSD',\n 'Non Receipt of Stipend'), ('NRLP', 'Non Receipt of Laptop'), ('NRSP',\n 'Non Receipt of Scholarship'), ('OTH', 'Other')], max_length=4)\n", (1391, 1724), False, 'from django.db import migrations, models\n'), ((1737, 1758), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1756, 1758), False, 'from django.db import migrations, models\n'), ((1786, 1905), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('st', 'STUDENT'), ('tu', 'TUTOR'), ('ho', 'HOD'), ('of', 'OFFICE STAFF')]", 'max_length': '(2)'}), "(choices=[('st', 'STUDENT'), ('tu', 'TUTOR'), ('ho', 'HOD'),\n ('of', 'OFFICE STAFF')], max_length=2)\n", (1802, 1905), False, 'from django.db import migrations, models\n')]
|
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import time
from MockSSH import SSHCommand


class HangingCommand(SSHCommand):
    def __init__(self, name, hang_time, *args):
        self.name = name
        self.hang_time = hang_time
        self.protocol = None  # set in __call__

    def __call__(self, protocol, *args):
        SSHCommand.__init__(self, protocol, self.name, *args)
        return self

    def start(self):
        time.sleep(self.hang_time)
        self.write("Done!\n")
        self.exit()


class AmbiguousCommand(SSHCommand):
    def __init__(self, name, *args):
        self.name = name
        self.protocol = None  # set in __call__

    def __call__(self, protocol, *args):
        SSHCommand.__init__(self, protocol, self.name, *args)
        return self

    def start(self):
        self.write("working -> done!\n")
        self.exit()


class MultiAsyncWriteCommand(SSHCommand):
    def __init__(self, name, count, interval, *args):
        self.name = name
        self.count = count
        self.interval = interval
        self.protocol = None  # set in __call__

    def __call__(self, protocol, *args):
        SSHCommand.__init__(self, protocol, self.name, *args)
        return self

    def start(self):
        for i in range(self.count):
            self.write("Line %d\n" % (i + 1))
            time.sleep(self.interval)
        self.exit()


class SkippingLineCommand(SSHCommand):
    def __init__(self, name, lines, *args):
        self.name = name
        self.lines = lines
        self.protocol = None  # set in __call__

    def __call__(self, protocol, *args):
        SSHCommand.__init__(self, protocol, self.name, *args)
        return self

    def start(self):
        for _ in range(self.lines):
            self.write("\r\n")
        self.write("%s lines skipped!\n" % self.lines)
        self.exit()


def exit_command_success(instance):
    instance.protocol.call_command(instance.protocol.commands['_exit'])


def passwd_change_protocol_prompt(instance):
    instance.protocol.prompt = "hostname#"
    instance.protocol.password_input = False


def passwd_write_password_to_transport(instance):
    instance.writeln("MockSSH: password is %s" % instance.valid_password)


class KeystrokeAnsweredCommand(SSHCommand):
    def __init__(self, name):
        self.name = name
        self.protocol = None  # set in __call__

    def __call__(self, protocol, *args):
        SSHCommand.__init__(self, protocol, self.name, *args)
        return self

    def start(self):
        self.write("whatup?")
        this = self

        def finish():
            this.writeln("k")
            this.writeln("K pressed")
            this.exit()
            this.protocol.keyHandlers.pop("k")

        self.protocol.keyHandlers.update({"k": finish})
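
# Usage sketch (added; the wiring below is hypothetical, but the constructor
# signatures match the classes above): each command is configured once and
# is bound to a protocol only when the MockSSH server invokes it.
def build_test_commands():
    return [
        HangingCommand("hang", hang_time=2.0),
        MultiAsyncWriteCommand("stream", count=3, interval=0.5),
        SkippingLineCommand("skip", lines=5),
    ]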
|
[
"MockSSH.SSHCommand.__init__",
"time.sleep"
] |
[((859, 912), 'MockSSH.SSHCommand.__init__', 'SSHCommand.__init__', (['self', 'protocol', 'self.name', '*args'], {}), '(self, protocol, self.name, *args)\n', (878, 912), False, 'from MockSSH import SSHCommand\n'), ((963, 989), 'time.sleep', 'time.sleep', (['self.hang_time'], {}), '(self.hang_time)\n', (973, 989), False, 'import time\n'), ((1238, 1291), 'MockSSH.SSHCommand.__init__', 'SSHCommand.__init__', (['self', 'protocol', 'self.name', '*args'], {}), '(self, protocol, self.name, *args)\n', (1257, 1291), False, 'from MockSSH import SSHCommand\n'), ((1676, 1729), 'MockSSH.SSHCommand.__init__', 'SSHCommand.__init__', (['self', 'protocol', 'self.name', '*args'], {}), '(self, protocol, self.name, *args)\n', (1695, 1729), False, 'from MockSSH import SSHCommand\n'), ((2148, 2201), 'MockSSH.SSHCommand.__init__', 'SSHCommand.__init__', (['self', 'protocol', 'self.name', '*args'], {}), '(self, protocol, self.name, *args)\n', (2167, 2201), False, 'from MockSSH import SSHCommand\n'), ((2957, 3010), 'MockSSH.SSHCommand.__init__', 'SSHCommand.__init__', (['self', 'protocol', 'self.name', '*args'], {}), '(self, protocol, self.name, *args)\n', (2976, 3010), False, 'from MockSSH import SSHCommand\n'), ((1866, 1891), 'time.sleep', 'time.sleep', (['self.interval'], {}), '(self.interval)\n', (1876, 1891), False, 'import time\n')]
|
#!/usr/bin/env python3
import time
import argparse
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_chemistry.ambertools.reduce_remove_hydrogens import reduce_remove_hydrogens
from biobb_structure_utils.utils.extract_molecule import extract_molecule
from biobb_structure_utils.utils.cat_pdb import cat_pdb
from biobb_model.model.fix_side_chain import fix_side_chain
from biobb_model.model.mutate import mutate
from biobb_md.gromacs.pdb2gmx import pdb2gmx
from biobb_md.gromacs.editconf import editconf
from biobb_md.gromacs.solvate import solvate
from biobb_md.gromacs.grompp import grompp
from biobb_md.gromacs.genion import genion
from biobb_md.gromacs.mdrun import mdrun
from biobb_md.gromacs.make_ndx import make_ndx
from biobb_analysis.gromacs.gmx_energy import gmx_energy
from biobb_analysis.gromacs.gmx_rgyr import gmx_rgyr
from biobb_analysis.gromacs.gmx_trjconv_str import gmx_trjconv_str
from biobb_analysis.gromacs.gmx_image import gmx_image
from biobb_analysis.gromacs.gmx_rms import gmx_rms


def main(config, system=None):
start_time = time.time()
conf = settings.ConfReader(config, system)
global_log, _ = fu.get_logs(path=conf.get_working_dir_path(), light_format=True)
global_prop = conf.get_prop_dic(global_log=global_log)
global_paths = conf.get_paths_dic()
global_log.info("step0_reduce_remove_hydrogens: Removing Hydrogens")
reduce_remove_hydrogens(**global_paths["step0_reduce_remove_hydrogens"], properties=global_prop["step0_reduce_remove_hydrogens"])
global_log.info("step1_extract_molecule: Extracting Protein")
extract_molecule(**global_paths["step1_extract_molecule"], properties=global_prop["step1_extract_molecule"])
global_log.info("step00_cat_pdb: Concatenating protein with included ions")
cat_pdb(**global_paths["step00_cat_pdb"], properties=global_prop["step00_cat_pdb"])
global_log.info("step2_fix_side_chain: Modeling the missing heavy atoms in the structure side chains")
fix_side_chain(**global_paths["step2_fix_side_chain"], properties=global_prop["step2_fix_side_chain"])
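
    # (Added comment) Everything below runs once per entry in the `mutations`
    # list of the YAML config; prop/paths are namespaced per mutation via the
    # `prefix` argument of get_prop_dic()/get_paths_dic().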
for mutation_number, mutation in enumerate(conf.properties['mutations']):
global_log.info('')
global_log.info("Mutation: %s %d/%d" % (mutation, mutation_number+1, len(conf.properties['mutations'])))
global_log.info('')
prop = conf.get_prop_dic(prefix=mutation, global_log=global_log)
paths = conf.get_paths_dic(prefix=mutation)
global_log.info("step3_mutate: Modeling mutation")
prop['step3_mutate']['mutation_list'] = mutation
paths['step3_mutate']['input_pdb_path'] = global_paths['step2_fix_side_chain']['output_pdb_path']
mutate(**paths["step3_mutate"], properties=prop["step3_mutate"])
global_log.info("step4_pdb2gmx: Generate the topology")
pdb2gmx(**paths["step4_pdb2gmx"], properties=prop["step4_pdb2gmx"])
global_log.info("step5_editconf: Create the solvent box")
editconf(**paths["step5_editconf"], properties=prop["step5_editconf"])
global_log.info("step6_solvate: Fill the solvent box with water molecules")
solvate(**paths["step6_solvate"], properties=prop["step6_solvate"])
global_log.info("step7_grompp_genion: Preprocess ion generation")
grompp(**paths["step7_grompp_genion"], properties=prop["step7_grompp_genion"])
global_log.info("step8_genion: Ion generation")
genion(**paths["step8_genion"], properties=prop["step8_genion"])
global_log.info("step9_grompp_min: Preprocess energy minimization")
grompp(**paths["step9_grompp_min"], properties=prop["step9_grompp_min"])
global_log.info("step10_mdrun_min: Execute energy minimization")
mdrun(**paths["step10_mdrun_min"], properties=prop["step10_mdrun_min"])
global_log.info("step100_make_ndx: Creating an index file for the whole system")
make_ndx(**paths["step100_make_ndx"], properties=prop["step100_make_ndx"])
global_log.info("step11_grompp_nvt: Preprocess system temperature equilibration")
grompp(**paths["step11_grompp_nvt"], properties=prop["step11_grompp_nvt"])
global_log.info("step12_mdrun_nvt: Execute system temperature equilibration")
mdrun(**paths["step12_mdrun_nvt"], properties=prop["step12_mdrun_nvt"])
global_log.info("step13_grompp_npt: Preprocess system pressure equilibration")
grompp(**paths["step13_grompp_npt"], properties=prop["step13_grompp_npt"])
global_log.info("step14_mdrun_npt: Execute system pressure equilibration")
mdrun(**paths["step14_mdrun_npt"], properties=prop["step14_mdrun_npt"])
global_log.info("step15_grompp_md: Preprocess free dynamics")
grompp(**paths["step15_grompp_md"], properties=prop["step15_grompp_md"])
global_log.info("step16_mdrun_md: Execute free molecular dynamics simulation")
mdrun(**paths["step16_mdrun_md"], properties=prop["step16_mdrun_md"])
global_log.info("step17_gmx_image1: Image Trajectory, step1, moving ligand to center of the water box")
gmx_image(**paths["step17_gmx_image1"], properties=prop["step17_gmx_image1"])
global_log.info("step18_gmx_image2: Image Trajectory, step2, removing rotation")
gmx_image(**paths["step18_gmx_image2"], properties=prop["step18_gmx_image2"])
global_log.info("step19_gmx_trjconv_str: Convert final structure from GRO to PDB")
gmx_trjconv_str(**paths["step19_gmx_trjconv_str"], properties=prop["step19_gmx_trjconv_str"])
global_log.info("step20_gmx_energy: Generate energy plot from minimization/equilibration")
gmx_energy(**paths["step20_gmx_energy"], properties=prop["step20_gmx_energy"])
global_log.info("step21_gmx_rgyr: Generate Radius of Gyration plot for the resulting setup trajectory from the free md step")
gmx_rgyr(**paths["step21_gmx_rgyr"], properties=prop["step21_gmx_rgyr"])
global_log.info("step22_rmsd_first: Generate RMSd (against 1st snp.) plot for the resulting setup trajectory from the free md step")
gmx_rms(**paths["step22_rmsd_first"], properties=prop["step22_rmsd_first"])
global_log.info("step23_rmsd_exp: Generate RMSd (against exp.) plot for the resulting setup trajectory from the free md step")
gmx_rms(**paths["step23_rmsd_exp"], properties=prop["step23_rmsd_exp"])
if conf.properties['run_md']:
global_log.info("step24_grompp_md: Preprocess long MD simulation after setup")
grompp(**paths["step24_grompp_md"], properties=prop["step24_grompp_md"])
elapsed_time = time.time() - start_time
global_log.info('')
global_log.info('')
global_log.info('Execution successful: ')
global_log.info(' Workflow_path: %s' % conf.get_working_dir_path())
global_log.info(' Config File: %s' % config)
if system:
global_log.info(' System: %s' % system)
global_log.info('')
global_log.info('Elapsed time: %.1f minutes' % (elapsed_time/60))
global_log.info('')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Based on the official Gromacs tutorial")
parser.add_argument('--config', required=True)
parser.add_argument('--system', required=False)
args = parser.parse_args()
main(args.config, args.system)
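# Example invocation (sketch; the file names below are assumptions, not taken
# from this workflow):
#
#   python mutation_workflow.py --config workflow.yaml --system local
#
# where workflow.yaml is expected to define working_dir_path, a `mutations`
# list, and one properties/paths entry per step used above.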
|
[
"biobb_analysis.gromacs.gmx_rgyr.gmx_rgyr",
"biobb_structure_utils.utils.extract_molecule.extract_molecule",
"biobb_model.model.mutate.mutate",
"argparse.ArgumentParser",
"biobb_md.gromacs.solvate.solvate",
"biobb_md.gromacs.editconf.editconf",
"biobb_analysis.gromacs.gmx_image.gmx_image",
"biobb_md.gromacs.make_ndx.make_ndx",
"biobb_structure_utils.utils.cat_pdb.cat_pdb",
"biobb_analysis.gromacs.gmx_rms.gmx_rms",
"biobb_md.gromacs.genion.genion",
"biobb_common.configuration.settings.ConfReader",
"biobb_analysis.gromacs.gmx_trjconv_str.gmx_trjconv_str",
"biobb_md.gromacs.mdrun.mdrun",
"biobb_chemistry.ambertools.reduce_remove_hydrogens.reduce_remove_hydrogens",
"biobb_md.gromacs.pdb2gmx.pdb2gmx",
"biobb_model.model.fix_side_chain.fix_side_chain",
"biobb_md.gromacs.grompp.grompp",
"time.time",
"biobb_analysis.gromacs.gmx_energy.gmx_energy"
] |
[((1112, 1123), 'time.time', 'time.time', ([], {}), '()\n', (1121, 1123), False, 'import time\n'), ((1135, 1170), 'biobb_common.configuration.settings.ConfReader', 'settings.ConfReader', (['config', 'system'], {}), '(config, system)\n', (1154, 1170), False, 'from biobb_common.configuration import settings\n'), ((1433, 1566), 'biobb_chemistry.ambertools.reduce_remove_hydrogens.reduce_remove_hydrogens', 'reduce_remove_hydrogens', ([], {'properties': "global_prop['step0_reduce_remove_hydrogens']"}), "(**global_paths['step0_reduce_remove_hydrogens'],\n    properties=global_prop['step0_reduce_remove_hydrogens'])\n", (1456, 1566), False, 'from biobb_chemistry.ambertools.reduce_remove_hydrogens import reduce_remove_hydrogens\n'), ((1634, 1747), 'biobb_structure_utils.utils.extract_molecule.extract_molecule', 'extract_molecule', ([], {'properties': "global_prop['step1_extract_molecule']"}), "(**global_paths['step1_extract_molecule'], properties=\n    global_prop['step1_extract_molecule'])\n", (1650, 1747), False, 'from biobb_structure_utils.utils.extract_molecule import extract_molecule\n'), ((1828, 1916), 'biobb_structure_utils.utils.cat_pdb.cat_pdb', 'cat_pdb', ([], {'properties': "global_prop['step00_cat_pdb']"}), "(**global_paths['step00_cat_pdb'], properties=global_prop[\n    'step00_cat_pdb'])\n", (1835, 1916), False, 'from biobb_structure_utils.utils.cat_pdb import cat_pdb\n'), ((2024, 2131), 'biobb_model.model.fix_side_chain.fix_side_chain', 'fix_side_chain', ([], {'properties': "global_prop['step2_fix_side_chain']"}), "(**global_paths['step2_fix_side_chain'], properties=\n    global_prop['step2_fix_side_chain'])\n", (2038, 2131), False, 'from biobb_model.model.fix_side_chain import fix_side_chain\n'), ((7130, 7207), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Based on the official Gromacs tutorial"""'}), "(description='Based on the official Gromacs tutorial')\n", (7153, 7207), False, 'import argparse\n'), ((2732, 2796), 'biobb_model.model.mutate.mutate', 'mutate', ([], {'properties': "prop['step3_mutate']"}), "(**paths['step3_mutate'], properties=prop['step3_mutate'])\n", (2738, 2796), False, 'from biobb_model.model.mutate import mutate\n'), ((2870, 2937), 'biobb_md.gromacs.pdb2gmx.pdb2gmx', 'pdb2gmx', ([], {'properties': "prop['step4_pdb2gmx']"}), "(**paths['step4_pdb2gmx'], properties=prop['step4_pdb2gmx'])\n", (2877, 2937), False, 'from biobb_md.gromacs.pdb2gmx import pdb2gmx\n'), ((3013, 3083), 'biobb_md.gromacs.editconf.editconf', 'editconf', ([], {'properties': "prop['step5_editconf']"}), "(**paths['step5_editconf'], properties=prop['step5_editconf'])\n", (3021, 3083), False, 'from biobb_md.gromacs.editconf import editconf\n'), ((3177, 3244), 'biobb_md.gromacs.solvate.solvate', 'solvate', ([], {'properties': "prop['step6_solvate']"}), "(**paths['step6_solvate'], properties=prop['step6_solvate'])\n", (3184, 3244), False, 'from biobb_md.gromacs.solvate import solvate\n'), ((3328, 3406), 'biobb_md.gromacs.grompp.grompp', 'grompp', ([], {'properties': "prop['step7_grompp_genion']"}), "(**paths['step7_grompp_genion'], properties=prop['step7_grompp_genion'])\n", (3334, 3406), False, 'from biobb_md.gromacs.grompp import grompp\n'), ((3472, 3536), 'biobb_md.gromacs.genion.genion', 'genion', ([], {'properties': "prop['step8_genion']"}), "(**paths['step8_genion'], properties=prop['step8_genion'])\n", (3478, 3536), False, 'from biobb_md.gromacs.genion import genion\n'), ((3622, 3694), 'biobb_md.gromacs.grompp.grompp', 'grompp', ([], {'properties': "prop['step9_grompp_min']"}), "(**paths['step9_grompp_min'], properties=prop['step9_grompp_min'])\n", (3628, 3694), False, 'from biobb_md.gromacs.grompp import grompp\n'), ((3777, 3848), 'biobb_md.gromacs.mdrun.mdrun', 'mdrun', ([], {'properties': "prop['step10_mdrun_min']"}), "(**paths['step10_mdrun_min'], properties=prop['step10_mdrun_min'])\n", (3782, 3848), False, 'from biobb_md.gromacs.mdrun import mdrun\n'), ((3947, 4021), 'biobb_md.gromacs.make_ndx.make_ndx', 'make_ndx', ([], {'properties': "prop['step100_make_ndx']"}), "(**paths['step100_make_ndx'], properties=prop['step100_make_ndx'])\n", (3955, 4021), False, 'from biobb_md.gromacs.make_ndx import make_ndx\n'), ((4121, 4195), 'biobb_md.gromacs.grompp.grompp', 'grompp', ([], {'properties': "prop['step11_grompp_nvt']"}), "(**paths['step11_grompp_nvt'], properties=prop['step11_grompp_nvt'])\n", (4127, 4195), False, 'from biobb_md.gromacs.grompp import grompp\n'), ((4291, 4362), 'biobb_md.gromacs.mdrun.mdrun', 'mdrun', ([], {'properties': "prop['step12_mdrun_nvt']"}), "(**paths['step12_mdrun_nvt'], properties=prop['step12_mdrun_nvt'])\n", (4296, 4362), False, 'from biobb_md.gromacs.mdrun import mdrun\n'), ((4459, 4533), 'biobb_md.gromacs.grompp.grompp', 'grompp', ([], {'properties': "prop['step13_grompp_npt']"}), "(**paths['step13_grompp_npt'], properties=prop['step13_grompp_npt'])\n", (4465, 4533), False, 'from biobb_md.gromacs.grompp import grompp\n'), ((4626, 4697), 'biobb_md.gromacs.mdrun.mdrun', 'mdrun', ([], {'properties': "prop['step14_mdrun_npt']"}), "(**paths['step14_mdrun_npt'], properties=prop['step14_mdrun_npt'])\n", (4631, 4697), False, 'from biobb_md.gromacs.mdrun import mdrun\n'), ((4777, 4849), 'biobb_md.gromacs.grompp.grompp', 'grompp', ([], {'properties': "prop['step15_grompp_md']"}), "(**paths['step15_grompp_md'], properties=prop['step15_grompp_md'])\n", (4783, 4849), False, 'from biobb_md.gromacs.grompp import grompp\n'), ((4946, 5015), 'biobb_md.gromacs.mdrun.mdrun', 'mdrun', ([], {'properties': "prop['step16_mdrun_md']"}), "(**paths['step16_mdrun_md'], properties=prop['step16_mdrun_md'])\n", (4951, 5015), False, 'from biobb_md.gromacs.mdrun import mdrun\n'), ((5137, 5214), 'biobb_analysis.gromacs.gmx_image.gmx_image', 'gmx_image', ([], {'properties': "prop['step17_gmx_image1']"}), "(**paths['step17_gmx_image1'], properties=prop['step17_gmx_image1'])\n", (5146, 5214), False, 'from biobb_analysis.gromacs.gmx_image import gmx_image\n'), ((5313, 5390), 'biobb_analysis.gromacs.gmx_image.gmx_image', 'gmx_image', ([], {'properties': "prop['step18_gmx_image2']"}), "(**paths['step18_gmx_image2'], properties=prop['step18_gmx_image2'])\n", (5322, 5390), False, 'from biobb_analysis.gromacs.gmx_image import gmx_image\n'), ((5491, 5589), 'biobb_analysis.gromacs.gmx_trjconv_str.gmx_trjconv_str', 'gmx_trjconv_str', ([], {'properties': "prop['step19_gmx_trjconv_str']"}), "(**paths['step19_gmx_trjconv_str'], properties=prop[\n    'step19_gmx_trjconv_str'])\n", (5506, 5589), False, 'from biobb_analysis.gromacs.gmx_trjconv_str import gmx_trjconv_str\n'), ((5693, 5771), 'biobb_analysis.gromacs.gmx_energy.gmx_energy', 'gmx_energy', ([], {'properties': "prop['step20_gmx_energy']"}), "(**paths['step20_gmx_energy'], properties=prop['step20_gmx_energy'])\n", (5703, 5771), False, 'from biobb_analysis.gromacs.gmx_energy import gmx_energy\n'), ((5915, 5987), 'biobb_analysis.gromacs.gmx_rgyr.gmx_rgyr', 'gmx_rgyr', ([], {'properties': "prop['step21_gmx_rgyr']"}), "(**paths['step21_gmx_rgyr'], properties=prop['step21_gmx_rgyr'])\n", (5923, 5987), False, 'from biobb_analysis.gromacs.gmx_rgyr import gmx_rgyr\n'), ((6138, 6213), 'biobb_analysis.gromacs.gmx_rms.gmx_rms', 'gmx_rms', ([], {'properties': "prop['step22_rmsd_first']"}), "(**paths['step22_rmsd_first'], properties=prop['step22_rmsd_first'])\n", (6145, 6213), False, 'from biobb_analysis.gromacs.gmx_rms import gmx_rms\n'), ((6358, 6429), 'biobb_analysis.gromacs.gmx_rms.gmx_rms', 'gmx_rms', ([], {'properties': "prop['step23_rmsd_exp']"}), "(**paths['step23_rmsd_exp'], properties=prop['step23_rmsd_exp'])\n", (6365, 6429), False, 'from biobb_analysis.gromacs.gmx_rms import gmx_rms\n'), ((6665, 6676), 'time.time', 'time.time', ([], {}), '()\n', (6674, 6676), False, 'import time\n'), ((6572, 6644), 'biobb_md.gromacs.grompp.grompp', 'grompp', ([], {'properties': "prop['step24_grompp_md']"}), "(**paths['step24_grompp_md'], properties=prop['step24_grompp_md'])\n", (6578, 6644), False, 'from biobb_md.gromacs.grompp import grompp\n')]
|
__author__ = ["<NAME>"]
__description__ = "Text cleaner functions that deal with casing."
__email__ = ["<EMAIL>"]
__status__ = "Prototype"
import re
def clean_cases(text: str) -> str:
"""Makes text all lowercase.
Arguments:
text:
The text to be converted to all lowercase.
Returns:
The lowercase text.
"""
return text.lower()
def kebab_to_snake_case(text: str) -> str:
"""Convert a kebab-cased-text to snake_case.
Arguments:
text:
The text to be converted to snake case. Must be valid kebab case.
Returns:
        The text in snake_case form.
"""
return text.replace("-", "_")
def split_camel_cased(text: str) -> str:
"""Split camelCased elements with a space.
Arguments:
text:
The text to be processed.
Returns:
The text with all camelCased elements split into different elements.
"""
return re.sub("(?!^)([A-Z][a-z]+)", r" \1", text)
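if __name__ == "__main__":
    # Minimal sanity check of the three cleaners (illustrative only; the
    # sample strings are assumptions).
    print(clean_cases("Some-Kebab-Text"))          # -> "some-kebab-text"
    print(kebab_to_snake_case("some-kebab-text"))  # -> "some_kebab_text"
    print(split_camel_cased("camelCasedWords"))    # -> "camel Cased Words"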
|
[
"re.sub"
] |
[((942, 984), 're.sub', 're.sub', (['"""(?!^)([A-Z][a-z]+)"""', '""" \\\\1"""', 'text'], {}), "('(?!^)([A-Z][a-z]+)', ' \\\\1', text)\n", (948, 984), False, 'import re\n')]
|
import unittest
from pycozmo.image_encoder import ImageEncoder, str_to_image, ImageDecoder, image_to_str
from pycozmo.util import hex_dump, hex_load
from pycozmo.tests.image_encoder_fixtures import FIXTURES
class TestImageEncoder(unittest.TestCase):
@staticmethod
def _encode(sim: str) -> str:
im = str_to_image(sim)
encoder = ImageEncoder(im)
buf = encoder.encode()
res = hex_dump(buf)
return res
def assertSameImage(self, sim: str, seq: str) -> None:
buffer = hex_load(seq)
decoder = ImageDecoder(buffer)
decoder.decode()
actual = image_to_str(decoder.image)
self.assertEqual(sim.strip(), actual.strip())
def test_blank(self):
fixture = FIXTURES["blank"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_fill_screen(self):
fixture = FIXTURES["fill_screen"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_fill_screen2(self):
fixture = FIXTURES["fill_screen2"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_top_left(self):
fixture = FIXTURES["top_left"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_top_left_5(self):
fixture = FIXTURES["top_left_5"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_top_left_1_8(self):
fixture = FIXTURES["top_left_1_8"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_top_left_line(self):
fixture = FIXTURES["top_left_line"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_top_line(self):
fixture = FIXTURES["top_line"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_bottom_line(self):
fixture = FIXTURES["bottom_line"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_left_line(self):
fixture = FIXTURES["left_line"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_right_line(self):
fixture = FIXTURES["right_line"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_columns(self):
fixture = FIXTURES["columns"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_rect(self):
fixture = FIXTURES["rect"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_rect2(self):
fixture = FIXTURES["rect2"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_rect3(self):
fixture = FIXTURES["rect3"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_rect4(self):
fixture = FIXTURES["rect4"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_diagonal(self):
fixture = FIXTURES["diagonal"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_diagonal2(self):
fixture = FIXTURES["diagonal2"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_blocks(self):
fixture = FIXTURES["blocks"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_pycozmo(self):
fixture = FIXTURES["pycozmo"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard_tl(self):
fixture = FIXTURES["chessboard_tl"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard_bl(self):
fixture = FIXTURES["chessboard_bl"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard_tr(self):
fixture = FIXTURES["chessboard_tr"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard_br(self):
fixture = FIXTURES["chessboard_br"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard2_tl(self):
fixture = FIXTURES["chessboard2_tl"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard2_bl(self):
fixture = FIXTURES["chessboard2_bl"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard2_tr(self):
fixture = FIXTURES["chessboard2_tr"]
sim = fixture["image"]
expected = fixture["seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
def test_chessboard2_br(self):
fixture = FIXTURES["chessboard2_br"]
sim = fixture["image"]
expected = fixture["alt_seq"]
actual = self._encode(sim)
self.assertEqual(expected, actual)
self.assertSameImage(sim, actual)
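# Entry point added so the suite can also be run directly with
# `python <this_file>.py` (assumption: it is normally run via a test runner).
if __name__ == "__main__":
    unittest.main()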
|
[
"pycozmo.image_encoder.ImageEncoder",
"pycozmo.image_encoder.ImageDecoder",
"pycozmo.util.hex_dump",
"pycozmo.image_encoder.str_to_image",
"pycozmo.util.hex_load",
"pycozmo.image_encoder.image_to_str"
] |
[((320, 337), 'pycozmo.image_encoder.str_to_image', 'str_to_image', (['sim'], {}), '(sim)\n', (332, 337), False, 'from pycozmo.image_encoder import ImageEncoder, str_to_image, ImageDecoder, image_to_str\n'), ((356, 372), 'pycozmo.image_encoder.ImageEncoder', 'ImageEncoder', (['im'], {}), '(im)\n', (368, 372), False, 'from pycozmo.image_encoder import ImageEncoder, str_to_image, ImageDecoder, image_to_str\n'), ((418, 431), 'pycozmo.util.hex_dump', 'hex_dump', (['buf'], {}), '(buf)\n', (426, 431), False, 'from pycozmo.util import hex_dump, hex_load\n'), ((528, 541), 'pycozmo.util.hex_load', 'hex_load', (['seq'], {}), '(seq)\n', (536, 541), False, 'from pycozmo.util import hex_dump, hex_load\n'), ((560, 580), 'pycozmo.image_encoder.ImageDecoder', 'ImageDecoder', (['buffer'], {}), '(buffer)\n', (572, 580), False, 'from pycozmo.image_encoder import ImageEncoder, str_to_image, ImageDecoder, image_to_str\n'), ((623, 650), 'pycozmo.image_encoder.image_to_str', 'image_to_str', (['decoder.image'], {}), '(decoder.image)\n', (635, 650), False, 'from pycozmo.image_encoder import ImageEncoder, str_to_image, ImageDecoder, image_to_str\n')]
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# use all cores
#import os
#os.system("taskset -p 0xff %d" % os.getpid())
pd.options.mode.chained_assignment = None  # deactivate chained-assignment warnings
def load_seattle_speed_matrix():
""" Loads the whole Seattle `speed_matrix_2015` into memory.
Caution ~ 200 mb of data
:param:
:return df (pandas.DataFrame): speed matrix as DataFrame. Columns are sensors, rows are timestamps
"""
speed_matrix = './data/Seattle_Loop_Dataset/speed_matrix_2015'
print('Loading data...')
df = pd.read_pickle(speed_matrix)
df.index = pd.to_datetime(df.index, format='%Y-%m-%d %H:%M')
print('Load completed.')
return df
def best_moving_average(df, col, average_window_in_hours=27, from_date=None, to_date=None, plot=False):
""" Calculates the moving average in a window of `average_window_in_hours` hours and propagates
into the future.
Beware! This code uses data from the future to perform predictions.
Meaning it is meant to be used to generate the "perfect" moving average baseline.
:param df (pandas.DataFrame): dataset being used
:param col (str): column for which the moving average will be applied
:param average_window_in_hours (int): the window (in hours) used to generate predictions
:param from_date (str): initial date to be shown in the plot, format: "YYYY-MM-DD"
:param to_date (str): end date to be shown in the plot
:param plot (bool): plot moving average and original df
:return MAE, RMSE (tuple): Both metrics are calculated for the column `col`
"""
ndf = df[[col]]
window_size = average_window_in_hours*12
ndf['preds'] = ndf.rolling(window=window_size).mean().shift(1)
MAE = ndf.apply((lambda x: np.abs(x[0] - x[1])), axis=1).dropna().mean()
RMSE = np.sqrt(ndf.apply((lambda x: np.power(x[0] - x[1], 2)), axis=1).dropna().mean())
if plot:
if from_date is not None and to_date is not None:
ndf.resample('1h').mean().loc[from_date:to_date].plot(figsize=(12, 7))
else:
ndf.resample('1h').mean()[:500].plot(figsize=(12, 7))
plt.show()
return (MAE, RMSE)
def calculate_metrics(df, average_window_in_hours, verbose=5, save=True):
""" Calculates MAE and RMSE for all columns of `df`, taking a sliding window of `average_window_in_hours` hours.
    :param df (pandas.DataFrame): dataset being used
:param average_window_in_hours (int): the window (in hours) used to generate predictions
:param verbose (int): option to display the calculations on-the-fly.
Values are going to be displayed after `verbose` iterations.
    :param save (bool): whether to save the per-column metrics to a CSV file
:return mae_and_rmse (dict): dictionary containing (MAE, RMSE) for each column of `df`
"""
mae_and_rmse = {}
for (it, col) in enumerate(df.columns):
MAE, RMSE = best_moving_average(df, col, average_window_in_hours)
mae_and_rmse[col] = (MAE, RMSE)
if it%verbose == 0:
print('Column: {}, MAE: {}, RMSE: {}'.format(col, MAE, RMSE))
if save:
        # TODO: add parameters for the output filename and directory
        pd.DataFrame(mae_and_rmse, index=['MAE', 'RMSE']).to_csv('./experiment_results/seattle_best_moving_average_mae_rmse.csv')
return mae_and_rmse
def real_moving_average(df, col, sliding_window_in_hours, forecast_window_in_minutes):
""" Calculating the moving average using a sliding window of `sliding_window_in_hours`
on a forecast window of `forecast_window_in_minutes` over the dataset.
Returns a dataframe with the forecast for the given dataframe.
"""
sliding_window = 12*sliding_window_in_hours
forecast_window = ((forecast_window_in_minutes+5)//5)
X = df[col].values
Y = X[:sliding_window]
for i in range(forecast_window):
ypred = np.mean(Y[i: i+sliding_window])
Y = np.append(Y, ypred)
forecast_df = pd.DataFrame(
data=Y[len(Y)-forecast_window:],
index=df.index[sliding_window:sliding_window+forecast_window]
)
return forecast_df
# still need to compute MAE and RMSE for all data
def moving_average_forecast(df, col, sliding_window_in_hours, forecast_window_in_minutes):
""" Applies moving average forecast across all the dataset. Stride can be applied to make forecasting faster,
ie, stride makes the sliding window jump a window of `stride_in_minutes`.
Returns a pandas.DataFrame containing a side-by-side comparison of the real dataframe and its predictions,
for all predicted values.
"""
sliding_window = 12*sliding_window_in_hours
forecast_window = ((forecast_window_in_minutes+5)//5)
stride_in_minutes = 60
stride = (stride_in_minutes//5)
all_predictions = []
if stride_in_minutes == 0:
max_it = len(df)
else:
max_it = len(df)//stride
for i in range(max_it):
try:
smaller_df = df.iloc[i*stride: (sliding_window+forecast_window) + (i+1)*stride]
preds = real_moving_average(smaller_df, col, sliding_window_in_hours, forecast_window_in_minutes)
fdf = pd.concat([smaller_df[[col]].loc[preds.index[0]:preds.index[-1]],preds], axis=1)
fdf = fdf.rename(columns={0:col+'_pred'})
all_predictions.append(fdf)
        except Exception:
            # skip windows that extend past the end of the dataset
            pass
return pd.concat(all_predictions, axis=0)
def metrics(preds_df):
""" Given a `preds_df` containing two columns, the first with real values and the second being preds,
returns MAE and RMSE
"""
preds = preds_df
MAE = np.mean(np.abs(preds[preds.columns[0]] - preds[preds.columns[1]] ))
RMSE = np.sqrt(np.mean(np.power(preds[preds.columns[0]] - preds[preds.columns[1]], 2)))
return (MAE, RMSE)
def main():
# this options should go into an argument parser
SLIDING_WINDOW_IN_HOURS = 4
FORECAST_WINDOW_IN_MINUTES = 15
STRIDE_IN_MINUTES = 60
df = load_seattle_speed_matrix()
metrics_dict = {}
for col in df.columns:
print(col)
preds = moving_average_forecast(df, col, SLIDING_WINDOW_IN_HOURS, FORECAST_WINDOW_IN_MINUTES)
mae_rmse = metrics(preds)
metrics_dict[col] = mae_rmse
pd.DataFrame(metrics_dict, index=['MAE', 'RMSE']).to_csv('./experiment_results/training_window_4_hour_forecast_window_15_min_mae_rmse_seattle.csv')
if __name__ == '__main__':
main()
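# Illustrative use of the helpers on a toy frame (the column name, index and
# parameter values are assumptions):
#
#   toy = pd.DataFrame({"sensor_0": range(100)},
#                      index=pd.date_range("2015-01-01", periods=100, freq="5min"))
#   preds = moving_average_forecast(toy, "sensor_0",
#                                   sliding_window_in_hours=4,
#                                   forecast_window_in_minutes=15)
#   print(metrics(preds))  # -> (MAE, RMSE)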
|
[
"pandas.DataFrame",
"matplotlib.pyplot.show",
"numpy.abs",
"numpy.power",
"numpy.append",
"numpy.mean",
"pandas.to_datetime",
"pandas.read_pickle",
"pandas.concat"
] |
[((580, 608), 'pandas.read_pickle', 'pd.read_pickle', (['speed_matrix'], {}), '(speed_matrix)\n', (594, 608), True, 'import pandas as pd\n'), ((624, 673), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {'format': '"""%Y-%m-%d %H:%M"""'}), "(df.index, format='%Y-%m-%d %H:%M')\n", (638, 673), True, 'import pandas as pd\n'), ((5405, 5439), 'pandas.concat', 'pd.concat', (['all_predictions'], {'axis': '(0)'}), '(all_predictions, axis=0)\n', (5414, 5439), True, 'import pandas as pd\n'), ((2169, 2179), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2177, 2179), True, 'import matplotlib.pyplot as plt\n'), ((3886, 3918), 'numpy.mean', 'np.mean', (['Y[i:i + sliding_window]'], {}), '(Y[i:i + sliding_window])\n', (3893, 3918), True, 'import numpy as np\n'), ((3930, 3949), 'numpy.append', 'np.append', (['Y', 'ypred'], {}), '(Y, ypred)\n', (3939, 3949), True, 'import numpy as np\n'), ((5644, 5701), 'numpy.abs', 'np.abs', (['(preds[preds.columns[0]] - preds[preds.columns[1]])'], {}), '(preds[preds.columns[0]] - preds[preds.columns[1]])\n', (5650, 5701), True, 'import numpy as np\n'), ((5186, 5271), 'pandas.concat', 'pd.concat', (['[smaller_df[[col]].loc[preds.index[0]:preds.index[-1]], preds]'], {'axis': '(1)'}), '([smaller_df[[col]].loc[preds.index[0]:preds.index[-1]], preds],\n axis=1)\n', (5195, 5271), True, 'import pandas as pd\n'), ((5731, 5793), 'numpy.power', 'np.power', (['(preds[preds.columns[0]] - preds[preds.columns[1]])', '(2)'], {}), '(preds[preds.columns[0]] - preds[preds.columns[1]], 2)\n', (5739, 5793), True, 'import numpy as np\n'), ((6277, 6326), 'pandas.DataFrame', 'pd.DataFrame', (['metrics_dict'], {'index': "['MAE', 'RMSE']"}), "(metrics_dict, index=['MAE', 'RMSE'])\n", (6289, 6326), True, 'import pandas as pd\n'), ((3192, 3237), 'pandas.DataFrame', 'pd.DataFrame', (['mae_rmse'], {'index': "['MAE', 'RMSE']"}), "(mae_rmse, index=['MAE', 'RMSE'])\n", (3204, 3237), True, 'import pandas as pd\n'), ((1789, 1808), 'numpy.abs', 'np.abs', (['(x[0] - x[1])'], {}), '(x[0] - x[1])\n', (1795, 1808), True, 'import numpy as np\n'), ((1875, 1899), 'numpy.power', 'np.power', (['(x[0] - x[1])', '(2)'], {}), '(x[0] - x[1], 2)\n', (1883, 1899), True, 'import numpy as np\n')]
|
from itsdangerous import URLSafeTimedSerializer
from . import app
ts = URLSafeTimedSerializer(app.config['SECRET_KEY'])
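# Typical use for e-mail confirmation tokens (sketch; the salt and max_age
# values are assumptions):
#
#   token = ts.dumps("user@example.com", salt="email-confirm-key")
#   email = ts.loads(token, salt="email-confirm-key", max_age=86400)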
|
[
"itsdangerous.URLSafeTimedSerializer"
] |
[((72, 120), 'itsdangerous.URLSafeTimedSerializer', 'URLSafeTimedSerializer', (["app.config['SECRET_KEY']"], {}), "(app.config['SECRET_KEY'])\n", (94, 120), False, 'from itsdangerous import URLSafeTimedSerializer\n')]
|
from django.conf import settings
from django.conf.urls.static import static
from django.urls import path, include
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from . import views
from .forms import LoginForm
urlpatterns = [
path('', views.index, name="home"),
path('register', views.register, name='register'),
path('profile', views.profile, name='profile'),
path('timeline/new', views.new_post, name='new_post'),
path('profile/edit', views.edit_profile, name='edit_profile'),
path('<user_name>', views.users, name='user_profile'),
path('post/<int:image_id>', views.image_view, name='image_view'),
path('login/', auth_views.LoginView.as_view(authentication_form=LoginForm), name='login'),
path('logout/', views.logout_view, name='logout'),
    path('search/', views.search, name='search'),
# method views
path('follow/<user_name>', views.follow, name='follow'),
    path('like/<int:image_id>', views.like, name='like')
]
|
[
"django.contrib.auth.views.LoginView.as_view",
"django.urls.path"
] |
[((267, 301), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""home"""'}), "('', views.index, name='home')\n", (271, 301), False, 'from django.urls import path, include\n'), ((306, 355), 'django.urls.path', 'path', (['"""register"""', 'views.register'], {'name': '"""register"""'}), "('register', views.register, name='register')\n", (310, 355), False, 'from django.urls import path, include\n'), ((360, 406), 'django.urls.path', 'path', (['"""profile"""', 'views.profile'], {'name': '"""profile"""'}), "('profile', views.profile, name='profile')\n", (364, 406), False, 'from django.urls import path, include\n'), ((411, 464), 'django.urls.path', 'path', (['"""timeline/new"""', 'views.new_post'], {'name': '"""new_post"""'}), "('timeline/new', views.new_post, name='new_post')\n", (415, 464), False, 'from django.urls import path, include\n'), ((469, 530), 'django.urls.path', 'path', (['"""profile/edit"""', 'views.edit_profile'], {'name': '"""edit_profile"""'}), "('profile/edit', views.edit_profile, name='edit_profile')\n", (473, 530), False, 'from django.urls import path, include\n'), ((535, 588), 'django.urls.path', 'path', (['"""<user_name>"""', 'views.users'], {'name': '"""user_profile"""'}), "('<user_name>', views.users, name='user_profile')\n", (539, 588), False, 'from django.urls import path, include\n'), ((593, 657), 'django.urls.path', 'path', (['"""post/<int:image_id>"""', 'views.image_view'], {'name': '"""image_view"""'}), "('post/<int:image_id>', views.image_view, name='image_view')\n", (597, 657), False, 'from django.urls import path, include\n'), ((756, 805), 'django.urls.path', 'path', (['"""logout/"""', 'views.logout_view'], {'name': '"""logout"""'}), "('logout/', views.logout_view, name='logout')\n", (760, 805), False, 'from django.urls import path, include\n'), ((810, 854), 'django.urls.path', 'path', (['"""search/"""', 'views.search'], {'name': '"""search"""'}), "('search/', views.search, name='search')\n", (814, 854), False, 'from django.urls import path, include\n'), ((876, 931), 'django.urls.path', 'path', (['"""follow/<user_name>"""', 'views.follow'], {'name': '"""follow"""'}), "('follow/<user_name>', views.follow, name='follow')\n", (880, 931), False, 'from django.urls import path, include\n'), ((936, 988), 'django.urls.path', 'path', (['"""like/<int:image_id>"""', 'views.like'], {'name': '"""like"""'}), "('like/<int:image_id>', views.like, name='like')\n", (940, 988), False, 'from django.urls import path, include\n'), ((677, 736), 'django.contrib.auth.views.LoginView.as_view', 'auth_views.LoginView.as_view', ([], {'authentication_form': 'LoginForm'}), '(authentication_form=LoginForm)\n', (705, 736), True, 'from django.contrib.auth import views as auth_views\n')]
|
""" This module contains a class that describes an object in the world. """
import numpy as np
class Object:
"""
Object is a simple wireframe composed of multiple points connected by
lines that can be drawn in the viewport.
"""
TOTAL_OBJECTS = -1
def __init__(self, points=None, name=None, color=None):
self._points = [] if points is None else points
self._name = self.default_name() if name is None else name
self._color = (0, 0, 0) if color is None else color
Object.TOTAL_OBJECTS += 1
@staticmethod
def default_name():
""" Default name for new objects. """
return "object{}".format(Object.TOTAL_OBJECTS + 1)
@property
def points(self):
""" The points in the wireframe. """
return self._points
@property
def name(self):
""" Name of the object. """
return self._name
@property
def color(self):
""" Color of the object. """
return self._color
@property
def center(self):
""" Center of the object. """
points = set()
for face in self._points:
points.update(face)
x_points = [point[0] for point in points]
y_points = [point[1] for point in points]
z_points = [point[2] for point in points]
return \
(np.average(x_points), np.average(y_points), np.average(z_points))
def _transform(self, matrix, center=None, offset=None):
center = self.center if center is None else center
# move object to center
operation_matrix = np.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[-center[0], -center[1], -center[2], 1],
])
# perform operation
operation_matrix = operation_matrix.dot([
matrix[0] + [0],
matrix[1] + [0],
matrix[2] + [0],
([0, 0, 0] if offset is None else offset) + [1],
])
# move object back to original position
operation_matrix = operation_matrix.dot([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[center[0], center[1], center[2], 1],
])
for fpos, face in enumerate(self._points):
for ppos, point in enumerate(face):
new_point = np.dot(point + (1,), operation_matrix)
self._points[fpos][ppos] = tuple(new_point[:3])
def move(self, offset):
""" Moves the object by an offset = (x, y). """
self._transform(
[
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
], center=None, offset=list(offset))
def zoom(self, factor):
""" Zooms in the object by 'factor' times. """
self._transform(
[
[factor, 0, 0],
[0, factor, 0],
[0, 0, factor],
])
@staticmethod
def generate_rotation_matrix(x_angle, y_angle, z_angle):
""" Generates the matrix that rotates points. """
return np.array([
[1, 0, 0],
[0, np.cos(x_angle), -np.sin(x_angle)],
[0, np.sin(x_angle), np.cos(x_angle)],
]).dot([
[np.cos(y_angle), 0, np.sin(y_angle)],
[0, 1, 0],
[-np.sin(y_angle), 0, np.cos(y_angle)],
]).dot([
[np.cos(z_angle), -np.sin(z_angle), 0],
[np.sin(z_angle), np.cos(z_angle), 0],
[0, 0, 1],
]).tolist()
def rotate(self, x_angle, y_angle, z_angle, center=None):
""" Rotates the object around center, the angle is in radians. """
self._transform(
Object.generate_rotation_matrix(x_angle, y_angle, z_angle),
center)
def project(self):
""" Projects the 3D objects to 2D. Using perspective projection. """
def _project(point):
return (
point[0]/(point[2]/Window.COP_DISTANCE+1),
point[1]/(point[2]/Window.COP_DISTANCE+1))
self._points = [list(map(_project, face)) for face in self._points]
def clip(self, window):
""" Weiler-Atherton polygon clipping algorithm. """
def connect_points(clipped, side1, side2, window):
""" Connects points of the window. """
edge = side1
while edge != side2:
clipped.append(window.points[0][edge])
edge = (edge - 1) % 4
boundaries = window.real_boundaries
clipped = []
for face in self._points:
new_face = []
entered, exited = None, None
for i in range(len(face) - 1):
points, side = Object._clip_line(
face[i], face[i + 1], *boundaries[0], *boundaries[1])
if not points: # clipped line is outside window
continue
if side[0] is not None: # entered
if exited is not None:
connect_points(new_face, exited, side[0], window)
else:
entered = side[0]
if side[1] is not None: # exited
exited = side[1]
new_face.append(points[0])
new_face.append(points[1])
else:
new_face.append(points[0])
if new_face and face[0] == face[-1]:
if entered is not None:
connect_points(new_face, exited, entered, window)
new_face.append(new_face[0])
clipped.append(new_face)
self._points = clipped
@staticmethod
def _clip_line(point1, point2, xmin, ymin, xmax, ymax):
""" Liang-Barsky line clipping algorithm. """
deltax, deltay = point2[0] - point1[0], point2[1] - point1[1]
deltas = [-deltax, -deltay, deltax, deltay] # p
distances = [ # q
point1[0] - xmin, point1[1] - ymin,
xmax - point1[0], ymax - point1[1]]
ratios = np.divide(distances, deltas) # r
pct1, pct2 = 0, 1 # how much of the line is inside the window
side = [None, None]
for i in range(4):
if deltas[i] == 0 and distances[i] < 0:
return (), side
if deltas[i] < 0:
if ratios[i] > pct1: # entered
side[0] = i
pct1 = ratios[i]
if deltas[i] > 0:
if ratios[i] < pct2: # exited
side[1] = i
pct2 = ratios[i]
if pct1 > pct2:
return (), side
clipped = (
tuple(np.add((point1[0], point1[1]), (pct1*deltax, pct1*deltay))),
tuple(np.add((point1[0], point1[1]), (pct2*deltax, pct2*deltay))),
)
return clipped, side
@staticmethod
def build_from_file(path):
""" Returns objects described in an OBJ file. """
with open(path) as obj:
raw_file = obj.read()
file_lines = [line.split(" ") for line in raw_file.split("\n")]
vertices = {}
faces = []
        for line in file_lines:
            if line[0] == "v":
                # OBJ vertices are indexed by the order of "v" lines
                # (1-based), not by their absolute position in the file
                vertices[len(vertices) + 1] = tuple(map(float, line[1:]))
if line[0] == "f":
face = []
for index in line[1:]:
face.append(vertices[int(index)])
face.append(vertices[int(line[1])])
faces.append(face)
return Object(points=faces)
class Window(Object):
"""
The window object.
    This object delimits what should be drawn in the viewport. Moving and
    rescaling it changes which portion of the world is drawn in the
    viewport.
"""
BORDER = 0.05
    COP_DISTANCE = 500  # assumed value; referenced by Object.project but missing from the original snippet
def __init__(self, width, height):
points = [
(-width/2, height/2, 0),
(-width/2, -height/2, 0),
(width/2, -height/2, 0),
(width/2, height/2, 0),
]
points.append(points[0])
super().__init__([points], "window", (0, 0, 0))
self._rotation_matrix = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
@property
def expanded_boundaries(self):
""" Boundaries a little bigger than the actual window. """
width = self._points[0][3][0] - self._points[0][1][0]
height = self._points[0][3][1] - self._points[0][1][1]
factor = np.multiply((width, height), Window.BORDER)
return (
np.subtract(self._points[0][1], factor),
np.add(self._points[0][3], factor))
@property
def real_boundaries(self):
""" Returns windows' bottom left and upper right coordinates. """
return (self._points[0][1], self._points[0][3])
@property
def inv_rotation_matrix(self):
""" This matrix rotates the window back to its original position. """
return np.linalg.inv(self._rotation_matrix).tolist()
def move(self, offset):
# rotate offset vector to move window relative to its own directions
offset = np.dot(offset, self._rotation_matrix)
super().move(offset)
def zoom(self, factor):
# save original state
original_points = self._points.copy()
# apply the zoom operation
super().zoom(factor**(-1))
# find new window size
minimum, maximum = self.real_boundaries
width = np.abs(maximum[0] - minimum[0])
height = np.abs(maximum[1] - minimum[1])
# if zoom was exceeded, go back to original state and raise an error
if width < 10 and height < 10:
self._points = original_points
raise RuntimeError("Maximum zoom in exceeded")
def rotate(self, x_angle, y_angle, z_angle, center=None):
# find M = R^-1 * T * R
# R is the rotation matrix, it saves the rotation state of the window
# T is the matrix of the rotation that is being applied
matrix = Object.generate_rotation_matrix(x_angle, y_angle, z_angle)
matrix = np.dot(self.inv_rotation_matrix, matrix)
matrix = np.dot(matrix, self._rotation_matrix)
self._transform(matrix.tolist())
# update rotation matrix
self._rotation_matrix = np.dot(self._rotation_matrix, matrix)
def clip(self, _):
pass
class Curve(Object):
""" A Bezier curve with four control points. """
def __init__(self, points, name=None, color=None):
curve = Curve._generate_curve(points)
curve.append(curve[-1]) # add stub point for clipping
super().__init__(
points=[curve], name=name, color=color)
@staticmethod
def _generate_curve(points):
def f(t, i):
return np.array([t**3, t**2, t, 1]).dot(np.array([
[-1, 3, -3, 1],
[3, -6, 3, 0],
[-3, 3, 0, 0],
[1, 0, 0, 0],
])).dot(np.array([p[i] for p in points]))
step = 0.02
x_points = [f(t, 0) for t in np.arange(0, 1+step, step)]
y_points = [f(t, 1) for t in np.arange(0, 1+step, step)]
z_points = [f(t, 2) for t in np.arange(0, 1+step, step)]
return list(zip(x_points, y_points, z_points))
class Spline(Object):
""" A Spline curve with arbitrary amount of control points. """
def __init__(self, points, name=None, color=None):
curves = []
for i in range(len(points) - 3):
# build a curve for every four control points
curve = Spline._generate_curve(points[i:i+4])
curve.append(curve[-1]) # add stub point for clipping
curves.append(curve)
super().__init__(
points=curves, name=name, color=color)
@staticmethod
def _generate_curve(points):
coef = np.multiply(1/6, np.array([
[-1, 3, -3, 1],
[3, -6, 3, 0],
[-3, 0, 3, 0],
[1, 4, 1, 0],
])).dot(np.array(points))
number_of_points = 50
delta = 1/number_of_points
deltas = np.array([
[0, 0, 0, 1],
[delta**3, delta**2, delta, 0],
[6*delta**3, 2*delta**2, 0, 0],
[6*delta**3, 0, 0, 0],
]).dot(coef)
points = [tuple(deltas[0])]
for _ in range(number_of_points):
# update coordinates using forward differences
deltas[0] += deltas[1]
deltas[1] += deltas[2]
deltas[2] += deltas[3]
points.append(tuple(deltas[0]))
return points
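if __name__ == "__main__":
    # Small illustrative exercise of the wireframe API (the coordinates and
    # rotation angle are arbitrary).
    face = [(0, 0, 0), (10, 0, 0), (10, 10, 0), (0, 10, 0), (0, 0, 0)]
    square = Object(points=[face])
    square.move((5, 5, 0))
    square.rotate(0, 0, np.pi / 4)
    print(square.name, square.center)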
|
[
"numpy.divide",
"numpy.multiply",
"numpy.abs",
"numpy.average",
"numpy.subtract",
"numpy.sin",
"numpy.array",
"numpy.linalg.inv",
"numpy.arange",
"numpy.cos",
"numpy.dot",
"numpy.add"
] |
[((1601, 1698), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [-center[0], -center[1], -center\n    [2], 1]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [-center[0], -center[1],\n    -center[2], 1]])\n', (1609, 1698), True, 'import numpy as np\n'), ((6089, 6117), 'numpy.divide', 'np.divide', (['distances', 'deltas'], {}), '(distances, deltas)\n', (6098, 6117), True, 'import numpy as np\n'), ((8215, 8258), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (8223, 8258), True, 'import numpy as np\n'), ((8555, 8598), 'numpy.multiply', 'np.multiply', (['(width, height)', 'Window.BORDER'], {}), '((width, height), Window.BORDER)\n', (8566, 8598), True, 'import numpy as np\n'), ((9205, 9242), 'numpy.dot', 'np.dot', (['offset', 'self._rotation_matrix'], {}), '(offset, self._rotation_matrix)\n', (9211, 9242), True, 'import numpy as np\n'), ((9544, 9575), 'numpy.abs', 'np.abs', (['(maximum[0] - minimum[0])'], {}), '(maximum[0] - minimum[0])\n', (9550, 9575), True, 'import numpy as np\n'), ((9593, 9624), 'numpy.abs', 'np.abs', (['(maximum[1] - minimum[1])'], {}), '(maximum[1] - minimum[1])\n', (9599, 9624), True, 'import numpy as np\n'), ((10174, 10214), 'numpy.dot', 'np.dot', (['self.inv_rotation_matrix', 'matrix'], {}), '(self.inv_rotation_matrix, matrix)\n', (10180, 10214), True, 'import numpy as np\n'), ((10232, 10269), 'numpy.dot', 'np.dot', (['matrix', 'self._rotation_matrix'], {}), '(matrix, self._rotation_matrix)\n', (10238, 10269), True, 'import numpy as np\n'), ((10376, 10413), 'numpy.dot', 'np.dot', (['self._rotation_matrix', 'matrix'], {}), '(self._rotation_matrix, matrix)\n', (10382, 10413), True, 'import numpy as np\n'), ((1355, 1375), 'numpy.average', 'np.average', (['x_points'], {}), '(x_points)\n', (1365, 1375), True, 'import numpy as np\n'), ((1377, 1397), 'numpy.average', 'np.average', (['y_points'], {}), '(y_points)\n', (1387, 1397), True, 'import numpy as np\n'), ((1399, 1419), 'numpy.average', 'np.average', (['z_points'], {}), '(z_points)\n', (1409, 1419), True, 'import numpy as np\n'), ((8628, 8667), 'numpy.subtract', 'np.subtract', (['self._points[0][1]', 'factor'], {}), '(self._points[0][1], factor)\n', (8639, 8667), True, 'import numpy as np\n'), ((8681, 8715), 'numpy.add', 'np.add', (['self._points[0][3]', 'factor'], {}), '(self._points[0][3], factor)\n', (8687, 8715), True, 'import numpy as np\n'), ((12077, 12093), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (12085, 12093), True, 'import numpy as np\n'), ((2358, 2396), 'numpy.dot', 'np.dot', (['(point + (1,))', 'operation_matrix'], {}), '(point + (1,), operation_matrix)\n', (2364, 2396), True, 'import numpy as np\n'), ((6716, 6778), 'numpy.add', 'np.add', (['(point1[0], point1[1])', '(pct1 * deltax, pct1 * deltay)'], {}), '((point1[0], point1[1]), (pct1 * deltax, pct1 * deltay))\n', (6722, 6778), True, 'import numpy as np\n'), ((6795, 6857), 'numpy.add', 'np.add', (['(point1[0], point1[1])', '(pct2 * deltax, pct2 * deltay)'], {}), '((point1[0], point1[1]), (pct2 * deltax, pct2 * deltay))\n', (6801, 6857), True, 'import numpy as np\n'), ((9036, 9072), 'numpy.linalg.inv', 'np.linalg.inv', (['self._rotation_matrix'], {}), '(self._rotation_matrix)\n', (9049, 9072), True, 'import numpy as np\n'), ((11050, 11082), 'numpy.array', 'np.array', (['[p[i] for p in points]'], {}), '([p[i] for p in points])\n', (11058, 11082), True, 'import numpy as np\n'), ((11142, 11170), 'numpy.arange', 'np.arange', (['(0)', '(1 + step)', 'step'], {}), '(0, 1 + step, step)\n', (11151, 11170), True, 'import numpy as np\n'), ((11207, 11235), 'numpy.arange', 'np.arange', (['(0)', '(1 + step)', 'step'], {}), '(0, 1 + step, step)\n', (11216, 11235), True, 'import numpy as np\n'), ((11272, 11300), 'numpy.arange', 'np.arange', (['(0)', '(1 + step)', 'step'], {}), '(0, 1 + step, step)\n', (11281, 11300), True, 'import numpy as np\n'), ((12178, 12309), 'numpy.array', 'np.array', (['[[0, 0, 0, 1], [delta ** 3, delta ** 2, delta, 0], [6 * delta ** 3, 2 * \n    delta ** 2, 0, 0], [6 * delta ** 3, 0, 0, 0]]'], {}), '([[0, 0, 0, 1], [delta ** 3, delta ** 2, delta, 0], [6 * delta ** 3,\n    2 * delta ** 2, 0, 0], [6 * delta ** 3, 0, 0, 0]])\n', (12186, 12309), True, 'import numpy as np\n'), ((11942, 12012), 'numpy.array', 'np.array', (['[[-1, 3, -3, 1], [3, -6, 3, 0], [-3, 0, 3, 0], [1, 4, 1, 0]]'], {}), '([[-1, 3, -3, 1], [3, -6, 3, 0], [-3, 0, 3, 0], [1, 4, 1, 0]])\n', (11950, 12012), True, 'import numpy as np\n'), ((10895, 10965), 'numpy.array', 'np.array', (['[[-1, 3, -3, 1], [3, -6, 3, 0], [-3, 3, 0, 0], [1, 0, 0, 0]]'], {}), '([[-1, 3, -3, 1], [3, -6, 3, 0], [-3, 3, 0, 0], [1, 0, 0, 0]])\n', (10903, 10965), True, 'import numpy as np\n'), ((3412, 3427), 'numpy.cos', 'np.cos', (['z_angle'], {}), '(z_angle)\n', (3418, 3427), True, 'import numpy as np\n'), ((3464, 3479), 'numpy.sin', 'np.sin', (['z_angle'], {}), '(z_angle)\n', (3470, 3479), True, 'import numpy as np\n'), ((3481, 3496), 'numpy.cos', 'np.cos', (['z_angle'], {}), '(z_angle)\n', (3487, 3496), True, 'import numpy as np\n'), ((10862, 10894), 'numpy.array', 'np.array', (['[t ** 3, t ** 2, t, 1]'], {}), '([t ** 3, t ** 2, t, 1])\n', (10870, 10894), True, 'import numpy as np\n'), ((3430, 3445), 'numpy.sin', 'np.sin', (['z_angle'], {}), '(z_angle)\n', (3436, 3445), True, 'import numpy as np\n'), ((3269, 3284), 'numpy.cos', 'np.cos', (['y_angle'], {}), '(y_angle)\n', (3275, 3284), True, 'import numpy as np\n'), ((3289, 3304), 'numpy.sin', 'np.sin', (['y_angle'], {}), '(y_angle)\n', (3295, 3304), True, 'import numpy as np\n'), ((3364, 3379), 'numpy.cos', 'np.cos', (['y_angle'], {}), '(y_angle)\n', (3370, 3379), True, 'import numpy as np\n'), ((3344, 3359), 'numpy.sin', 'np.sin', (['y_angle'], {}), '(y_angle)\n', (3350, 3359), True, 'import numpy as np\n'), ((3152, 3167), 'numpy.cos', 'np.cos', (['x_angle'], {}), '(x_angle)\n', (3158, 3167), True, 'import numpy as np\n'), ((3204, 3219), 'numpy.sin', 'np.sin', (['x_angle'], {}), '(x_angle)\n', (3210, 3219), True, 'import numpy as np\n'), ((3221, 3236), 'numpy.cos', 'np.cos', (['x_angle'], {}), '(x_angle)\n', (3227, 3236), True, 'import numpy as np\n'), ((3170, 3185), 'numpy.sin', 'np.sin', (['x_angle'], {}), '(x_angle)\n', (3176, 3185), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3.5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
class Server:
def __init__(self, *args, loop=None, **kwargs):
self._loop = loop if loop is not None else asyncio.get_event_loop()
self._args = args
self._kwargs = kwargs
async def __aenter__(self):
self._server = await self._loop.create_server(
*self._args,
**self._kwargs,
)
return self._server
async def __aexit__(self, exc_type, exc, tb):
if self._server.sockets is not None:
self._server.close()
await self._server.wait_closed()
|
[
"asyncio.get_event_loop"
] |
[((701, 725), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (723, 725), False, 'import asyncio\n')]
|
"""Utils functions."""
from copy import deepcopy
import mne
import numpy as np
from ._logs import logger
# TODO: Add test for this. Also compare speed with latest version of numpy.
# Also compare speed with a numba implementation.
def _corr_vectors(A, B, axis=0):
# based on:
# https://github.com/wmvanvliet/mne_microstates/blob/master/microstates.py
# written by <NAME> <<EMAIL>>
"""Compute pairwise correlation of multiple pairs of vectors.
Fast way to compute correlation of multiple pairs of vectors without
    computing all pairs as one would with corr(A, B). Borrowed from Oli at
StackOverflow. Note the resulting coefficients vary slightly from the ones
obtained from corr due to differences in the order of the calculations.
(Differences are of a magnitude of 1e-9 to 1e-17 depending on the tested
data).
Parameters
----------
A : ndarray, shape (n, m)
The first collection of vectors
B : ndarray, shape (n, m)
The second collection of vectors
axis : int
The axis that contains the elements of each vector. Defaults to 0.
Returns
-------
corr : ndarray, shape (m, )
For each pair of vectors, the correlation between them.
"""
if A.shape != B.shape:
raise ValueError("All input arrays must have the same shape")
    # If maps is null, divide will not throw an error.
np.seterr(divide="ignore", invalid="ignore")
An = A - np.mean(A, axis=axis)
Bn = B - np.mean(B, axis=axis)
An /= np.linalg.norm(An, axis=axis)
Bn /= np.linalg.norm(Bn, axis=axis)
corr = np.sum(An * Bn, axis=axis)
corr = np.nan_to_num(corr, posinf=0, neginf=0)
np.seterr(divide="warn", invalid="warn")
return corr
# TODO: To be removed when ChInfo is implemented.
def _copy_info(inst, sfreq):
ch_names = inst.info["ch_names"]
ch_types = [
mne.channel_type(inst.info, idx)
for idx in range(0, inst.info["nchan"])
]
new_info = mne.create_info(ch_names, sfreq=sfreq, ch_types=ch_types)
if inst.get_montage():
montage = inst.get_montage()
new_info.set_montage(montage)
return new_info
def _compare_infos(cluster_info, inst_info):
"""Check that channels in cluster_info are all present in inst_info."""
for ch in cluster_info["ch_names"]:
if ch not in inst_info["ch_names"]:
raise ValueError(
"Instance to segment into microstates sequence does not have "
"the same channels as the instance used for fitting."
)
# Extract loc arrays
cluster_loc = list()
for ch in cluster_info["chs"]:
cluster_loc.append((ch["ch_name"], deepcopy(ch["loc"])))
inst_loc = list()
for ch in inst_info["chs"]:
if ch["ch_name"] in cluster_info["ch_names"]:
inst_loc.append((ch["ch_name"], deepcopy(ch["loc"])))
cluster_loc = [loc[1] for loc in sorted(cluster_loc, key=lambda x: x[0])]
inst_loc = [loc[1] for loc in sorted(inst_loc, key=lambda x: x[0])]
# Compare loc
assert len(cluster_loc) == len(inst_loc) # sanity-check
for l1, l2 in zip(cluster_loc, inst_loc):
if not np.allclose(l1, l2, equal_nan=True):
logger.warning(
"Instance to segment into microstates sequence does not have "
"the same channels montage as the instance used for fitting. "
)
break
# Compare attributes in chs
cluster_kinds = list()
cluster_units = list()
cluster_coord_frame = list()
for ch in cluster_info["chs"]:
cluster_kinds.append((ch["ch_name"], ch["kind"]))
cluster_units.append((ch["ch_name"], ch["unit"]))
cluster_coord_frame.append((ch["ch_name"], ch["coord_frame"]))
inst_kinds = list()
inst_units = list()
inst_coord_frames = list()
for ch in inst_info["chs"]:
if ch["ch_name"] in cluster_info["ch_names"]:
inst_kinds.append((ch["ch_name"], ch["kind"]))
inst_units.append((ch["ch_name"], ch["unit"]))
inst_coord_frames.append((ch["ch_name"], ch["coord_frame"]))
cluster_kinds = [
elt[1] for elt in sorted(cluster_kinds, key=lambda x: x[0])
]
cluster_units = [
elt[1] for elt in sorted(cluster_units, key=lambda x: x[0])
]
cluster_coord_frame = [
elt[1] for elt in sorted(cluster_coord_frame, key=lambda x: x[0])
]
inst_kinds = [elt[1] for elt in sorted(inst_kinds, key=lambda x: x[0])]
inst_units = [elt[1] for elt in sorted(inst_units, key=lambda x: x[0])]
inst_coord_frames = [
elt[1] for elt in sorted(inst_coord_frames, key=lambda x: x[0])
]
if not all(
kind1 == kind2 for kind1, kind2 in zip(cluster_kinds, inst_kinds)
):
logger.warning(
"Instance to segment into microstates sequence does not have "
"the same channels kinds as the instance used for fitting. "
)
if not all(
unit1 == unit2 for unit1, unit2 in zip(cluster_units, inst_units)
):
logger.warning(
"Instance to segment into microstates sequence does not have "
"the same channels units as the instance used for fitting. "
)
if not all(
f1 == f2 for f1, f2 in zip(cluster_coord_frame, inst_coord_frames)
):
logger.warning(
"Instance to segment into microstates sequence does not have "
"the same coordinate frames as the instance used for fitting. "
)
|
[
"copy.deepcopy",
"numpy.sum",
"numpy.nan_to_num",
"numpy.seterr",
"numpy.allclose",
"mne.channel_type",
"mne.create_info",
"numpy.mean",
"numpy.linalg.norm"
] |
[((1399, 1443), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (1408, 1443), True, 'import numpy as np\n'), ((1524, 1553), 'numpy.linalg.norm', 'np.linalg.norm', (['An'], {'axis': 'axis'}), '(An, axis=axis)\n', (1538, 1553), True, 'import numpy as np\n'), ((1564, 1593), 'numpy.linalg.norm', 'np.linalg.norm', (['Bn'], {'axis': 'axis'}), '(Bn, axis=axis)\n', (1578, 1593), True, 'import numpy as np\n'), ((1605, 1631), 'numpy.sum', 'np.sum', (['(An * Bn)'], {'axis': 'axis'}), '(An * Bn, axis=axis)\n', (1611, 1631), True, 'import numpy as np\n'), ((1643, 1682), 'numpy.nan_to_num', 'np.nan_to_num', (['corr'], {'posinf': '(0)', 'neginf': '(0)'}), '(corr, posinf=0, neginf=0)\n', (1656, 1682), True, 'import numpy as np\n'), ((1687, 1727), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""warn"""', 'invalid': '"""warn"""'}), "(divide='warn', invalid='warn')\n", (1696, 1727), True, 'import numpy as np\n'), ((1989, 2046), 'mne.create_info', 'mne.create_info', (['ch_names'], {'sfreq': 'sfreq', 'ch_types': 'ch_types'}), '(ch_names, sfreq=sfreq, ch_types=ch_types)\n', (2004, 2046), False, 'import mne\n'), ((1457, 1478), 'numpy.mean', 'np.mean', (['A'], {'axis': 'axis'}), '(A, axis=axis)\n', (1464, 1478), True, 'import numpy as np\n'), ((1492, 1513), 'numpy.mean', 'np.mean', (['B'], {'axis': 'axis'}), '(B, axis=axis)\n', (1499, 1513), True, 'import numpy as np\n'), ((1887, 1919), 'mne.channel_type', 'mne.channel_type', (['inst.info', 'idx'], {}), '(inst.info, idx)\n', (1903, 1919), False, 'import mne\n'), ((3185, 3220), 'numpy.allclose', 'np.allclose', (['l1', 'l2'], {'equal_nan': '(True)'}), '(l1, l2, equal_nan=True)\n', (3196, 3220), True, 'import numpy as np\n'), ((2698, 2717), 'copy.deepcopy', 'deepcopy', (["ch['loc']"], {}), "(ch['loc'])\n", (2706, 2717), False, 'from copy import deepcopy\n'), ((2872, 2891), 'copy.deepcopy', 'deepcopy', (["ch['loc']"], {}), "(ch['loc'])\n", (2880, 2891), False, 'from copy import deepcopy\n')]
|
from django.db import models
from django.utils import timezone
STATE_CHOICES = [
("Good", "Good"),
("Needs repair", "Needs repair"),
("In repair", "In repair"),
]
class Equipment(models.Model):
name = models.CharField(max_length=200)
def __str__(self):
return self.name
class Item(models.Model):
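    # related_name="+" disables the reverse accessor from Equipment/Person back to Item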
kind = models.ForeignKey("equipment.Equipment", on_delete=models.CASCADE, related_name="+")
person = models.ForeignKey("users.Person", on_delete=models.CASCADE, related_name="+")
code = models.CharField(max_length=500)
brand = models.CharField(max_length=200)
specifications = models.TextField()
series_number = models.CharField(max_length=200)
state = models.CharField(choices=STATE_CHOICES, default="Good", max_length=100)
registered_date = models.DateTimeField(default=timezone.now)
return_date = models.DateTimeField()
|
[
"django.db.models.CharField",
"django.db.models.TextField",
"django.db.models.DateTimeField",
"django.db.models.ForeignKey"
] |
[((220, 252), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (236, 252), False, 'from django.db import models\n'), ((341, 429), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""equipment.Equipment"""'], {'on_delete': 'models.CASCADE', 'related_name': '"""+"""'}), "('equipment.Equipment', on_delete=models.CASCADE,\n related_name='+')\n", (358, 429), False, 'from django.db import models\n'), ((439, 516), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""users.Person"""'], {'on_delete': 'models.CASCADE', 'related_name': '"""+"""'}), "('users.Person', on_delete=models.CASCADE, related_name='+')\n", (456, 516), False, 'from django.db import models\n'), ((528, 560), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (544, 560), False, 'from django.db import models\n'), ((573, 605), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (589, 605), False, 'from django.db import models\n'), ((627, 645), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (643, 645), False, 'from django.db import models\n'), ((666, 698), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (682, 698), False, 'from django.db import models\n'), ((711, 782), 'django.db.models.CharField', 'models.CharField', ([], {'choices': 'STATE_CHOICES', 'default': '"""Good"""', 'max_length': '(100)'}), "(choices=STATE_CHOICES, default='Good', max_length=100)\n", (727, 782), False, 'from django.db import models\n'), ((805, 847), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (825, 847), False, 'from django.db import models\n'), ((866, 888), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (886, 888), False, 'from django.db import models\n')]
|
import numpy as numpy
a = numpy.arange(150)
# a[0::2] *= numpy.sqrt(2)/2.0 * (numpy.cos(2) - numpy.sin(2))
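# double every even-indexed element of the array in place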
a[0::2] *= 2
print(a)
|
[
"numpy.arange"
] |
[((26, 43), 'numpy.arange', 'numpy.arange', (['(150)'], {}), '(150)\n', (38, 43), True, 'import numpy as numpy\n')]
|
import shodan
import requests
SHODAN_API_KEY = ""
api = shodan.Shodan(SHODAN_API_KEY)
domain = 'www.python.org'
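# Shodan's /dns/resolve endpoint returns a JSON object mapping each hostname to its IP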
dnsResolve = 'https://api.shodan.io/dns/resolve?hostnames=' + domain + '&key=' + SHODAN_API_KEY
try:
resolved = requests.get(dnsResolve)
hostIP = resolved.json()[domain]
host = api.host(hostIP)
print("IP: %s" % host['ip_str'])
print("Organization: %s" % host.get('org', 'n/a'))
print("Operating System: %s" % host.get('os', 'n/a'))
for item in host['data']:
print("Port: %s" % item['port'])
print("Banner: %s" % item['data'])
except shodan.APIError as exception:
print('Error: %s' % exception)
|
[
"requests.get",
"shodan.Shodan"
] |
[((58, 87), 'shodan.Shodan', 'shodan.Shodan', (['SHODAN_API_KEY'], {}), '(SHODAN_API_KEY)\n', (71, 87), False, 'import shodan\n'), ((234, 258), 'requests.get', 'requests.get', (['dnsResolve'], {}), '(dnsResolve)\n', (246, 258), False, 'import requests\n')]
|
from actors.actions.hit_and_run_action import HitAndRunAction
from actors.actions.input_driven_action import InputDrivenAction
from actors.actions.shoot_at_action import ShootAtAction
from actors.actor_target import ActorTarget
from actors.components.components import Components
from actors.components.health import Health
from actors.components.inventory import Inventory
from actors.interactions.null_interaction import NullInteraction
from actors.projectile import Projectile
from actors.actions.move_action import MoveAction
from actors.actions.use_action import UseAction
from input.keyboard_input import KeyboardInput
from items.gun import Gun
from views.actor_camera import ActorCamera
from views.json_environment import JsonEnvironment
from views.point_camera import PointCamera
from views.pyxel.pyxel_renderer import PyxelRenderer
from views.pyxel.shaders.color_mapped_shader import ColorMappedShader
from views.pyxel.shaders.flicker_shader import FlickerShader
from views.pyxel.pyxel_area_view import PyxelAreaView
from views.pyxel.shaders.perlin_noise_shader import PerlinNoiseShader
from world.area_builder import AreaBuilder
from actors.actor import Actor
from world.rendered_area import RenderedArea
from utilities.countdown import Countdown
import threading
player_key = 'p'
input_action = InputDrivenAction({
'w': MoveAction(0, -1), 's': MoveAction(0, 1), 'a': MoveAction(-1, 0), 'd': MoveAction(1, 0),
'i': UseAction(0, -1), 'k': UseAction(0, 1), 'j': UseAction(-1, 0), 'l': UseAction(1, 0)
}, KeyboardInput())
gun = Gun(lambda aim_dir: Projectile(aim_dir, "*"))
inventory = Inventory(frozenset([gun]))
player_target = ActorTarget(player_key)
cowboy_components = Components(frozenset([inventory, Health(99, 99)]))
shoot_at_action = ShootAtAction(player_target, UseAction())
hit_and_run_action = HitAndRunAction(player_target, shoot_at_action, MoveAction(), 3, Countdown(4, 0))
bandit = Actor(hit_and_run_action, NullInteraction(), "b", cowboy_components)
player = Actor(input_action, NullInteraction(), player_key, cowboy_components)
mapped_shader = ColorMappedShader(JsonEnvironment('config/pyxel_environment.json'))
pyxel_view = PyxelAreaView(PyxelRenderer(range(8)), PerlinNoiseShader(), FlickerShader(mapped_shader, 4), mapped_shader)
camera = ActorCamera(player_key, PointCamera(0, 0, 6, pyxel_view))
# TODO: Action that waits for an actor to enter within a certain distance? Make enemies idle about!
area = RenderedArea(AreaBuilder().rectangle(11, 11)
.with_actor(player, 0, 0)
.with_open_space(11, 5)
.to_area(), camera)
def update_loop(a):
while True:
a = a.update()
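# run the world update loop on a background thread; pyxel_view.run() blocks the main thread below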
thread = threading.Thread(target=lambda: update_loop(area))
thread.start()
pyxel_view.run(128, 128)
|
[
"views.pyxel.shaders.perlin_noise_shader.PerlinNoiseShader",
"utilities.countdown.Countdown",
"actors.actions.use_action.UseAction",
"actors.actions.move_action.MoveAction",
"world.area_builder.AreaBuilder",
"views.json_environment.JsonEnvironment",
"actors.actor_target.ActorTarget",
"views.pyxel.shaders.flicker_shader.FlickerShader",
"input.keyboard_input.KeyboardInput",
"views.point_camera.PointCamera",
"actors.projectile.Projectile",
"actors.components.health.Health",
"actors.interactions.null_interaction.NullInteraction"
] |
[((1647, 1670), 'actors.actor_target.ActorTarget', 'ActorTarget', (['player_key'], {}), '(player_key)\n', (1658, 1670), False, 'from actors.actor_target import ActorTarget\n'), ((1522, 1537), 'input.keyboard_input.KeyboardInput', 'KeyboardInput', ([], {}), '()\n', (1535, 1537), False, 'from input.keyboard_input import KeyboardInput\n'), ((1789, 1800), 'actors.actions.use_action.UseAction', 'UseAction', ([], {}), '()\n', (1798, 1800), False, 'from actors.actions.use_action import UseAction\n'), ((1871, 1883), 'actors.actions.move_action.MoveAction', 'MoveAction', ([], {}), '()\n', (1881, 1883), False, 'from actors.actions.move_action import MoveAction\n'), ((1888, 1903), 'utilities.countdown.Countdown', 'Countdown', (['(4)', '(0)'], {}), '(4, 0)\n', (1897, 1903), False, 'from utilities.countdown import Countdown\n'), ((1940, 1957), 'actors.interactions.null_interaction.NullInteraction', 'NullInteraction', ([], {}), '()\n', (1955, 1957), False, 'from actors.interactions.null_interaction import NullInteraction\n'), ((2012, 2029), 'actors.interactions.null_interaction.NullInteraction', 'NullInteraction', ([], {}), '()\n', (2027, 2029), False, 'from actors.interactions.null_interaction import NullInteraction\n'), ((2096, 2144), 'views.json_environment.JsonEnvironment', 'JsonEnvironment', (['"""config/pyxel_environment.json"""'], {}), "('config/pyxel_environment.json')\n", (2111, 2144), False, 'from views.json_environment import JsonEnvironment\n'), ((2198, 2217), 'views.pyxel.shaders.perlin_noise_shader.PerlinNoiseShader', 'PerlinNoiseShader', ([], {}), '()\n', (2215, 2217), False, 'from views.pyxel.shaders.perlin_noise_shader import PerlinNoiseShader\n'), ((2219, 2250), 'views.pyxel.shaders.flicker_shader.FlickerShader', 'FlickerShader', (['mapped_shader', '(4)'], {}), '(mapped_shader, 4)\n', (2232, 2250), False, 'from views.pyxel.shaders.flicker_shader import FlickerShader\n'), ((2300, 2332), 'views.point_camera.PointCamera', 'PointCamera', (['(0)', '(0)', '(6)', 'pyxel_view'], {}), '(0, 0, 6, pyxel_view)\n', (2311, 2332), False, 'from views.point_camera import PointCamera\n'), ((1337, 1354), 'actors.actions.move_action.MoveAction', 'MoveAction', (['(0)', '(-1)'], {}), '(0, -1)\n', (1347, 1354), False, 'from actors.actions.move_action import MoveAction\n'), ((1361, 1377), 'actors.actions.move_action.MoveAction', 'MoveAction', (['(0)', '(1)'], {}), '(0, 1)\n', (1371, 1377), False, 'from actors.actions.move_action import MoveAction\n'), ((1384, 1401), 'actors.actions.move_action.MoveAction', 'MoveAction', (['(-1)', '(0)'], {}), '(-1, 0)\n', (1394, 1401), False, 'from actors.actions.move_action import MoveAction\n'), ((1408, 1424), 'actors.actions.move_action.MoveAction', 'MoveAction', (['(1)', '(0)'], {}), '(1, 0)\n', (1418, 1424), False, 'from actors.actions.move_action import MoveAction\n'), ((1435, 1451), 'actors.actions.use_action.UseAction', 'UseAction', (['(0)', '(-1)'], {}), '(0, -1)\n', (1444, 1451), False, 'from actors.actions.use_action import UseAction\n'), ((1458, 1473), 'actors.actions.use_action.UseAction', 'UseAction', (['(0)', '(1)'], {}), '(0, 1)\n', (1467, 1473), False, 'from actors.actions.use_action import UseAction\n'), ((1480, 1496), 'actors.actions.use_action.UseAction', 'UseAction', (['(-1)', '(0)'], {}), '(-1, 0)\n', (1489, 1496), False, 'from actors.actions.use_action import UseAction\n'), ((1503, 1518), 'actors.actions.use_action.UseAction', 'UseAction', (['(1)', '(0)'], {}), '(1, 0)\n', (1512, 1518), False, 'from actors.actions.use_action import UseAction\n'), 
((1565, 1589), 'actors.projectile.Projectile', 'Projectile', (['aim_dir', '"""*"""'], {}), "(aim_dir, '*')\n", (1575, 1589), False, 'from actors.projectile import Projectile\n'), ((1724, 1738), 'actors.components.health.Health', 'Health', (['(99)', '(99)'], {}), '(99, 99)\n', (1730, 1738), False, 'from actors.components.health import Health\n'), ((2454, 2467), 'world.area_builder.AreaBuilder', 'AreaBuilder', ([], {}), '()\n', (2465, 2467), False, 'from world.area_builder import AreaBuilder\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2020-08-11 01:50
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('users', '0004_auto_20200809_1815'),
]
operations = [
migrations.AddField(
model_name='images',
name='created_time',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='images',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='images',
name='user_profile',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='users.Profile'),
),
migrations.AddField(
model_name='profile',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"django.db.models.ForeignKey",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency"
] |
[((319, 376), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (350, 376), False, 'from django.db import migrations, models\n'), ((563, 618), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now'}), '(default=django.utils.timezone.now)\n', (583, 618), False, 'from django.db import migrations, models\n'), ((736, 842), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(null=True, on_delete=django.db.models.deletion.CASCADE,\n to=settings.AUTH_USER_MODEL)\n', (753, 842), False, 'from django.db import migrations, models\n'), ((964, 1061), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""users.Profile"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n to='users.Profile')\n", (981, 1061), False, 'from django.db import migrations, models\n'), ((1176, 1282), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(null=True, on_delete=django.db.models.deletion.CASCADE,\n to=settings.AUTH_USER_MODEL)\n', (1193, 1282), False, 'from django.db import migrations, models\n')]
|
import os
import PySimpleGUI as sg
sg.change_look_and_feel('DarkAmber') # colour
# layout of window
layout = [
[sg.Frame(layout=[
[sg.Radio('1. Estadao', 1, default=False, key='estadao'),
sg.Radio('2. Folha', 1,
default=False, key='folha'),
sg.Radio('3. Uol Notícias', 1, default=False, key='uol')]],
title='Selecione o site para a pesquisa', title_color='white',
relief=sg.RELIEF_SUNKEN, tooltip='Use these to set flags')],
[sg.Text('Nome do arquivo:'), sg.InputText(key='nomearquivo')],
[sg.Text('Palavras chaves:'), sg.InputText(key='palavrachave')],
[sg.Text('Quantidade de resultados:'), sg.InputText(key='quantidade')],
[sg.Submit('Pesquisar'), sg.Button('Cancelar')],
]
window = sg.Window('Mudanças Climáticas Search', layout) # make the window
event, values = window.read()
def Iniciar():
nomearquivo = values['nomearquivo']
palavrachave = values['palavrachave']
quantidade = values['quantidade']
    if event in (None, 'Cancelar'):
        return 'Cancelou o programa'
    elif values['estadao']:
        opcao = 'estadao'
    elif values['folha']:
        opcao = 'folha'
    elif values['uol']:
        opcao = 'uol'
    else:
        # the original while-loop spun forever when no site was selected
        return 'Nenhum site selecionado'
    return nomearquivo, palavrachave, opcao, quantidade
resultado = Iniciar()
window.close()
|
[
"PySimpleGUI.Button",
"PySimpleGUI.InputText",
"PySimpleGUI.Submit",
"PySimpleGUI.Text",
"PySimpleGUI.Radio",
"PySimpleGUI.Window",
"PySimpleGUI.change_look_and_feel"
] |
[((36, 72), 'PySimpleGUI.change_look_and_feel', 'sg.change_look_and_feel', (['"""DarkAmber"""'], {}), "('DarkAmber')\n", (59, 72), True, 'import PySimpleGUI as sg\n'), ((773, 820), 'PySimpleGUI.Window', 'sg.Window', (['"""Mudanças Climáticas Search"""', 'layout'], {}), "('Mudanças Climáticas Search', layout)\n", (782, 820), True, 'import PySimpleGUI as sg\n'), ((500, 527), 'PySimpleGUI.Text', 'sg.Text', (['"""Nome do arquivo:"""'], {}), "('Nome do arquivo:')\n", (507, 527), True, 'import PySimpleGUI as sg\n'), ((529, 560), 'PySimpleGUI.InputText', 'sg.InputText', ([], {'key': '"""nomearquivo"""'}), "(key='nomearquivo')\n", (541, 560), True, 'import PySimpleGUI as sg\n'), ((568, 595), 'PySimpleGUI.Text', 'sg.Text', (['"""Palavras chaves:"""'], {}), "('Palavras chaves:')\n", (575, 595), True, 'import PySimpleGUI as sg\n'), ((597, 629), 'PySimpleGUI.InputText', 'sg.InputText', ([], {'key': '"""palavrachave"""'}), "(key='palavrachave')\n", (609, 629), True, 'import PySimpleGUI as sg\n'), ((637, 673), 'PySimpleGUI.Text', 'sg.Text', (['"""Quantidade de resultados:"""'], {}), "('Quantidade de resultados:')\n", (644, 673), True, 'import PySimpleGUI as sg\n'), ((675, 705), 'PySimpleGUI.InputText', 'sg.InputText', ([], {'key': '"""quantidade"""'}), "(key='quantidade')\n", (687, 705), True, 'import PySimpleGUI as sg\n'), ((713, 735), 'PySimpleGUI.Submit', 'sg.Submit', (['"""Pesquisar"""'], {}), "('Pesquisar')\n", (722, 735), True, 'import PySimpleGUI as sg\n'), ((737, 758), 'PySimpleGUI.Button', 'sg.Button', (['"""Cancelar"""'], {}), "('Cancelar')\n", (746, 758), True, 'import PySimpleGUI as sg\n'), ((146, 201), 'PySimpleGUI.Radio', 'sg.Radio', (['"""1. Estadao"""', '(1)'], {'default': '(False)', 'key': '"""estadao"""'}), "('1. Estadao', 1, default=False, key='estadao')\n", (154, 201), True, 'import PySimpleGUI as sg\n'), ((212, 263), 'PySimpleGUI.Radio', 'sg.Radio', (['"""2. Folha"""', '(1)'], {'default': '(False)', 'key': '"""folha"""'}), "('2. Folha', 1, default=False, key='folha')\n", (220, 263), True, 'import PySimpleGUI as sg\n'), ((295, 351), 'PySimpleGUI.Radio', 'sg.Radio', (['"""3. Uol Notícias"""', '(1)'], {'default': '(False)', 'key': '"""uol"""'}), "('3. Uol Notícias', 1, default=False, key='uol')\n", (303, 351), True, 'import PySimpleGUI as sg\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-25 16:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('boards', '0028_auto_20160925_1809'),
('members', '0008_auto_20160923_2056'),
('dev_environment', '0002_auto_20160921_1748'),
]
operations = [
migrations.AlterModelOptions(
name='interruption',
options={'verbose_name': 'Interruption', 'verbose_name_plural': 'Interruptions'},
),
migrations.AlterIndexTogether(
name='interruption',
index_together=set([('datetime', 'board', 'member'), ('member', 'datetime', 'board')]),
),
]
|
[
"django.db.migrations.AlterModelOptions"
] |
[((393, 528), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""interruption"""', 'options': "{'verbose_name': 'Interruption', 'verbose_name_plural': 'Interruptions'}"}), "(name='interruption', options={'verbose_name':\n 'Interruption', 'verbose_name_plural': 'Interruptions'})\n", (421, 528), False, 'from django.db import migrations\n')]
|
import pytest
import sh
def test_invalid():
try:
sh.python(["-m", "zuul_lint", "tests/data/zuul-config-invalid.yaml"])
except sh.ErrorReturnCode_1:
return
except sh.ErrorReturnCode as e:
        pytest.fail(str(e))
pytest.fail("Expected to fail")
def test_valid():
try:
sh.python(["-m", "zuul_lint", "tests/data/zuul-config-valid.yaml"])
except sh.ErrorReturnCode as e:
        pytest.fail(str(e))
|
[
"pytest.fail",
"sh.python"
] |
[((244, 275), 'pytest.fail', 'pytest.fail', (['"""Expected to fail"""'], {}), "('Expected to fail')\n", (255, 275), False, 'import pytest\n'), ((63, 132), 'sh.python', 'sh.python', (["['-m', 'zuul_lint', 'tests/data/zuul-config-invalid.yaml']"], {}), "(['-m', 'zuul_lint', 'tests/data/zuul-config-invalid.yaml'])\n", (72, 132), False, 'import sh\n'), ((313, 380), 'sh.python', 'sh.python', (["['-m', 'zuul_lint', 'tests/data/zuul-config-valid.yaml']"], {}), "(['-m', 'zuul_lint', 'tests/data/zuul-config-valid.yaml'])\n", (322, 380), False, 'import sh\n'), ((225, 239), 'pytest.fail', 'pytest.fail', (['e'], {}), '(e)\n', (236, 239), False, 'import pytest\n'), ((425, 439), 'pytest.fail', 'pytest.fail', (['e'], {}), '(e)\n', (436, 439), False, 'import pytest\n')]
|
import os
import sys
input_path = sys.argv[1].rstrip(os.sep)
output_path = sys.argv[2]
filenames = os.listdir(input_path)
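# write one "<index>\t<label>\t<filepath>" line per file; the label is the token after '_' in the file stem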
with open(output_path, 'w') as f:
for i, filename in enumerate(filenames):
filepath = os.sep.join([input_path, filename])
label = filename[:filename.rfind('.')].split('_')[1]
line = '{}\t{}\t{}\n'.format(i, label, filepath)
f.write(line)
|
[
"os.listdir",
"os.sep.join"
] |
[((101, 123), 'os.listdir', 'os.listdir', (['input_path'], {}), '(input_path)\n', (111, 123), False, 'import os\n'), ((223, 258), 'os.sep.join', 'os.sep.join', (['[input_path, filename]'], {}), '([input_path, filename])\n', (234, 258), False, 'import os\n')]
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#
# Documents
#
"""
Documents
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import re
import sublime
import sublime_plugin
st_version = int(sublime.version())
if st_version < 3000:
import stino
else:
from . import stino
class SketchListener(sublime_plugin.EventListener):
def __init__(self):
super(SketchListener, self).__init__()
self.sketch_files_dict = {}
self.file_view_dict = {}
pattern_text = r'^(\S*?):([0-9]+?):'
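        # matches "<file>:<line>:" prefixes emitted in compiler output lines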
self.pattern = re.compile(pattern_text, re.M | re.S)
def on_activated(self, view):
stino.main.set_status(view)
def on_close(self, view):
monitor_module = stino.pyarduino.base.serial_monitor
if stino.st_console.is_monitor_view(view):
name = view.name()
serial_port = name.split('-')[1].strip()
if serial_port in monitor_module.serials_in_use:
cur_serial_monitor = monitor_module.serial_monitor_dict.get(
serial_port, None)
if cur_serial_monitor:
cur_serial_monitor.stop()
monitor_module.serials_in_use.remove(serial_port)
def on_selection_modified(self, view):
view_name = view.name()
if view_name.startswith('build|') or view_name.startswith('upload|'):
view_selection = view.sel()
region = view_selection[0]
region = view.line(region)
text = view.substr(region)
matches = list(self.pattern.finditer(text))
if matches:
view_selection.clear()
view_selection.add(region)
match = matches[0]
file_path, line_no = match.groups()
if os.path.isfile(file_path):
file_view = view.window().open_file(file_path)
error_point = file_view.text_point(int(line_no) - 1, 0)
region = file_view.line(error_point)
selection = file_view.sel()
selection.clear()
selection.add(region)
file_view.show(error_point)
def on_modified(self, view):
if st_version < 3000:
flag = sublime.DRAW_OUTLINED
else:
flag = sublime.DRAW_NO_FILL
view_name = view.name()
if view_name.startswith('build|') or view_name.startswith('upload|'):
sketch_path = view_name.split('|')[1]
files = self.sketch_files_dict.get(sketch_path, [])
for file_path in files:
file_view = self.file_view_dict.get(file_path, None)
if file_view in sublime.active_window().views():
key = 'stino.' + file_path
file_view.erase_regions(key)
console_regions = []
file_regions_dict = {}
files = []
text = view.substr(sublime.Region(0, view.size()))
matches = self.pattern.finditer(text)
for match in matches:
cur_point = match.start()
line_region = view.line(cur_point)
console_regions.append(line_region)
file_path, line_no = match.groups()
file_view = view.window().open_file(file_path)
error_point = file_view.text_point(int(line_no) - 1, 0)
line_region = file_view.line(error_point)
                if file_path not in files:
files.append(file_path)
self.file_view_dict[file_path] = file_view
regions = file_regions_dict.setdefault(file_path, [])
                if line_region not in regions:
regions.append(line_region)
file_regions_dict[file_path] = regions
view.add_regions('build_error', console_regions, 'string',
'circle', flag)
self.sketch_files_dict[sketch_path] = files
for file_path in files:
key = 'stino.' + file_path
file_view = self.file_view_dict.get(file_path)
regions = file_regions_dict.get(file_path, [])
file_view.add_regions(key, regions, 'string', 'circle',
flag)
if regions:
region = regions[0]
file_view.show(region)
class ShowArduinoMenuCommand(sublime_plugin.WindowCommand):
def run(self):
show_arduino_menu = stino.settings.get('show_arduino_menu', True)
stino.settings.set('show_arduino_menu', not show_arduino_menu)
stino.main.create_menus()
def is_checked(self):
show_arduino_menu = stino.settings.get('show_arduino_menu', True)
return show_arduino_menu
class UpdateMenuCommand(sublime_plugin.WindowCommand):
def run(self):
stino.main.update_menu()
class NewSketchCommand(sublime_plugin.WindowCommand):
def run(self):
caption = stino.i18n.translate('Name for New Sketch:')
self.window.show_input_panel(caption, '', self.on_done, None, None)
def on_done(self, sketch_name):
stino.main.new_sketch(self.window, sketch_name)
class OpenSketchCommand(sublime_plugin.WindowCommand):
def run(self, sketch_path):
new_window = stino.settings.get('open_project_in_new_window', False)
if new_window:
sublime.run_command('new_window')
window = sublime.windows()[-1]
else:
window = self.window
stino.main.open_sketch(window, sketch_path)
class ImportLibraryCommand(sublime_plugin.TextCommand):
def run(self, edit, library_path):
stino.main.import_library(self.view, edit, library_path)
class ShowSketchFolderCommand(sublime_plugin.TextCommand):
def run(self, edit):
file_path = self.view.file_name()
if file_path:
dir_path = os.path.dirname(file_path)
url = 'file://' + dir_path
sublime.run_command('open_url', {'url': url})
class CompileSketchCommand(sublime_plugin.TextCommand):
def run(self, edit):
stino.main.handle_sketch(self.view, stino.main.build_sketch)
class UploadSketchCommand(sublime_plugin.TextCommand):
def run(self, edit):
stino.main.handle_sketch(self.view, stino.main.upload_sketch)
class UploadUsingProgrammerCommand(sublime_plugin.TextCommand):
def run(self, edit):
stino.main.handle_sketch(self.view, stino.main.upload_sketch,
using_programmer=True)
class SetExtraFlagCommand(sublime_plugin.WindowCommand):
def run(self):
caption = stino.i18n.translate('Extra compilation flags:')
extra_flag = stino.settings.get('extra_flag', '')
self.window.show_input_panel(caption, extra_flag, self.on_done,
None, None)
def on_done(self, extra_flag):
stino.settings.set('extra_flag', extra_flag)
class ToggleFullCompilationCommand(sublime_plugin.WindowCommand):
def run(self):
build_verbose = stino.settings.get('full_compilation', False)
stino.settings.set('full_compilation', not build_verbose)
def is_checked(self):
build_verbose = stino.settings.get('full_compilation', False)
return build_verbose
class ShowCompilationOutputCommand(sublime_plugin.WindowCommand):
def run(self):
build_verbose = stino.settings.get('build_verbose', False)
stino.settings.set('build_verbose', not build_verbose)
def is_checked(self):
build_verbose = stino.settings.get('build_verbose', False)
return build_verbose
class ShowUploadOutputCommand(sublime_plugin.WindowCommand):
def run(self):
upload_verbose = stino.settings.get('upload_verbose', False)
stino.settings.set('upload_verbose', not upload_verbose)
def is_checked(self):
upload_verbose = stino.settings.get('upload_verbose', False)
return upload_verbose
class VerifyCodeCommand(sublime_plugin.WindowCommand):
def run(self):
verify_code = stino.settings.get('verify_code', False)
stino.settings.set('verify_code', not verify_code)
def is_checked(self):
verify_code = stino.settings.get('verify_code', False)
return verify_code
class ToggleBareGccOnlyCommand(sublime_plugin.WindowCommand):
def run(self):
bare_gcc = stino.settings.get('bare_gcc', False)
stino.settings.set('bare_gcc', not bare_gcc)
def is_checked(self):
bare_gcc = stino.settings.get('bare_gcc', False)
return bare_gcc
class ChooseBuildFolderCommand(sublime_plugin.WindowCommand):
def run(self):
stino.main.change_build_dir(self.window)
class SelectBoardCommand(sublime_plugin.WindowCommand):
def run(self, board_id):
stino.main.change_board(self.window, board_id)
def is_checked(self, board_id):
target_board_id = stino.settings.get('target_board_id', '')
return board_id == target_board_id
class SelectSubBoardCommand(sublime_plugin.WindowCommand):
def run(self, option_index, sub_board_id):
stino.main.change_sub_board(self.window, option_index, sub_board_id)
def is_checked(self, option_index, sub_board_id):
target_board_id = stino.settings.get('target_board_id', '')
target_sub_board_ids = stino.settings.get(target_board_id, [])
return sub_board_id in target_sub_board_ids
class SelectProgrammerCommand(sublime_plugin.WindowCommand):
def run(self, programmer_id):
stino.main.change_programmer(programmer_id)
def is_checked(self, programmer_id):
target_programmer_id = stino.settings.get('target_programmer_id', '')
return programmer_id == target_programmer_id
class BurnBootloaderCommand(sublime_plugin.WindowCommand):
def run(self):
stino.main.burn_bootloader(self.window)
class SelectSerialPortCommand(sublime_plugin.WindowCommand):
def run(self, serial_port):
stino.settings.set('serial_port', serial_port)
stino.main.set_status(self.window.active_view())
def is_checked(self, serial_port):
target_serial_port = stino.settings.get('serial_port', '')
return serial_port == target_serial_port
class RunSerialMonitorCommand(sublime_plugin.WindowCommand):
def run(self):
stino.main.toggle_serial_monitor(self.window)
def is_checked(self):
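        # the command shows as checked while a monitor is already running on the selected port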
monitor_module = stino.pyarduino.base.serial_monitor
state = False
serial_port = stino.settings.get('serial_port', '')
if serial_port in monitor_module.serials_in_use:
serial_monitor = monitor_module.serial_monitor_dict.get(
serial_port)
if serial_monitor and serial_monitor.is_running():
state = True
return state
class SendSerialMessageCommand(sublime_plugin.WindowCommand):
def run(self):
caption = stino.i18n.translate('Send:')
self.window.show_input_panel(caption, '', self.on_done, None, None)
def on_done(self, text):
stino.main.send_serial_message(text)
class ChooseBaudrateCommand(sublime_plugin.WindowCommand):
def run(self, baudrate):
stino.settings.set('baudrate', baudrate)
def is_checked(self, baudrate):
target_baudrate = stino.settings.get('baudrate', 9600)
return baudrate == target_baudrate
class ChooseLineEndingCommand(sublime_plugin.WindowCommand):
def run(self, line_ending):
stino.settings.set('line_ending', line_ending)
def is_checked(self, line_ending):
target_line_ending = stino.settings.get('line_ending', '\n')
return line_ending == target_line_ending
class ChooseDisplayModeCommand(sublime_plugin.WindowCommand):
def run(self, display_mode):
stino.settings.set('display_mode', display_mode)
def is_checked(self, display_mode):
target_display_mode = stino.settings.get('display_mode', 'Text')
return display_mode == target_display_mode
class AutoFormatCommand(sublime_plugin.WindowCommand):
def run(self):
self.window.run_command('reindent', {'single_line': False})
class ArchiveSketchCommand(sublime_plugin.TextCommand):
def run(self, edit):
file_path = self.view.file_name()
if file_path:
sketch_path = os.path.dirname(file_path)
stino.main.archive_sketch(self.view.window(), sketch_path)
class ChooseArduinoFolderCommand(sublime_plugin.WindowCommand):
def run(self):
stino.main.select_arduino_dir(self.window)
class ChangeSketchbookFolderCommand(sublime_plugin.WindowCommand):
def run(self):
stino.main.change_sketchbook_dir(self.window)
class ToggleGlobalSettings(sublime_plugin.WindowCommand):
def run(self):
global_settings = stino.settings.get('global_settings', True)
stino.settings.set('global_settings', not global_settings)
    def is_checked(self):
        global_settings = stino.settings.get('global_settings', True)
        return global_settings
class ToggleBigProject(sublime_plugin.WindowCommand):
def run(self):
big_project = stino.settings.get('big_project', False)
stino.settings.set('big_project', not big_project)
stino.main.update_menu()
def is_checked(self):
big_project = stino.settings.get('big_project', False)
return big_project
class ToggleOpenProjectInNewWindowCommand(sublime_plugin.WindowCommand):
def run(self):
new_window = stino.settings.get('open_project_in_new_window', False)
stino.settings.set('open_project_in_new_window', not new_window)
def is_checked(self):
new_window = stino.settings.get('open_project_in_new_window', False)
return new_window
class SelectLanguageCommand(sublime_plugin.WindowCommand):
def run(self, lang_id):
stino.i18n.change_lang(lang_id)
stino.main.create_menus()
def is_checked(self, lang_id):
target_lang_id = stino.settings.get('lang_id', 'en')
return lang_id == target_lang_id
class OpenRefCommand(sublime_plugin.WindowCommand):
def run(self, url):
url = stino.main.get_url(url)
sublime.run_command('open_url', {'url': url})
class FindInReferenceCommand(sublime_plugin.TextCommand):
def run(self, edit):
stino.main.find_in_ref(self.view)
class StinoDocumentsCommand(sublime_plugin.WindowCommand):
def run(self):
sublime.run_command('open_url',
{'url': 'https://github.com/Robot-Will/Stino'})
class AboutStinoCommand(sublime_plugin.WindowCommand):
def run(self):
sublime.run_command('open_url',
{'url': 'https://github.com/Robot-Will/Stino'})
class NoneCommandCommand(sublime_plugin.WindowCommand):
def run(self):
pass
def is_enabled(self):
return False
class PanelOutputCommand(sublime_plugin.TextCommand):
def run(self, edit, text):
pos = self.view.size()
self.view.insert(edit, pos, text)
self.view.show(pos)
class ShowItemListCommand(sublime_plugin.WindowCommand):
def run(self, item_type):
stino.main.show_items_panel(self.window, item_type)
|
[
"stino.main.show_items_panel",
"stino.main.create_menus",
"stino.i18n.change_lang",
"stino.main.open_sketch",
"sublime.windows",
"os.path.isfile",
"stino.main.get_url",
"stino.main.toggle_serial_monitor",
"stino.main.find_in_ref",
"sublime.run_command",
"stino.main.new_sketch",
"os.path.dirname",
"stino.settings.set",
"stino.st_console.is_monitor_view",
"stino.main.import_library",
"stino.main.set_status",
"stino.main.send_serial_message",
"stino.main.burn_bootloader",
"stino.main.change_sketchbook_dir",
"stino.i18n.translate",
"stino.main.change_board",
"stino.settings.get",
"sublime.version",
"re.compile",
"stino.main.update_menu",
"stino.main.change_programmer",
"stino.main.change_sub_board",
"stino.main.select_arduino_dir",
"sublime.active_window",
"stino.main.handle_sketch",
"stino.main.change_build_dir"
] |
[((307, 324), 'sublime.version', 'sublime.version', ([], {}), '()\n', (322, 324), False, 'import sublime\n'), ((658, 695), 're.compile', 're.compile', (['pattern_text', '(re.M | re.S)'], {}), '(pattern_text, re.M | re.S)\n', (668, 695), False, 'import re\n'), ((739, 766), 'stino.main.set_status', 'stino.main.set_status', (['view'], {}), '(view)\n', (760, 766), False, 'import stino\n'), ((870, 908), 'stino.st_console.is_monitor_view', 'stino.st_console.is_monitor_view', (['view'], {}), '(view)\n', (902, 908), False, 'import stino\n'), ((4670, 4715), 'stino.settings.get', 'stino.settings.get', (['"""show_arduino_menu"""', '(True)'], {}), "('show_arduino_menu', True)\n", (4688, 4715), False, 'import stino\n'), ((4724, 4786), 'stino.settings.set', 'stino.settings.set', (['"""show_arduino_menu"""', '(not show_arduino_menu)'], {}), "('show_arduino_menu', not show_arduino_menu)\n", (4742, 4786), False, 'import stino\n'), ((4795, 4820), 'stino.main.create_menus', 'stino.main.create_menus', ([], {}), '()\n', (4818, 4820), False, 'import stino\n'), ((4876, 4921), 'stino.settings.get', 'stino.settings.get', (['"""show_arduino_menu"""', '(True)'], {}), "('show_arduino_menu', True)\n", (4894, 4921), False, 'import stino\n'), ((5039, 5063), 'stino.main.update_menu', 'stino.main.update_menu', ([], {}), '()\n', (5061, 5063), False, 'import stino\n'), ((5157, 5201), 'stino.i18n.translate', 'stino.i18n.translate', (['"""Name for New Sketch:"""'], {}), "('Name for New Sketch:')\n", (5177, 5201), False, 'import stino\n'), ((5323, 5370), 'stino.main.new_sketch', 'stino.main.new_sketch', (['self.window', 'sketch_name'], {}), '(self.window, sketch_name)\n', (5344, 5370), False, 'import stino\n'), ((5481, 5536), 'stino.settings.get', 'stino.settings.get', (['"""open_project_in_new_window"""', '(False)'], {}), "('open_project_in_new_window', False)\n", (5499, 5536), False, 'import stino\n'), ((5704, 5747), 'stino.main.open_sketch', 'stino.main.open_sketch', (['window', 'sketch_path'], {}), '(window, sketch_path)\n', (5726, 5747), False, 'import stino\n'), ((5853, 5909), 'stino.main.import_library', 'stino.main.import_library', (['self.view', 'edit', 'library_path'], {}), '(self.view, edit, library_path)\n', (5878, 5909), False, 'import stino\n'), ((6298, 6358), 'stino.main.handle_sketch', 'stino.main.handle_sketch', (['self.view', 'stino.main.build_sketch'], {}), '(self.view, stino.main.build_sketch)\n', (6322, 6358), False, 'import stino\n'), ((6449, 6510), 'stino.main.handle_sketch', 'stino.main.handle_sketch', (['self.view', 'stino.main.upload_sketch'], {}), '(self.view, stino.main.upload_sketch)\n', (6473, 6510), False, 'import stino\n'), ((6610, 6698), 'stino.main.handle_sketch', 'stino.main.handle_sketch', (['self.view', 'stino.main.upload_sketch'], {'using_programmer': '(True)'}), '(self.view, stino.main.upload_sketch,\n using_programmer=True)\n', (6634, 6698), False, 'import stino\n'), ((6824, 6872), 'stino.i18n.translate', 'stino.i18n.translate', (['"""Extra compilation flags:"""'], {}), "('Extra compilation flags:')\n", (6844, 6872), False, 'import stino\n'), ((6894, 6930), 'stino.settings.get', 'stino.settings.get', (['"""extra_flag"""', '""""""'], {}), "('extra_flag', '')\n", (6912, 6930), False, 'import stino\n'), ((7096, 7140), 'stino.settings.set', 'stino.settings.set', (['"""extra_flag"""', 'extra_flag'], {}), "('extra_flag', extra_flag)\n", (7114, 7140), False, 'import stino\n'), ((7252, 7297), 'stino.settings.get', 'stino.settings.get', (['"""full_compilation"""', '(False)'], {}), 
"('full_compilation', False)\n", (7270, 7297), False, 'import stino\n'), ((7306, 7363), 'stino.settings.set', 'stino.settings.set', (['"""full_compilation"""', '(not build_verbose)'], {}), "('full_compilation', not build_verbose)\n", (7324, 7363), False, 'import stino\n'), ((7415, 7460), 'stino.settings.get', 'stino.settings.get', (['"""full_compilation"""', '(False)'], {}), "('full_compilation', False)\n", (7433, 7460), False, 'import stino\n'), ((7601, 7643), 'stino.settings.get', 'stino.settings.get', (['"""build_verbose"""', '(False)'], {}), "('build_verbose', False)\n", (7619, 7643), False, 'import stino\n'), ((7652, 7706), 'stino.settings.set', 'stino.settings.set', (['"""build_verbose"""', '(not build_verbose)'], {}), "('build_verbose', not build_verbose)\n", (7670, 7706), False, 'import stino\n'), ((7758, 7800), 'stino.settings.get', 'stino.settings.get', (['"""build_verbose"""', '(False)'], {}), "('build_verbose', False)\n", (7776, 7800), False, 'import stino\n'), ((7937, 7980), 'stino.settings.get', 'stino.settings.get', (['"""upload_verbose"""', '(False)'], {}), "('upload_verbose', False)\n", (7955, 7980), False, 'import stino\n'), ((7989, 8045), 'stino.settings.set', 'stino.settings.set', (['"""upload_verbose"""', '(not upload_verbose)'], {}), "('upload_verbose', not upload_verbose)\n", (8007, 8045), False, 'import stino\n'), ((8098, 8141), 'stino.settings.get', 'stino.settings.get', (['"""upload_verbose"""', '(False)'], {}), "('upload_verbose', False)\n", (8116, 8141), False, 'import stino\n'), ((8270, 8310), 'stino.settings.get', 'stino.settings.get', (['"""verify_code"""', '(False)'], {}), "('verify_code', False)\n", (8288, 8310), False, 'import stino\n'), ((8319, 8369), 'stino.settings.set', 'stino.settings.set', (['"""verify_code"""', '(not verify_code)'], {}), "('verify_code', not verify_code)\n", (8337, 8369), False, 'import stino\n'), ((8419, 8459), 'stino.settings.get', 'stino.settings.get', (['"""verify_code"""', '(False)'], {}), "('verify_code', False)\n", (8437, 8459), False, 'import stino\n'), ((8589, 8626), 'stino.settings.get', 'stino.settings.get', (['"""bare_gcc"""', '(False)'], {}), "('bare_gcc', False)\n", (8607, 8626), False, 'import stino\n'), ((8635, 8679), 'stino.settings.set', 'stino.settings.set', (['"""bare_gcc"""', '(not bare_gcc)'], {}), "('bare_gcc', not bare_gcc)\n", (8653, 8679), False, 'import stino\n'), ((8726, 8763), 'stino.settings.get', 'stino.settings.get', (['"""bare_gcc"""', '(False)'], {}), "('bare_gcc', False)\n", (8744, 8763), False, 'import stino\n'), ((8879, 8919), 'stino.main.change_build_dir', 'stino.main.change_build_dir', (['self.window'], {}), '(self.window)\n', (8906, 8919), False, 'import stino\n'), ((9015, 9061), 'stino.main.change_board', 'stino.main.change_board', (['self.window', 'board_id'], {}), '(self.window, board_id)\n', (9038, 9061), False, 'import stino\n'), ((9125, 9166), 'stino.settings.get', 'stino.settings.get', (['"""target_board_id"""', '""""""'], {}), "('target_board_id', '')\n", (9143, 9166), False, 'import stino\n'), ((9326, 9394), 'stino.main.change_sub_board', 'stino.main.change_sub_board', (['self.window', 'option_index', 'sub_board_id'], {}), '(self.window, option_index, sub_board_id)\n', (9353, 9394), False, 'import stino\n'), ((9476, 9517), 'stino.settings.get', 'stino.settings.get', (['"""target_board_id"""', '""""""'], {}), "('target_board_id', '')\n", (9494, 9517), False, 'import stino\n'), ((9549, 9588), 'stino.settings.get', 'stino.settings.get', (['target_board_id', '[]'], {}), '(target_board_id, 
[])\n', (9567, 9588), False, 'import stino\n'), ((9746, 9789), 'stino.main.change_programmer', 'stino.main.change_programmer', (['programmer_id'], {}), '(programmer_id)\n', (9774, 9789), False, 'import stino\n'), ((9863, 9909), 'stino.settings.get', 'stino.settings.get', (['"""target_programmer_id"""', '""""""'], {}), "('target_programmer_id', '')\n", (9881, 9909), False, 'import stino\n'), ((10051, 10090), 'stino.main.burn_bootloader', 'stino.main.burn_bootloader', (['self.window'], {}), '(self.window)\n', (10077, 10090), False, 'import stino\n'), ((10194, 10240), 'stino.settings.set', 'stino.settings.set', (['"""serial_port"""', 'serial_port'], {}), "('serial_port', serial_port)\n", (10212, 10240), False, 'import stino\n'), ((10367, 10404), 'stino.settings.get', 'stino.settings.get', (['"""serial_port"""', '""""""'], {}), "('serial_port', '')\n", (10385, 10404), False, 'import stino\n'), ((10544, 10589), 'stino.main.toggle_serial_monitor', 'stino.main.toggle_serial_monitor', (['self.window'], {}), '(self.window)\n', (10576, 10589), False, 'import stino\n'), ((10722, 10759), 'stino.settings.get', 'stino.settings.get', (['"""serial_port"""', '""""""'], {}), "('serial_port', '')\n", (10740, 10759), False, 'import stino\n'), ((11129, 11158), 'stino.i18n.translate', 'stino.i18n.translate', (['"""Send:"""'], {}), "('Send:')\n", (11149, 11158), False, 'import stino\n'), ((11273, 11309), 'stino.main.send_serial_message', 'stino.main.send_serial_message', (['text'], {}), '(text)\n', (11303, 11309), False, 'import stino\n'), ((11408, 11448), 'stino.settings.set', 'stino.settings.set', (['"""baudrate"""', 'baudrate'], {}), "('baudrate', baudrate)\n", (11426, 11448), False, 'import stino\n'), ((11512, 11548), 'stino.settings.get', 'stino.settings.get', (['"""baudrate"""', '(9600)'], {}), "('baudrate', 9600)\n", (11530, 11548), False, 'import stino\n'), ((11695, 11741), 'stino.settings.set', 'stino.settings.set', (['"""line_ending"""', 'line_ending'], {}), "('line_ending', line_ending)\n", (11713, 11741), False, 'import stino\n'), ((11811, 11850), 'stino.settings.get', 'stino.settings.get', (['"""line_ending"""', '"""\n"""'], {}), "('line_ending', '\\n')\n", (11829, 11850), False, 'import stino\n'), ((12005, 12053), 'stino.settings.set', 'stino.settings.set', (['"""display_mode"""', 'display_mode'], {}), "('display_mode', display_mode)\n", (12023, 12053), False, 'import stino\n'), ((12125, 12167), 'stino.settings.get', 'stino.settings.get', (['"""display_mode"""', '"""Text"""'], {}), "('display_mode', 'Text')\n", (12143, 12167), False, 'import stino\n'), ((12727, 12769), 'stino.main.select_arduino_dir', 'stino.main.select_arduino_dir', (['self.window'], {}), '(self.window)\n', (12756, 12769), False, 'import stino\n'), ((12866, 12911), 'stino.main.change_sketchbook_dir', 'stino.main.change_sketchbook_dir', (['self.window'], {}), '(self.window)\n', (12898, 12911), False, 'import stino\n'), ((13017, 13060), 'stino.settings.get', 'stino.settings.get', (['"""global_settings"""', '(True)'], {}), "('global_settings', True)\n", (13035, 13060), False, 'import stino\n'), ((13069, 13127), 'stino.settings.set', 'stino.settings.set', (['"""global_settings"""', '(not global_settings)'], {}), "('global_settings', not global_settings)\n", (13087, 13127), False, 'import stino\n'), ((13272, 13312), 'stino.settings.get', 'stino.settings.get', (['"""big_project"""', '(False)'], {}), "('big_project', False)\n", (13290, 13312), False, 'import stino\n'), ((13321, 13371), 'stino.settings.set', 'stino.settings.set', 
(['"""big_project"""', '(not big_project)'], {}), "('big_project', not big_project)\n", (13339, 13371), False, 'import stino\n'), ((13380, 13404), 'stino.main.update_menu', 'stino.main.update_menu', ([], {}), '()\n', (13402, 13404), False, 'import stino\n'), ((13454, 13494), 'stino.settings.get', 'stino.settings.get', (['"""big_project"""', '(False)'], {}), "('big_project', False)\n", (13472, 13494), False, 'import stino\n'), ((13637, 13692), 'stino.settings.get', 'stino.settings.get', (['"""open_project_in_new_window"""', '(False)'], {}), "('open_project_in_new_window', False)\n", (13655, 13692), False, 'import stino\n'), ((13701, 13765), 'stino.settings.set', 'stino.settings.set', (['"""open_project_in_new_window"""', '(not new_window)'], {}), "('open_project_in_new_window', not new_window)\n", (13719, 13765), False, 'import stino\n'), ((13814, 13869), 'stino.settings.get', 'stino.settings.get', (['"""open_project_in_new_window"""', '(False)'], {}), "('open_project_in_new_window', False)\n", (13832, 13869), False, 'import stino\n'), ((13993, 14024), 'stino.i18n.change_lang', 'stino.i18n.change_lang', (['lang_id'], {}), '(lang_id)\n', (14015, 14024), False, 'import stino\n'), ((14033, 14058), 'stino.main.create_menus', 'stino.main.create_menus', ([], {}), '()\n', (14056, 14058), False, 'import stino\n'), ((14120, 14155), 'stino.settings.get', 'stino.settings.get', (['"""lang_id"""', '"""en"""'], {}), "('lang_id', 'en')\n", (14138, 14155), False, 'import stino\n'), ((14289, 14312), 'stino.main.get_url', 'stino.main.get_url', (['url'], {}), '(url)\n', (14307, 14312), False, 'import stino\n'), ((14321, 14366), 'sublime.run_command', 'sublime.run_command', (['"""open_url"""', "{'url': url}"], {}), "('open_url', {'url': url})\n", (14340, 14366), False, 'import sublime\n'), ((14460, 14493), 'stino.main.find_in_ref', 'stino.main.find_in_ref', (['self.view'], {}), '(self.view)\n', (14482, 14493), False, 'import stino\n'), ((14582, 14661), 'sublime.run_command', 'sublime.run_command', (['"""open_url"""', "{'url': 'https://github.com/Robot-Will/Stino'}"], {}), "('open_url', {'url': 'https://github.com/Robot-Will/Stino'})\n", (14601, 14661), False, 'import sublime\n'), ((14774, 14853), 'sublime.run_command', 'sublime.run_command', (['"""open_url"""', "{'url': 'https://github.com/Robot-Will/Stino'}"], {}), "('open_url', {'url': 'https://github.com/Robot-Will/Stino'})\n", (14793, 14853), False, 'import sublime\n'), ((15305, 15356), 'stino.main.show_items_panel', 'stino.main.show_items_panel', (['self.window', 'item_type'], {}), '(self.window, item_type)\n', (15332, 15356), False, 'import stino\n'), ((5572, 5605), 'sublime.run_command', 'sublime.run_command', (['"""new_window"""'], {}), "('new_window')\n", (5591, 5605), False, 'import sublime\n'), ((6083, 6109), 'os.path.dirname', 'os.path.dirname', (['file_path'], {}), '(file_path)\n', (6098, 6109), False, 'import os\n'), ((6161, 6206), 'sublime.run_command', 'sublime.run_command', (['"""open_url"""', "{'url': url}"], {}), "('open_url', {'url': url})\n", (6180, 6206), False, 'import sublime\n'), ((12536, 12562), 'os.path.dirname', 'os.path.dirname', (['file_path'], {}), '(file_path)\n', (12551, 12562), False, 'import os\n'), ((1901, 1926), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1915, 1926), False, 'import os\n'), ((5627, 5644), 'sublime.windows', 'sublime.windows', ([], {}), '()\n', (5642, 5644), False, 'import sublime\n'), ((2825, 2848), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (2846, 
2848), False, 'import sublime\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Pytest suite for twinpy.properties.hexagonal.
"""
from copy import deepcopy
import numpy as np
from twinpy.properties import hexagonal
a = 2.93
c = 4.65
def test_check_hexagonal_lattice(ti_cell_wyckoff_c):
"""
Check check_hexagonal_lattice.
"""
hexagonal_lattice = ti_cell_wyckoff_c[0]
hexagonal.check_hexagonal_lattice(lattice=hexagonal_lattice)
def test_check_cell_is_hcp(ti_cell_wyckoff_c, ti_cell_wyckoff_d):
"""
Check check_cell_is_hcp.
"""
for cell in [ti_cell_wyckoff_c, ti_cell_wyckoff_d]:
hexagonal.check_cell_is_hcp(cell=cell)
def test_convert_direction():
"""
Check convert_direction_from_four_to_three
and convert_direction_from_three_to_four.
Note:
Let basis vectors for hexagonal lattice be a_1, a_2 and c,
a_1 = [1,0,0] = 1/3[2,-1,-1,0].
"""
def _test_convert_direction_from_three_to_four(three, four_expected):
_four = hexagonal.convert_direction_from_three_to_four(
three=three)
np.testing.assert_allclose(_four, four_expected)
def _test_convert_direction_from_four_to_three(four, three_expected):
_three = hexagonal.convert_direction_from_four_to_three(
four=four)
np.testing.assert_allclose(_three, three_expected)
a_1_three = np.array([1.,0.,0.])
a_1_four = np.array([2.,-1.,-1.,0.]) / 3.
_test_convert_direction_from_three_to_four(three=a_1_three,
four_expected=a_1_four)
_test_convert_direction_from_four_to_three(four=a_1_four,
three_expected=a_1_three)
def test_hexagonal_direction(ti_cell_wyckoff_c):
"""
Check HexagonalDirection.
"""
def _test_reset_indices(hex_dr, three):
_hex_dr = deepcopy(hex_dr)
_hex_dr.reset_indices(three=three)
_three_expected = _hex_dr.three
np.testing.assert_allclose(three, _three_expected)
def _test_inverse(hex_dr):
_inv_hex_dr = deepcopy(hex_dr)
_inv_hex_dr.inverse()
_three = hex_dr.three
_inv_three = _inv_hex_dr.three
np.testing.assert_allclose(_three, _inv_three*(-1.))
def _test_get_cartesian(hex_dr, cart_expected):
_cart = hex_dr.get_cartesian(normalize=False)
_cart_normalized = hex_dr.get_cartesian(normalize=True)
_norm = np.linalg.norm(_cart_normalized)
np.testing.assert_allclose(_cart, cart_expected)
np.testing.assert_allclose(_norm, 1.)
lattice = ti_cell_wyckoff_c[0]
three_a1 = np.array([1.,0.,0.]) # a_1
three_c = np.array([0.,0.,1.]) # c
a1_cart = np.array([a,0.,0.]) # cartesian coordinate for vector a_1
hex_dr_a1 = hexagonal.HexagonalDirection(lattice=lattice, three=three_a1)
_test_reset_indices(hex_dr=hex_dr_a1,
three=three_c)
_test_inverse(hex_dr=hex_dr_a1)
_test_get_cartesian(hex_dr=hex_dr_a1, cart_expected=a1_cart)
def test_convert_plane():
"""
Check convert_plane_from_four_to_three
and convert_plane_from_three_to_four.
Note:
(10-12) plane is equal to (102).
"""
def _test_convert_plane_from_three_to_four(three, four_expected):
_four = hexagonal.convert_plane_from_three_to_four(
three=three)
np.testing.assert_allclose(_four, four_expected)
def _test_convert_plane_from_four_to_three(four, three_expected):
_three = hexagonal.convert_plane_from_four_to_three(
four=four)
np.testing.assert_allclose(_three, three_expected)
twin_three = np.array([1.,0.,2.])
twin_four = np.array([1.,0.,-1.,2.])
_test_convert_plane_from_three_to_four(three=twin_three,
four_expected=twin_four)
_test_convert_plane_from_four_to_three(four=twin_four,
three_expected=twin_three)
def test_hexagonal_plane(ti_cell_wyckoff_c):
"""
Check HexagonalPlane.
"""
def _test_reset_indices(hex_pln, four):
_hex_pln = deepcopy(hex_pln)
_hex_pln.reset_indices(four=four)
_four = _hex_pln.four
np.testing.assert_allclose(_four, four)
def _test_inverse(hex_pln):
_inv_hex_pln = deepcopy(hex_pln)
_inv_hex_pln.inverse()
four = hex_pln.four
_inv_four = _inv_hex_pln.four
np.testing.assert_allclose(_inv_four, four*(-1))
def _test_get_distance_from_plane(hex_pln, frac_coord, d_expected):
_d = hex_pln.get_distance_from_plane(frac_coord=frac_coord)
np.testing.assert_allclose(_d, d_expected)
def _test_get_plane_interval(hex_pln, d_expected):
_d = hex_pln.get_plane_interval()
np.testing.assert_allclose(_d, d_expected)
lattice = ti_cell_wyckoff_c[0]
basal_four = np.array([0.,0.,0.,1.])
twin_four = np.array([1.,0.,-1.,2.])
hex_pln_basal = hexagonal.HexagonalPlane(lattice=lattice,
four=basal_four)
hex_pln_twin = hexagonal.HexagonalPlane(lattice=lattice,
four=twin_four)
c_three = np.array([0.,0.,1.])
_test_reset_indices(hex_pln=hex_pln_twin,
four=basal_four)
_test_inverse(hex_pln=hex_pln_twin)
_test_get_distance_from_plane(hex_pln=hex_pln_basal,
frac_coord=c_three,
d_expected=c)
_test_get_plane_interval(hex_pln=hex_pln_basal,
d_expected=c)
|
[
"twinpy.properties.hexagonal.HexagonalPlane",
"twinpy.properties.hexagonal.convert_direction_from_three_to_four",
"copy.deepcopy",
"twinpy.properties.hexagonal.convert_direction_from_four_to_three",
"twinpy.properties.hexagonal.check_cell_is_hcp",
"twinpy.properties.hexagonal.HexagonalDirection",
"twinpy.properties.hexagonal.convert_plane_from_four_to_three",
"numpy.array",
"numpy.linalg.norm",
"numpy.testing.assert_allclose",
"twinpy.properties.hexagonal.convert_plane_from_three_to_four",
"twinpy.properties.hexagonal.check_hexagonal_lattice"
] |
[((363, 423), 'twinpy.properties.hexagonal.check_hexagonal_lattice', 'hexagonal.check_hexagonal_lattice', ([], {'lattice': 'hexagonal_lattice'}), '(lattice=hexagonal_lattice)\n', (396, 423), False, 'from twinpy.properties import hexagonal\n'), ((1366, 1391), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (1374, 1391), True, 'import numpy as np\n'), ((2626, 2651), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (2634, 2651), True, 'import numpy as np\n'), ((2668, 2693), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (2676, 2693), True, 'import numpy as np\n'), ((2708, 2731), 'numpy.array', 'np.array', (['[a, 0.0, 0.0]'], {}), '([a, 0.0, 0.0])\n', (2716, 2731), True, 'import numpy as np\n'), ((2783, 2844), 'twinpy.properties.hexagonal.HexagonalDirection', 'hexagonal.HexagonalDirection', ([], {'lattice': 'lattice', 'three': 'three_a1'}), '(lattice=lattice, three=three_a1)\n', (2811, 2844), False, 'from twinpy.properties import hexagonal\n'), ((3661, 3686), 'numpy.array', 'np.array', (['[1.0, 0.0, 2.0]'], {}), '([1.0, 0.0, 2.0])\n', (3669, 3686), True, 'import numpy as np\n'), ((3698, 3729), 'numpy.array', 'np.array', (['[1.0, 0.0, -1.0, 2.0]'], {}), '([1.0, 0.0, -1.0, 2.0])\n', (3706, 3729), True, 'import numpy as np\n'), ((4894, 4924), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0])\n', (4902, 4924), True, 'import numpy as np\n'), ((4934, 4965), 'numpy.array', 'np.array', (['[1.0, 0.0, -1.0, 2.0]'], {}), '([1.0, 0.0, -1.0, 2.0])\n', (4942, 4965), True, 'import numpy as np\n'), ((4979, 5037), 'twinpy.properties.hexagonal.HexagonalPlane', 'hexagonal.HexagonalPlane', ([], {'lattice': 'lattice', 'four': 'basal_four'}), '(lattice=lattice, four=basal_four)\n', (5003, 5037), False, 'from twinpy.properties import hexagonal\n'), ((5102, 5159), 'twinpy.properties.hexagonal.HexagonalPlane', 'hexagonal.HexagonalPlane', ([], {'lattice': 'lattice', 'four': 'twin_four'}), '(lattice=lattice, four=twin_four)\n', (5126, 5159), False, 'from twinpy.properties import hexagonal\n'), ((5218, 5243), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (5226, 5243), True, 'import numpy as np\n'), ((601, 639), 'twinpy.properties.hexagonal.check_cell_is_hcp', 'hexagonal.check_cell_is_hcp', ([], {'cell': 'cell'}), '(cell=cell)\n', (628, 639), False, 'from twinpy.properties import hexagonal\n'), ((989, 1048), 'twinpy.properties.hexagonal.convert_direction_from_three_to_four', 'hexagonal.convert_direction_from_three_to_four', ([], {'three': 'three'}), '(three=three)\n', (1035, 1048), False, 'from twinpy.properties import hexagonal\n'), ((1074, 1122), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_four', 'four_expected'], {}), '(_four, four_expected)\n', (1100, 1122), True, 'import numpy as np\n'), ((1215, 1272), 'twinpy.properties.hexagonal.convert_direction_from_four_to_three', 'hexagonal.convert_direction_from_four_to_three', ([], {'four': 'four'}), '(four=four)\n', (1261, 1272), False, 'from twinpy.properties import hexagonal\n'), ((1298, 1348), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_three', 'three_expected'], {}), '(_three, three_expected)\n', (1324, 1348), True, 'import numpy as np\n'), ((1402, 1434), 'numpy.array', 'np.array', (['[2.0, -1.0, -1.0, 0.0]'], {}), '([2.0, -1.0, -1.0, 0.0])\n', (1410, 1434), True, 'import numpy as np\n'), ((1862, 1878), 'copy.deepcopy', 'deepcopy', (['hex_dr'], {}), '(hex_dr)\n', (1870, 1878), False, 'from copy import deepcopy\n'), ((1970, 2020), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['three', '_three_expected'], {}), '(three, _three_expected)\n', (1996, 2020), True, 'import numpy as np\n'), ((2075, 2091), 'copy.deepcopy', 'deepcopy', (['hex_dr'], {}), '(hex_dr)\n', (2083, 2091), False, 'from copy import deepcopy\n'), ((2199, 2252), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_three', '(_inv_three * -1.0)'], {}), '(_three, _inv_three * -1.0)\n', (2225, 2252), True, 'import numpy as np\n'), ((2439, 2471), 'numpy.linalg.norm', 'np.linalg.norm', (['_cart_normalized'], {}), '(_cart_normalized)\n', (2453, 2471), True, 'import numpy as np\n'), ((2480, 2528), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_cart', 'cart_expected'], {}), '(_cart, cart_expected)\n', (2506, 2528), True, 'import numpy as np\n'), ((2537, 2575), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_norm', '(1.0)'], {}), '(_norm, 1.0)\n', (2563, 2575), True, 'import numpy as np\n'), ((3295, 3350), 'twinpy.properties.hexagonal.convert_plane_from_three_to_four', 'hexagonal.convert_plane_from_three_to_four', ([], {'three': 'three'}), '(three=three)\n', (3337, 3350), False, 'from twinpy.properties import hexagonal\n'), ((3376, 3424), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_four', 'four_expected'], {}), '(_four, four_expected)\n', (3402, 3424), True, 'import numpy as np\n'), ((3513, 3566), 'twinpy.properties.hexagonal.convert_plane_from_four_to_three', 'hexagonal.convert_plane_from_four_to_three', ([], {'four': 'four'}), '(four=four)\n', (3555, 3566), False, 'from twinpy.properties import hexagonal\n'), ((3592, 3642), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_three', 'three_expected'], {}), '(_three, three_expected)\n', (3618, 3642), True, 'import numpy as np\n'), ((4134, 4151), 'copy.deepcopy', 'deepcopy', (['hex_pln'], {}), '(hex_pln)\n', (4142, 4151), False, 'from copy import deepcopy\n'), ((4232, 4271), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_four', 'four'], {}), '(_four, four)\n', (4258, 4271), True, 'import numpy as np\n'), ((4328, 4345), 'copy.deepcopy', 'deepcopy', (['hex_pln'], {}), '(hex_pln)\n', (4336, 4345), False, 'from copy import deepcopy\n'), ((4451, 4499), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_inv_four', '(four * -1)'], {}), '(_inv_four, four * -1)\n', (4477, 4499), True, 'import numpy as np\n'), ((4649, 4691), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_d', 'd_expected'], {}), '(_d, d_expected)\n', (4675, 4691), True, 'import numpy as np\n'), ((4798, 4840), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['_d', 'd_expected'], {}), '(_d, d_expected)\n', (4824, 4840), True, 'import numpy as np\n')]
|
import json
INSTITUTION_TEMPLATE = '''
{
"Institution":{
"Students":{
},
"Teachers":{
},
"Quizzes":{
"DataStructures":{
},
"Algorithms":{
},
"MachineLearning":{
}
}
}
}
'''
class DatabaseHandler:
def __init__(self):
        # add a try/except block in case the file does not exist
with open('database.json') as f:
self.institute_data = json.load(f)
def get_students_list(self):
return self.institute_data['Institution']['Students']
def get_teachers_list(self):
return self.institute_data['Institution']['Teachers']
def update_teachers_list(self, teachers_list):
        # make some checks to be sure teachers_list is in the required format
self.institute_data['Institution']['Teachers'] = teachers_list
with open('database.json', 'w') as f:
json.dump(self.institute_data, f, indent=2)
def update_students_list(self, students_list):
        # make some checks to be sure students_list is in the required format
self.institute_data['Institution']['Students'] = students_list
with open('database.json', 'w') as f:
json.dump(self.institute_data, f, indent=2)
def get_subjects_list(self):
'''
        Returns the list of subjects available for quizzes.
'''
Quizzes_dict = self.institute_data['Institution']['Quizzes']
subjects_lists = list(Quizzes_dict.keys())
return subjects_lists
def get_subject_quiz(self,subject):
'''
        Returns the quizzes stored under the given subject.
'''
return self.institute_data['Institution']['Quizzes'][subject]
def get_tests_list(self):
        '''Returns the quizzes dictionary for the logged-in author: subject
        names are the keys, and each subject maps quiz names to quiz data.'''
return self.institute_data['Institution']['Quizzes']
def add_new_quiz(self, quizzes_list):
        # make some checks to be sure quizzes_list is in the required format
self.institute_data['Institution']['Quizzes'] = quizzes_list
with open('database.json', 'w') as f:
json.dump(self.institute_data, f, indent=2)
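# --- Editor's sketch (illustrative usage, not part of the original module) ---
# Assumes a database.json shaped like INSTITUTION_TEMPLATE sits next to this file.
if __name__ == '__main__':
    handler = DatabaseHandler()
    print(handler.get_subjects_list())  # e.g. ['DataStructures', 'Algorithms', 'MachineLearning']
    quizzes = handler.get_tests_list()
    quizzes['DataStructures']['SampleQuiz'] = {'questions': []}
    handler.add_new_quiz(quizzes)  # persists the change back to database.json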
|
[
"json.dump",
"json.load"
] |
[((417, 429), 'json.load', 'json.load', (['f'], {}), '(f)\n', (426, 429), False, 'import json\n'), ((884, 927), 'json.dump', 'json.dump', (['self.institute_data', 'f'], {'indent': '(2)'}), '(self.institute_data, f, indent=2)\n', (893, 927), False, 'import json\n'), ((1194, 1237), 'json.dump', 'json.dump', (['self.institute_data', 'f'], {'indent': '(2)'}), '(self.institute_data, f, indent=2)\n', (1203, 1237), False, 'import json\n'), ((2162, 2205), 'json.dump', 'json.dump', (['self.institute_data', 'f'], {'indent': '(2)'}), '(self.institute_data, f, indent=2)\n', (2171, 2205), False, 'import json\n')]
|
from __future__ import print_function
import os
from pprint import pprint
try:
input = raw_input
except NameError:
pass
import argparse
import pc_lib_api
import pc_lib_general
import json
import pandas
from datetime import datetime, date, time
from pathlib import Path
# --Execution Block-- #
# --Parse command line arguments-- #
parser = argparse.ArgumentParser(prog='rltoolbox')
parser.add_argument(
'-u',
'--username',
type=str,
help='*Required* - Prisma Cloud API Access Key ID that you want to set to access your Prisma Cloud account.')
parser.add_argument(
'-p',
'--password',
type=str,
help='*Required* - Prisma Cloud API Secret Key that you want to set to access your Prisma Cloud account.')
parser.add_argument(
'-url',
'--uiurl',
type=str,
help='*Required* - Base URL used in the UI for connecting to Prisma Cloud. '
'Formatted as app.prismacloud.io or app2.prismacloud.io or app.eu.prismacloud.io, etc. '
'You can also input the api version of the URL if you know it and it will be passed through.')
parser.add_argument(
'-url_compute',
'--uiurl_compute',
type=str,
help='*Required* - Base URL used in the UI for connecting to Prisma Cloud Compute. '
'Formatted as region.cloud.twistlock.com/identifier.'
'Retrieved from Compute->Manage->System->Downloads->Path to Console')
parser.add_argument(
'-y',
'--yes',
action='store_true',
help='(Optional) - Override user input for verification (auto answer for yes).')
args = parser.parse_args()
# --End parse command line arguments-- #
# --Main-- #
# Get login details worked out
pc_settings = pc_lib_general.pc_login_get(args.username, args.password, args.uiurl, args.uiurl_compute)
# Verification (override with -y)
if not args.yes:
print()
    print('Ready to execute commands against your Prisma Cloud tenant.')
verification_response = str(input('Would you like to continue (y or yes to continue)?'))
continue_response = {'yes', 'y'}
print()
if verification_response not in continue_response:
pc_lib_general.pc_exit_error(400, 'Verification failed due to user response. Exiting...')
# Sort out API Login
print('API - Getting authentication token...', end='')
pc_settings = pc_lib_api.pc_jwt_get(pc_settings)
print('Done.')
# Get containers list
print('API - Getting containers list...', end='')
pc_settings, response_package = pc_lib_api.api_containers_get(pc_settings)
file_name = "containers_list_filtered_" + str(datetime.now().strftime("%Y-%m-%d-%H-%M-%S")) + ".csv"
file_path = os.path.join(Path.home(), "prisma-compute-exports")
containers = response_package['data']
data_header = "Application,Hostname,Cluster,Image Name,Namespace"
print("Exporting data to: " + os.path.join(file_path, file_name))
pc_lib_general.pc_file_write_csv(file_name, data_header, file_path)
for container in containers:
data_info_hostname = container['hostname']
data_info_namespace = container['info']['namespace']
data_info_cluster = container['info']['cluster']
data_info_imageName = container['info']['imageName']
data_info_app = container['info']['app']
data_line = data_info_app + "," + data_info_hostname + "," + data_info_cluster + "," + data_info_imageName + "," + data_info_namespace
pc_lib_general.pc_file_write_csv(file_name, data_line, file_path)
print('Done.')
|
[
"pc_lib_general.pc_exit_error",
"pc_lib_general.pc_login_get",
"argparse.ArgumentParser",
"pathlib.Path.home",
"pc_lib_api.pc_jwt_get",
"datetime.datetime.now",
"pc_lib_api.api_containers_get",
"pc_lib_general.pc_file_write_csv",
"os.path.join"
] |
[((350, 391), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""rltoolbox"""'}), "(prog='rltoolbox')\n", (373, 391), False, 'import argparse\n'), ((1687, 1781), 'pc_lib_general.pc_login_get', 'pc_lib_general.pc_login_get', (['args.username', 'args.password', 'args.uiurl', 'args.uiurl_compute'], {}), '(args.username, args.password, args.uiurl, args.\n uiurl_compute)\n', (1714, 1781), False, 'import pc_lib_general\n'), ((2299, 2333), 'pc_lib_api.pc_jwt_get', 'pc_lib_api.pc_jwt_get', (['pc_settings'], {}), '(pc_settings)\n', (2320, 2333), False, 'import pc_lib_api\n'), ((2454, 2496), 'pc_lib_api.api_containers_get', 'pc_lib_api.api_containers_get', (['pc_settings'], {}), '(pc_settings)\n', (2483, 2496), False, 'import pc_lib_api\n'), ((2832, 2899), 'pc_lib_general.pc_file_write_csv', 'pc_lib_general.pc_file_write_csv', (['file_name', 'data_header', 'file_path'], {}), '(file_name, data_header, file_path)\n', (2864, 2899), False, 'import pc_lib_general\n'), ((2623, 2634), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (2632, 2634), False, 'from pathlib import Path\n'), ((3331, 3396), 'pc_lib_general.pc_file_write_csv', 'pc_lib_general.pc_file_write_csv', (['file_name', 'data_line', 'file_path'], {}), '(file_name, data_line, file_path)\n', (3363, 3396), False, 'import pc_lib_general\n'), ((2117, 2211), 'pc_lib_general.pc_exit_error', 'pc_lib_general.pc_exit_error', (['(400)', '"""Verification failed due to user response. Exiting..."""'], {}), "(400,\n 'Verification failed due to user response. Exiting...')\n", (2145, 2211), False, 'import pc_lib_general\n'), ((2796, 2830), 'os.path.join', 'os.path.join', (['file_path', 'file_name'], {}), '(file_path, file_name)\n', (2808, 2830), False, 'import os\n'), ((2543, 2557), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2555, 2557), False, 'from datetime import datetime, date, time\n')]
|
import sys
from xml.etree.ElementInclude import include
from cx_Freeze import setup, Executable
# Dependencies are automatically detected, but it might need fine tuning.
# "packages": ["os"] is used as example only
# build_exe_options = {"packages": ["os"], "excludes": ["tkinter"]}
# base="Win32GUI" should be used only for Windows GUI app
base = None
if sys.platform == "win32":
base = "Win32GUI"
includes = ["jinja2.ext"] # add jinja2.ext here
packages = ["sqlalchemy"]
excludes = ["Tkinter"]
target = Executable(script="main.py", base=base)
build_exe_options = dict(
includes=includes,
packages=packages,
excludes=excludes,
include_files=["resources/", "templates/", "static/", "app.db"],
)  # folder, relative path. Use a tuple, as with single files, to set an absolute path.
setup(
name="Flask App",
version="0.1",
description="Flask App",
copyDependentFiles=True,
options={"build_exe": build_exe_options},
executables=[target],
)
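# Editor's note (illustrative, an assumption about the usual cx_Freeze workflow):
# the frozen app is typically produced with `python setup.py build`, which puts
# the executable plus the include_files under build/exe.<platform>-<pyver>/.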
# # Copy files
# import os
# import shutil
# import os, shutil
# def copytree(src, dst, symlinks=False, ignore=None):
# for item in os.listdir(src):
# s = os.path.join(src, item)
# d = os.path.join(dst, item)
# if os.path.isdir(s):
# shutil.copytree(s, d, symlinks, ignore)
# else:
# shutil.copy2(s, d)
# os.makedirs(os.path.join("build", "exe.win-amd64-3.9", "data"))
# copytree("data", os.path.join("build", "exe.win-amd64-3.9", "data"))
|
[
"cx_Freeze.Executable",
"cx_Freeze.setup"
] |
[((513, 552), 'cx_Freeze.Executable', 'Executable', ([], {'script': '"""main.py"""', 'base': 'base'}), "(script='main.py', base=base)\n", (523, 552), False, 'from cx_Freeze import setup, Executable\n'), ((804, 964), 'cx_Freeze.setup', 'setup', ([], {'name': '"""Flask App"""', 'version': '"""0.1"""', 'description': '"""Flask App"""', 'copyDependentFiles': '(True)', 'options': "{'build_exe': build_exe_options}", 'executables': '[target]'}), "(name='Flask App', version='0.1', description='Flask App',\n copyDependentFiles=True, options={'build_exe': build_exe_options},\n executables=[target])\n", (809, 964), False, 'from cx_Freeze import setup, Executable\n')]
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from future.builtins.disabled import *
import sys
import fnmatch
import re
import os
import argparse
from argparse import ArgumentTypeError
import traceback
from . import commands
class CommandLineParser(object):
def __init__(self):
self.initParser()
def toNumber(self, value):
number = value
try:
number = int(value)
# need native int not newint
number = eval("{}".format(number))
except (TypeError, ValueError):
pass
return number
def unitToFactor(self, unit):
units = {
"k": 1024,
"m": 1024*1024,
"g": 1024*1024*1024,
"t": 1024*1024*1024*1024,
}
factor = 1
try:
if unit:
factor = units[unit[0].lower()]
except KeyError:
raise KeyError("Unsupported unit '{}'".format(unit))
return factor
def patternToRegexp(self, pattern):
        if isinstance(pattern, bytes):
            pattern = pattern.decode("UTF-8")
try:
regexp = None
if pattern.startswith("~"):
regexp = re.compile(pattern[1:])
else:
pattern = fnmatch.translate(pattern)
regexp = re.compile("^"+pattern)
except:
traceback.print_exc()
raise
return regexp
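    # --- Editor's sketch (illustrative, not part of the original source) ---
    # patternToRegexp treats a leading '~' as a raw regular expression and
    # anything else as a shell glob, e.g. for a CommandLineParser instance clp:
    #   clp.patternToRegexp("~^vm\\d+$")  # compiled as-is (minus the '~')
    #   clp.patternToRegexp("vm*")        # translated via fnmatch, then anchored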
def diskModeType(self, s):
fmt = "all | none | <ctrlNr>-<slotNr> [<ctrlNr>-<slotNr>]"
s = s.lower()
if s == "all":
return s
elif s == "none":
return None
pattern = "^(\d+)-(\d+)$"
match = self.matchPattern(pattern, fmt, s)
n, m = (self.toNumber(x) for x in match.groups())
return (n, m)
def memoryType(self, s):
fmt = "<number>[m|g]"
pattern = "^(\d+)(?([m|g].?))$"
pattern = "^([\d.]+)(:?([m|M|g|G]).?)$"
match = self.matchPattern(pattern, fmt, s)
mem, unit = match.groups()
factor = self.unitToFactor(unit)
if factor == 1:
factor = 1024
mem = float(mem)
mem = factor * mem
return int(mem)
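    # Editor's sketch (illustrative): with the corrected pattern above,
    #   parser.memoryType('2g')   returns 2 * 1024**3
    #   parser.memoryType('512m') returns 512 * 1024**2
    #   parser.memoryType('4')    returns 4096 (no unit: the factor falls back to 1024)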
def extraConfigType(self, s):
fmt = "<key>=<value>"
pattern = "^([^=]+)=(.*)$"
match = self.matchPattern(pattern, fmt, s)
key, value = match.groups()
return (key, value)
def isoType(self, s):
fmt = "\[datastore\] <path>"
pattern = "^\[[^\]]+\]\s.*$"
match = self.matchPattern(pattern, fmt, s)
return s
def diskLinkedType(self, s):
fmt = "[<ctrlNr>-<slotNr>,]vm[:snapshot],<ctrlNr>-<slotNr>"
pattern = "^(?:(\d+)-(\d+),)?([^:,]+)(?::([^,]+))?(?:,(\d+)-(\d+))$"
match = self.matchPattern(pattern, fmt, s)
n, m, vm, snapshot, x, y = (self.toNumber(x) for x in match.groups())
return {"slot": (n, m), "vm": (vm, snapshot), "vmSlot": (x, y)}
def diskNewType(self, s):
fmt = "[<ctrlNr>-<slotNr>,]size=<capacity>[mb|gb|tb]"
pattern = "^(?:(\d+)-(\d+),)?size=(\d+)([m|M|g|G|t|T].?)?$"
match = self.matchPattern(pattern, fmt, s)
n, m, size, unit = (self.toNumber(x) for x in match.groups())
factor = self.unitToFactor(unit)
size = factor * size
return {"slot": (n, m), "capacity": size}
def diskDestroyType(self, s):
fmt = "<ctrlNr>-<slotNr>"
pattern = "^(\d+)-(\d+)$"
match = self.matchPattern(pattern, fmt, s)
n, m = (self.toNumber(x) for x in match.groups())
return (n, m)
def nicAddType(self, s):
fmt = "[mac=xx:xx:xx:xx:xx:xx,ip=a.b.c.d/8,gw=u,v,w,x]"
macPattern = "[.:]".join(["[0-9A-F]{2}"] * 6)
ipPattern = "\.".join(["\d+"] * 4)
pattern = "^(?:mac=({0}),?)?(?:ip=({1})(?:/(\d+))?,?)?(?:gw=({1}),?)?$".format(
macPattern, ipPattern)
match = self.matchPattern(pattern, fmt, s)
mac, ip, mask, gw = match.groups()
return {"mac": mac, "ip": ip, "mask": mask, "gw": gw}
def matchPattern(self, pattern, fmt, s):
reg = re.compile(pattern, re.I)
match = reg.search(s)
if not match:
raise argparse.ArgumentTypeError(
"'{}' does not match format'{}'".format(s, fmt))
return match
def getSubParser(self, function, subparsers, **kwargs):
parser = subparsers.add_parser(
function,
formatter_class=argparse.RawTextHelpFormatter,
**kwargs)
return parser
def initParser(self):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
self.parser = parser
parser.add_argument(
"--dryrun", action="store_true",
help=argparse.SUPPRESS)
parser.add_argument(
"--vcenter",
type=str,
metavar="host",
help="Hostname/IP of the VCenter")
parser.add_argument(
"--vc-user",
type=str,
metavar="user", dest="vcUser",
help="VCenter username")
parser.add_argument(
"--vc-pass",
type=str,
metavar="password", dest="vcPass",
help="VCenter password, may be base64 encoded")
parser.add_argument(
"--auth",
type=str,
default="auth.ini",
metavar="auth.ini",
help="Load credentials from auth file, user empty to save in user home")
parser.add_argument(
"--save-auth",
action="store_true", dest="saveAuth",
help="Save/update auth file")
parser.add_argument(
"--ask-cred", action="store_true", dest="askCred",
help="Force user to enter credentials")
subparsers = parser.add_subparsers(dest="which")
subparsers.required = True
for mod in commands.commands:
mod.addParser(self, subparsers)
def _currentParserArgs(self, args):
which = args.which
keys = getattr(args, "{}Args".format(which))
parserArgs = {}
for k, v in vars(args).items():
if k in keys:
parserArgs[k] = v
return parserArgs
def showFullHelp(self):
# http://stackoverflow.com/questions/20094215/argparse-subparser-monolithic-help-output
parser = self.parser
# print main help
print(parser.format_help())
# retrieve subparsers from parser
subparsers_actions = [
action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)]
# there will probably only be one subparser_action,
# but better save than sorry
for subparsers_action in subparsers_actions:
# get all subparsers and print help
for choice, subparser in subparsers_action.choices.items():
print("--------------------------------------------------------------------------------")
print("Command '{}'".format(choice))
print(subparser.format_help())
def parse(self, argv=sys.argv[1:]):
args, nestedArgv = self.parser.parse_known_args(argv)
args.m2m = False
if args.which == "m2m":
args.m2m = True
if not nestedArgv or nestedArgv[0] == "-":
# read json args from stdin
raise NotImplementedError()
else:
self.parser.parse_args(nestedArgv, namespace=args)
else:
self.parser.parse_args(argv, namespace=args)
# camelCase, remove unwanted characters
which = args.which
which = which.title()
which = re.sub("-", "", which)
which = which[0].lower() + which[1:]
args.which = which
parserArgs = self._currentParserArgs(args)
return which, args, parserArgs
|
[
"traceback.print_exc",
"argparse.ArgumentParser",
"fnmatch.translate",
"re.sub",
"re.compile"
] |
[((4208, 4233), 're.compile', 're.compile', (['pattern', 're.I'], {}), '(pattern, re.I)\n', (4218, 4233), False, 'import re\n'), ((4704, 4774), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.RawTextHelpFormatter'}), '(formatter_class=argparse.RawTextHelpFormatter)\n', (4727, 4774), False, 'import argparse\n'), ((7962, 7984), 're.sub', 're.sub', (['"""-"""', '""""""', 'which'], {}), "('-', '', which)\n", (7968, 7984), False, 'import re\n'), ((1259, 1282), 're.compile', 're.compile', (['pattern[1:]'], {}), '(pattern[1:])\n', (1269, 1282), False, 'import re\n'), ((1327, 1353), 'fnmatch.translate', 'fnmatch.translate', (['pattern'], {}), '(pattern)\n', (1344, 1353), False, 'import fnmatch\n'), ((1379, 1404), 're.compile', 're.compile', (["('^' + pattern)"], {}), "('^' + pattern)\n", (1389, 1404), False, 'import re\n'), ((1431, 1452), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1450, 1452), False, 'import traceback\n')]
|
"""
Helper functions used by multiple parts of LAtools.
(c) <NAME> : https://github.com/oscarbranson
"""
import os
import shutil
import re
import configparser
import datetime as dt
import numpy as np
import dateutil as du
import pkg_resources as pkgrs
import uncertainties.unumpy as un
import scipy.interpolate as interp
from .stat_fns import nominal_values
from .analyte_names import pretty_element
# Bunch modifies dict to allow item access using dot (.) operator
class Bunch(dict):
def __init__(self, *args, **kwds):
super(Bunch, self).__init__(*args, **kwds)
self.__dict__ = self
# warnings monkeypatch
# https://stackoverflow.com/questions/2187269/python-print-only-the-message-on-warnings
def _warning(message, category=UserWarning,
filename='', lineno=-1,
file=None, line=None):
print(message)
def get_date(datetime, time_format=None):
"""
Return a datetime oject from a string, with optional time format.
Parameters
----------
datetime : str
Date-time as string in any sensible format.
time_format : datetime str (optional)
String describing the datetime format. If missing uses
dateutil.parser to guess time format.
"""
if time_format is None:
t = du.parser.parse(datetime)
else:
t = dt.datetime.strptime(datetime, time_format)
return t
def get_total_n_points(d):
"""
Returns the total number of data points in values of dict.
Paramters
---------
d : dict
"""
n = 0
for di in d.values():
n += len(di)
return n
def get_total_time_span(d):
"""
Returns total length of analysis.
"""
tmax = 0
for di in d.values():
if di.uTime.max() > tmax:
tmax = di.uTime.max()
return tmax
def unitpicker(a, llim=0.1, denominator=None, focus_stage=None):
"""
Determines the most appropriate plotting unit for data.
Parameters
----------
a : float or array-like
number to optimise. If array like, the 25% quantile is optimised.
llim : float
minimum allowable value in scaled data.
Returns
-------
(float, str)
(multiplier, unit)
"""
if not isinstance(a, (int, float)):
a = nominal_values(a)
a = np.percentile(a[~np.isnan(a)], 25)
if a == 0:
raise ValueError("Cannot calculate unit for zero.")
if denominator is not None:
pd = pretty_element(denominator)
else:
pd = ''
if focus_stage == 'calibrated':
udict = {0: 'mol/mol ' + pd,
1: 'mmol/mol ' + pd,
2: '$\mu$mol/mol ' + pd,
3: 'nmol/mol ' + pd,
4: 'pmol/mol ' + pd,
5: 'fmol/mol ' + pd}
elif focus_stage == 'ratios':
udict = {0: 'counts/count ' + pd,
1: '$10^{-3}$ counts/count ' + pd,
2: '$10^{-6}$ counts/count ' + pd,
3: '$10^{-9}$ counts/count ' + pd,
4: '$10^{-12}$ counts/count ' + pd,
5: '$10^{-15}$ counts/count ' + pd}
elif focus_stage in ('rawdata', 'despiked', 'bkgsub'):
        udict = {0: 'counts',
                 1: '$10^{-3}$ counts',
                 2: '$10^{-6}$ counts',
                 3: '$10^{-9}$ counts',
                 4: '$10^{-12}$ counts',
                 5: '$10^{-15}$ counts'}
else:
udict = {0: '', 1: '', 2: '', 3: '', 4: '', 5: ''}
a = abs(a)
n = 0
if a < llim:
while a < llim:
a *= 1000
n += 1
return float(1000**n), udict[n]
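# Editor's sketch (illustrative): unitpicker rescales small values into a
# readable range and returns the matching (multiplier, unit-label) pair, e.g.
#   unitpicker(3e-6, focus_stage='calibrated')  # -> (1000000.0, '$\mu$mol/mol ')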
def collate_data(in_dir, extension='.csv', out_dir=None):
"""
    Copy all csvs in a nested directory to a single directory.
Function to copy all csvs from a directory, and place
them in a new directory.
Parameters
----------
in_dir : str
Input directory containing csv files in subfolders
extension : str
The extension that identifies your data files.
Defaults to '.csv'.
out_dir : str
Destination directory
Returns
-------
None
"""
if out_dir is None:
out_dir = './' + re.search('^\.(.*)', extension).groups(0)[0]
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
for p, d, fs in os.walk(in_dir):
for f in fs:
if extension in f:
shutil.copy(p + '/' + f, out_dir + '/' + f)
return
def bool_transitions(a):
"""
Return indices where a boolean array changes from True to False
"""
return np.where(a[:-1] != a[1:])[0]
def bool_2_indices(a):
"""
Convert boolean array into a 2D array of (start, stop) pairs.
"""
if any(a):
lims = []
lims.append(np.where(a[:-1] != a[1:])[0])
if a[0]:
lims.append([0])
if a[-1]:
lims.append([len(a) - 1])
lims = np.concatenate(lims)
lims.sort()
return np.reshape(lims, (lims.size // 2, 2))
else:
return None
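# Editor's sketch (illustrative):
#   bool_2_indices(np.array([True, True, False, False, False]))
# returns array([[0, 1]]): a single True run spanning indices 0..1.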
def enumerate_bool(bool_array, nstart=0):
"""
Consecutively numbers contiguous booleans in array.
i.e. a boolean sequence, and resulting numbering
T F T T T F T F F F T T F
0-1 1 1 - 2 ---3 3 -
    where ' - ' denotes False values.
Parameters
----------
bool_array : array_like
Array of booleans.
nstart : int
The number of the first boolean group.
"""
ind = bool_2_indices(bool_array)
ns = np.full(bool_array.size, nstart, dtype=int)
for n, lims in enumerate(ind):
ns[lims[0]:lims[-1] + 1] = nstart + n + 1
return ns
def tuples_2_bool(tuples, x):
"""
Generate boolean array from list of limit tuples.
Parameters
----------
tuples : array_like
[2, n] array of (start, end) values
x : array_like
x scale the tuples are mapped to
Returns
-------
array_like
boolean array, True where x is between each pair of tuples.
"""
if np.ndim(tuples) == 1:
tuples = [tuples]
out = np.zeros(x.size, dtype=bool)
for l, u in tuples:
out[(x > l) & (x < u)] = True
return out
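# Editor's sketch (illustrative): tuples_2_bool maps limit pairs back onto x,
#   tuples_2_bool([(1.5, 4.5)], np.arange(6))
# returns array([False, False, True, True, True, False]).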
def get_example_data(destination_dir):
if os.path.isdir(destination_dir):
overwrite = input(destination_dir +
' already exists. Overwrite? [N/y]: ').lower() == 'y'
if overwrite:
shutil.rmtree(destination_dir)
        else:
            print(destination_dir + ' was not overwritten.')
            return
shutil.copytree(pkgrs.resource_filename('latools', 'resources/test_data'),
destination_dir)
return
def rangecalc(xs, pad=0.05):
mn = np.nanmin(xs)
mx = np.nanmax(xs)
xr = mx - mn
return [mn - pad * xr, mx + pad * xr]
class un_interp1d(object):
"""
object for handling interpolation of values with uncertainties.
"""
def __init__(self, x, y, fill_value=np.nan, **kwargs):
if isinstance(fill_value, tuple):
nom_fill = tuple([un.nominal_values(v) for v in fill_value])
std_fill = tuple([un.std_devs(v) for v in fill_value])
else:
nom_fill = std_fill = fill_value
self.nom_interp = interp.interp1d(un.nominal_values(x),
un.nominal_values(y),
fill_value=nom_fill, **kwargs)
self.std_interp = interp.interp1d(un.nominal_values(x),
un.std_devs(y),
fill_value=std_fill, **kwargs)
def new(self, xn):
yn = self.nom_interp(xn)
yn_err = self.std_interp(xn)
return un.uarray(yn, yn_err)
def new_nom(self, xn):
return self.nom_interp(xn)
def new_std(self, xn):
return self.std_interp(xn)
def rolling_window(a, window, pad=None):
"""
Returns (win, len(a)) rolling - window array of data.
Parameters
----------
a : array_like
Array to calculate the rolling window of
    window : int
        The width of the rolling window.
    pad : str or same as dtype(a)
        How to pad the ends of the output: 'ends', 'mean_ends',
        'repeat_ends', or a fill value; None returns the unpadded windows.
Returns
-------
array_like
An array of shape (n, window), where n is either len(a) - window
if pad is None, or len(a) if pad is not None.
"""
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1], )
out = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
# pad shape
if window % 2 == 0:
npre = window // 2 - 1
npost = window // 2
else:
npre = npost = window // 2
if isinstance(pad, str):
if pad == 'ends':
prepad = np.full((npre, window), a[0])
postpad = np.full((npost, window), a[-1])
elif pad == 'mean_ends':
prepad = np.full((npre, window), np.mean(a[:(window // 2)]))
postpad = np.full((npost, window), np.mean(a[-(window // 2):]))
elif pad == 'repeat_ends':
prepad = np.full((npre, window), out[0])
postpad = np.full((npost, window), out[0])
else:
raise ValueError("If pad is a string, it must be either 'ends', 'mean_ends' or 'repeat_ends'.")
return np.concatenate((prepad, out, postpad))
elif pad is not None:
pre_blankpad = np.empty(((npre, window)))
pre_blankpad[:] = pad
post_blankpad = np.empty(((npost, window)))
post_blankpad[:] = pad
return np.concatenate([pre_blankpad, out, post_blankpad])
else:
return out
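# Editor's sketch (illustrative):
#   rolling_window(np.arange(5), 3).shape              # (3, 3): no padding
#   rolling_window(np.arange(5), 3, pad='ends').shape  # (5, 3): ends filled from a[0] and a[-1]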
def fastsmooth(a, win=11):
"""
Returns rolling - window smooth of a.
Function to efficiently calculate the rolling mean of a numpy
array using 'stride_tricks' to split up a 1D array into an ndarray of
sub - sections of the original array, of dimensions [len(a) - win, win].
Parameters
----------
    a : array_like
        The 1D array to calculate the rolling mean of.
    win : int
        The width of the rolling window.
    Returns
    -------
    array_like
        Smoothed a, assuming a constant integer x-scale.
"""
# check to see if 'window' is odd (even does not work)
if win % 2 == 0:
win += 1 # add 1 to window if it is even.
kernel = np.ones(win) / win
npad = int((win - 1) / 2)
spad = np.full(npad + 1, np.mean(a[:(npad + 1)]))
epad = np.full(npad - 1, np.mean(a[-(npad - 1):]))
return np.concatenate([spad, np.convolve(a, kernel, 'valid'), epad])
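# Editor's sketch (illustrative): fastsmooth is a centred moving average that
# preserves length, e.g. fastsmooth(np.arange(10, dtype=float), win=3) returns
# a 10-element array whose ends are padded with local means.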
def fastgrad(a, win=11):
"""
Returns rolling - window gradient of a.
Function to efficiently calculate the rolling gradient of a numpy
array using 'stride_tricks' to split up a 1D array into an ndarray of
sub - sections of the original array, of dimensions [len(a) - win, win].
Parameters
----------
a : array_like
The 1D array to calculate the rolling gradient of.
win : int
The width of the rolling window.
Returns
-------
array_like
        Gradient of a, assuming a constant integer x-scale.
"""
# check to see if 'window' is odd (even does not work)
if win % 2 == 0:
        win += 1 # add 1 to window if it is even.
# trick for efficient 'rolling' computation in numpy
# shape = a.shape[:-1] + (a.shape[-1] - win + 1, win)
# strides = a.strides + (a.strides[-1], )
# wins = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
wins = rolling_window(a, win, 'ends')
# apply rolling gradient to data
a = map(lambda x: np.polyfit(np.arange(win), x, 1)[0], wins)
return np.array(list(a))
def calc_grads(x, dat, keys=None, win=5):
"""
Calculate gradients of values in dat.
Parameters
----------
x : array like
Independent variable for items in dat.
dat : dict
{key: dependent_variable} pairs
keys : str or array-like
Which keys in dict to calculate the gradient of.
win : int
        The size of the rolling window for gradient calculation
Returns
-------
dict of gradients
"""
if keys is None:
keys = dat.keys()
def grad(xy):
if (~np.isnan(xy)).all():
try:
return np.polyfit(xy[0], xy[1], 1)[0]
except ValueError:
return np.nan
else:
return np.nan
xs = rolling_window(x, win, pad='repeat_ends')
grads = Bunch()
for k in keys:
d = nominal_values(rolling_window(dat[k], win, pad='repeat_ends'))
grads[k] = np.array(list(map(grad, zip(xs, d))))
return grads
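# Editor's sketch (illustrative): for linear data the gradient is constant,
#   t = np.arange(10, dtype=float)
#   calc_grads(t, {'Mg': 2 * t}, keys=['Mg'], win=5)['Mg']  # ~2.0 everywhere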
def findmins(x, y):
""" Function to find local minima.
Parameters
----------
x, y : array_like
1D arrays of the independent (x) and dependent (y) variables.
Returns
-------
array_like
Array of points in x where y has a local minimum.
"""
return x[np.r_[False, y[1:] < y[:-1]] & np.r_[y[:-1] < y[1:], False]]
def stack_keys(ddict, keys, extra=None):
"""
Combine elements of ddict into an array of shape (len(ddict[key]), len(keys)).
Useful for preparing data for sklearn.
Parameters
----------
ddict : dict
A dict containing arrays or lists to be stacked.
Must be of equal length.
keys : list or str
The keys of dict to stack. Must be present in ddict.
extra : list (optional)
A list of additional arrays to stack. Elements of extra
must be the same length as arrays in ddict.
Extras are inserted as the first columns of output.
"""
if isinstance(keys, str):
d = [ddict[keys]]
else:
d = [ddict[k] for k in keys]
if extra is not None:
d = extra + d
return np.vstack(d).T
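# Editor's sketch (illustrative): stack_keys builds sklearn-style matrices,
#   d = {'Mg': np.arange(4), 'Al': np.arange(4) * 2}
#   stack_keys(d, ['Mg', 'Al']).shape  # (4, 2): one column per key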
|
[
"os.mkdir",
"numpy.polyfit",
"numpy.empty",
"os.walk",
"numpy.ones",
"pkg_resources.resource_filename",
"numpy.isnan",
"numpy.mean",
"numpy.arange",
"shutil.rmtree",
"numpy.convolve",
"shutil.copy",
"numpy.full",
"numpy.ndim",
"numpy.reshape",
"re.search",
"dateutil.parser.parse",
"uncertainties.unumpy.uarray",
"datetime.datetime.strptime",
"numpy.lib.stride_tricks.as_strided",
"uncertainties.unumpy.nominal_values",
"numpy.concatenate",
"numpy.vstack",
"numpy.nanmax",
"uncertainties.unumpy.std_devs",
"os.path.isdir",
"numpy.zeros",
"numpy.nanmin",
"numpy.where"
] |
[((4364, 4379), 'os.walk', 'os.walk', (['in_dir'], {}), '(in_dir)\n', (4371, 4379), False, 'import os\n'), ((5523, 5566), 'numpy.full', 'np.full', (['bool_array.size', 'nstart'], {'dtype': 'int'}), '(bool_array.size, nstart, dtype=int)\n', (5530, 5566), True, 'import numpy as np\n'), ((6100, 6128), 'numpy.zeros', 'np.zeros', (['x.size'], {'dtype': 'bool'}), '(x.size, dtype=bool)\n', (6108, 6128), True, 'import numpy as np\n'), ((6253, 6283), 'os.path.isdir', 'os.path.isdir', (['destination_dir'], {}), '(destination_dir)\n', (6266, 6283), False, 'import os\n'), ((6717, 6730), 'numpy.nanmin', 'np.nanmin', (['xs'], {}), '(xs)\n', (6726, 6730), True, 'import numpy as np\n'), ((6740, 6753), 'numpy.nanmax', 'np.nanmax', (['xs'], {}), '(xs)\n', (6749, 6753), True, 'import numpy as np\n'), ((8484, 8548), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', (['a'], {'shape': 'shape', 'strides': 'strides'}), '(a, shape=shape, strides=strides)\n', (8515, 8548), True, 'import numpy as np\n'), ((1276, 1301), 'dateutil.parser.parse', 'du.parser.parse', (['datetime'], {}), '(datetime)\n', (1291, 1301), True, 'import dateutil as du\n'), ((1324, 1367), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['datetime', 'time_format'], {}), '(datetime, time_format)\n', (1344, 1367), True, 'import datetime as dt\n'), ((4293, 4315), 'os.path.isdir', 'os.path.isdir', (['out_dir'], {}), '(out_dir)\n', (4306, 4315), False, 'import os\n'), ((4325, 4342), 'os.mkdir', 'os.mkdir', (['out_dir'], {}), '(out_dir)\n', (4333, 4342), False, 'import os\n'), ((4625, 4650), 'numpy.where', 'np.where', (['(a[:-1] != a[1:])'], {}), '(a[:-1] != a[1:])\n', (4633, 4650), True, 'import numpy as np\n'), ((4961, 4981), 'numpy.concatenate', 'np.concatenate', (['lims'], {}), '(lims)\n', (4975, 4981), True, 'import numpy as np\n'), ((5018, 5055), 'numpy.reshape', 'np.reshape', (['lims', '(lims.size // 2, 2)'], {}), '(lims, (lims.size // 2, 2))\n', (5028, 5055), True, 'import numpy as np\n'), ((6041, 6056), 'numpy.ndim', 'np.ndim', (['tuples'], {}), '(tuples)\n', (6048, 6056), True, 'import numpy as np\n'), ((6570, 6627), 'pkg_resources.resource_filename', 'pkgrs.resource_filename', (['"""latools"""', '"""resources/test_data"""'], {}), "('latools', 'resources/test_data')\n", (6593, 6627), True, 'import pkg_resources as pkgrs\n'), ((7731, 7752), 'uncertainties.unumpy.uarray', 'un.uarray', (['yn', 'yn_err'], {}), '(yn, yn_err)\n', (7740, 7752), True, 'import uncertainties.unumpy as un\n'), ((9316, 9354), 'numpy.concatenate', 'np.concatenate', (['(prepad, out, postpad)'], {}), '((prepad, out, postpad))\n', (9330, 9354), True, 'import numpy as np\n'), ((10354, 10366), 'numpy.ones', 'np.ones', (['win'], {}), '(win)\n', (10361, 10366), True, 'import numpy as np\n'), ((10432, 10453), 'numpy.mean', 'np.mean', (['a[:npad + 1]'], {}), '(a[:npad + 1])\n', (10439, 10453), True, 'import numpy as np\n'), ((10486, 10510), 'numpy.mean', 'np.mean', (['a[-(npad - 1):]'], {}), '(a[-(npad - 1):])\n', (10493, 10510), True, 'import numpy as np\n'), ((13829, 13841), 'numpy.vstack', 'np.vstack', (['d'], {}), '(d)\n', (13838, 13841), True, 'import numpy as np\n'), ((6443, 6473), 'shutil.rmtree', 'shutil.rmtree', (['destination_dir'], {}), '(destination_dir)\n', (6456, 6473), False, 'import shutil\n'), ((7268, 7288), 'uncertainties.unumpy.nominal_values', 'un.nominal_values', (['x'], {}), '(x)\n', (7285, 7288), True, 'import uncertainties.unumpy as un\n'), ((7332, 7352), 'uncertainties.unumpy.nominal_values', 'un.nominal_values', (['y'], {}), '(y)\n', (7349, 7352), True, 'import uncertainties.unumpy as un\n'), ((7469, 7489), 'uncertainties.unumpy.nominal_values', 'un.nominal_values', (['x'], {}), '(x)\n', (7486, 7489), True, 'import uncertainties.unumpy as un\n'), ((7533, 7547), 'uncertainties.unumpy.std_devs', 'un.std_devs', (['y'], {}), '(y)\n', (7544, 7547), True, 'import uncertainties.unumpy as un\n'), ((8769, 8798), 'numpy.full', 'np.full', (['(npre, window)', 'a[0]'], {}), '((npre, window), a[0])\n', (8776, 8798), True, 'import numpy as np\n'), ((8821, 8852), 'numpy.full', 'np.full', (['(npost, window)', 'a[-1]'], {}), '((npost, window), a[-1])\n', (8828, 8852), True, 'import numpy as np\n'), ((9404, 9428), 'numpy.empty', 'np.empty', (['(npre, window)'], {}), '((npre, window))\n', (9412, 9428), True, 'import numpy as np\n'), ((9485, 9510), 'numpy.empty', 'np.empty', (['(npost, window)'], {}), '((npost, window))\n', (9493, 9510), True, 'import numpy as np\n'), ((9559, 9609), 'numpy.concatenate', 'np.concatenate', (['[pre_blankpad, out, post_blankpad]'], {}), '([pre_blankpad, out, post_blankpad])\n', (9573, 9609), True, 'import numpy as np\n'), ((10545, 10576), 'numpy.convolve', 'np.convolve', (['a', 'kernel', '"""valid"""'], {}), "(a, kernel, 'valid')\n", (10556, 10576), True, 'import numpy as np\n'), ((4449, 4492), 'shutil.copy', 'shutil.copy', (["(p + '/' + f)", "(out_dir + '/' + f)"], {}), "(p + '/' + f, out_dir + '/' + f)\n", (4460, 4492), False, 'import shutil\n'), ((4813, 4838), 'numpy.where', 'np.where', (['(a[:-1] != a[1:])'], {}), '(a[:-1] != a[1:])\n', (4821, 4838), True, 'import numpy as np\n'), ((2321, 2332), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (2329, 2332), True, 'import numpy as np\n'), ((7057, 7077), 'uncertainties.unumpy.nominal_values', 'un.nominal_values', (['v'], {}), '(v)\n', (7074, 7077), True, 'import uncertainties.unumpy as un\n'), ((7130, 7144), 'uncertainties.unumpy.std_devs', 'un.std_devs', (['v'], {}), '(v)\n', (7141, 7144), True, 'import uncertainties.unumpy as un\n'), ((8931, 8955), 'numpy.mean', 'np.mean', (['a[:window // 2]'], {}), '(a[:window // 2])\n', (8938, 8955), True, 'import numpy as np\n'), ((9006, 9033), 'numpy.mean', 'np.mean', (['a[-(window // 2):]'], {}), '(a[-(window // 2):])\n', (9013, 9033), True, 'import numpy as np\n'), ((9091, 9122), 'numpy.full', 'np.full', (['(npre, window)', 'out[0]'], {}), '((npre, window), out[0])\n', (9098, 9122), True, 'import numpy as np\n'), ((9145, 9177), 'numpy.full', 'np.full', (['(npost, window)', 'out[0]'], {}), '((npost, window), out[0])\n', (9152, 9177), True, 'import numpy as np\n'), ((11649, 11663), 'numpy.arange', 'np.arange', (['win'], {}), '(win)\n', (11658, 11663), True, 'import numpy as np\n'), ((12258, 12270), 'numpy.isnan', 'np.isnan', (['xy'], {}), '(xy)\n', (12266, 12270), True, 'import numpy as np\n'), ((12319, 12346), 'numpy.polyfit', 'np.polyfit', (['xy[0]', 'xy[1]', '(1)'], {}), '(xy[0], xy[1], 1)\n', (12329, 12346), True, 'import numpy as np\n'), ((4236, 4268), 're.search', 're.search', (['"""^\\\\.(.*)"""', 'extension'], {}), "('^\\\\.(.*)', extension)\n", (4245, 4268), False, 'import re\n')]
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# %%
import pandas as pd
from tasrif.processing_pipeline.pandas import FillNAOperator
df = pd.DataFrame(
{
"name": ["Alfred", "juli", "Tom", "Ali"],
"height": [np.nan, 155, 159, 165],
"born": [pd.NaT, pd.Timestamp("2010-04-25"), pd.NaT, pd.NaT],
}
)
operator = FillNAOperator(axis=0, value="laptop")
df = operator.process(df)[0]
df
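# %%
# Editor's note (illustrative, an assumption about the wrapper): FillNAOperator
# forwards axis/value to pandas' DataFrame.fillna, so the NaN height and NaT
# 'born' entries above come back as the literal value 'laptop'.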
|
[
"tasrif.processing_pipeline.pandas.FillNAOperator",
"pandas.Timestamp"
] |
[((584, 622), 'tasrif.processing_pipeline.pandas.FillNAOperator', 'FillNAOperator', ([], {'axis': '(0)', 'value': '"""laptop"""'}), "(axis=0, value='laptop')\n", (598, 622), False, 'from tasrif.processing_pipeline.pandas import FillNAOperator\n'), ((519, 545), 'pandas.Timestamp', 'pd.Timestamp', (['"""2010-04-25"""'], {}), "('2010-04-25')\n", (531, 545), True, 'import pandas as pd\n')]
|
# coding=utf8
import re
def tokenize_prolog(logical_form):
# Tokenize Prolog
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
toks = [t if "::" not in t else t.replace(
"::", " ") for t in normalized_lf.split()]
return toks
def normalize_prolog_variable_names(logical_form):
"""Standardize variable names in Prolog with De Brujin indices."""
toks = tokenize_prolog(logical_form)
# Replace Variable
cur_vars = []
new_toks = []
for w in toks:
if re.match('[A-Z]', w) or re.match('_\d+', w):
if w in cur_vars:
ind_from_end = len(cur_vars) - cur_vars.index(w) - 1
new_toks.append('V%d' % ind_from_end)
else:
cur_vars.append(w)
new_toks.append('NV')
else:
new_toks.append(w)
return ''.join(new_toks)
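# Editor's sketch (illustrative): each first mention becomes 'NV' and repeats
# become De Bruijn-style back references, e.g.
#   normalize_prolog_variable_names('answer(A,population(A,B))')
# returns 'answer(NV,population(V0,NV))'.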
def preprocess_prolog(logical_form):
normalized_prolog = normalize_prolog_variable_names(logical_form)
normalized_prolog = re.sub(r"\s*\(\s*", "(", normalized_prolog)
normalized_prolog = re.sub(r"\s*\)\s*", ")", normalized_prolog)
normalized_prolog = re.sub(r"\s*,\s*", ",", normalized_prolog)
normalized_prolog = normalized_prolog.replace("\+ r", "\+r")
return normalized_prolog
def preprocess_funql(lf):
l = re.sub(r"\s*\(\s*", "(", lf)
l = re.sub(r"\s*\)\s*", ")", l)
l = re.sub(r"\s*,\s*", ",", l)
return l
def postprocess_prolog(logical_form):
normalized_prolog = logical_form.replace("windo nt", "windows nt")
normalized_prolog = normalized_prolog.replace("windo 95", "windows 95")
return normalized_prolog
def postprocess_sql(logical_form):
normalized_sql = logical_form.replace("windo nt", "windows nt")
normalized_sql = normalized_sql.replace("windo 95", "windows 95")
normalized_sql = normalized_sql.replace("\\'", "'")
return normalized_sql
def postprocess_lambda(logical_form):
normalized_lc = logical_form.replace("windo nt", "windows nt")
normalized_lc = normalized_lc.replace("windo 95", "windows 95")
normalized_lc = normalized_lc.replace("\\'", "'")
return normalized_lc
def normalize_sql(logical_form):
s = logical_form.replace("( ", "(").replace(" )", ")").replace(
";", "").replace('"', "'").replace(' . ', '.').strip().lower()
s = s.replace('max (', 'max(')
s = s.replace('min (', 'min(')
s = s.replace('avg (', 'avg(')
s = s.replace('count (', 'count(')
s = s.replace('sum (', 'sum(')
s = s.replace('count(1)', 'count(*)')
return s
def normalize_lambda_calculus(logical_form):
s = logical_form.replace(
'\s+', ' ').replace("( ", "(").replace(" )", ")").replace(') )', '))').replace(' :', ':').strip()
s = s.replace('"', "'").replace(') )', '))')
return s
if __name__ == '__main__':
sql = '(lambda $0:e (and (job $0) (language $0 perl) (company $0 "Lockheed Martin Aeronautics") (loc $0 colorado)))'
normalized_sql = normalize_lambda_calculus(sql).replace("'", "\\'")
sql_ = postprocess_lambda(normalized_sql)
print(sql)
print(normalized_sql)
print(sql_)
|
[
"re.sub",
"re.match"
] |
[((1201, 1246), 're.sub', 're.sub', (['"""\\\\s*\\\\(\\\\s*"""', '"""("""', 'normalized_prolog'], {}), "('\\\\s*\\\\(\\\\s*', '(', normalized_prolog)\n", (1207, 1246), False, 'import re\n'), ((1269, 1314), 're.sub', 're.sub', (['"""\\\\s*\\\\)\\\\s*"""', '""")"""', 'normalized_prolog'], {}), "('\\\\s*\\\\)\\\\s*', ')', normalized_prolog)\n", (1275, 1314), False, 'import re\n'), ((1337, 1380), 're.sub', 're.sub', (['"""\\\\s*,\\\\s*"""', '""","""', 'normalized_prolog'], {}), "('\\\\s*,\\\\s*', ',', normalized_prolog)\n", (1343, 1380), False, 'import re\n'), ((1552, 1582), 're.sub', 're.sub', (['"""\\\\s*\\\\(\\\\s*"""', '"""("""', 'lf'], {}), "('\\\\s*\\\\(\\\\s*', '(', lf)\n", (1558, 1582), False, 'import re\n'), ((1589, 1618), 're.sub', 're.sub', (['"""\\\\s*\\\\)\\\\s*"""', '""")"""', 'l'], {}), "('\\\\s*\\\\)\\\\s*', ')', l)\n", (1595, 1618), False, 'import re\n'), ((1625, 1652), 're.sub', 're.sub', (['"""\\\\s*,\\\\s*"""', '""","""', 'l'], {}), "('\\\\s*,\\\\s*', ',', l)\n", (1631, 1652), False, 'import re\n'), ((705, 725), 're.match', 're.match', (['"""[A-Z]"""', 'w'], {}), "('[A-Z]', w)\n", (713, 725), False, 'import re\n'), ((729, 749), 're.match', 're.match', (['"""_\\\\d+"""', 'w'], {}), "('_\\\\d+', w)\n", (737, 749), False, 'import re\n')]
|
from datetime import datetime
from shutil import copy2, copytree
import os
import errno
import subprocess
import re
from soteria.exceptions import BoogieParseError, BoogieTypeError, BoogieVerificationError, BoogieUnknownError
from soteria.debug_support.debugger import Debugger
##TODO : refactor this class
class Executor:
#@classmethod
def execute_boogie(operations, specification, name = 'specification'):
model_file_path = 'results/' + name + '.model'
spec_file = Executor.create_spec_file(specification, name)
path_to_boogie = '/boogie/Binaries/Boogie.exe'
proc = subprocess.Popen(['mono', path_to_boogie, '-mv:' + model_file_path, spec_file], stdout=subprocess.PIPE)
out = proc.communicate()[0]
status = Executor.get_execution_status(name, out.decode("utf-8"), operations, spec_file, model_file_path)
return status
def create_spec_file(text, name):
with open('results/' + name + '.bpl', 'w') as f:
f.write(text)
return 'results/' + name + '.bpl'
def get_execution_status(name, result, operations, spec_file, model_file_path):
if 'parse errors detected' in result:
raise BoogieParseError(result + '\n')
if 'type checking errors detected' in result:
raise BoogieTypeError(result + '\n')
if 'Boogie program verifier finished with' in result:
errors = Executor.get_number_of_errors(result[result.index('Boogie program verifier finished with') + 38:])
if errors > 0:
specification = open(spec_file).readlines()
debugger = Debugger()
info = debugger.get_debug_info(operations, specification, result, model_file_path)
raise BoogieVerificationError(name + '::::::\n' + info + '\n')
if errors == 0:
return result
raise BoogieUnknownError(result)
def get_number_of_errors(text):
p = re.compile('\d error')
m = p.search(text)
if m:
e = re.compile('\d')
n = e.search(m.group(0))
return int(n.group(0))
return -1
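    # Editor's sketch (illustrative):
    #   Executor.get_number_of_errors('... finished with 2 verified, 1 error')  # -> 1
    #   Executor.get_number_of_errors('no verifier summary here')               # -> -1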
|
[
"subprocess.Popen",
"soteria.exceptions.BoogieTypeError",
"soteria.exceptions.BoogieUnknownError",
"soteria.debug_support.debugger.Debugger",
"soteria.exceptions.BoogieParseError",
"soteria.exceptions.BoogieVerificationError",
"re.compile"
] |
[((611, 718), 'subprocess.Popen', 'subprocess.Popen', (["['mono', path_to_boogie, '-mv:' + model_file_path, spec_file]"], {'stdout': 'subprocess.PIPE'}), "(['mono', path_to_boogie, '-mv:' + model_file_path,\n spec_file], stdout=subprocess.PIPE)\n", (627, 718), False, 'import subprocess\n'), ((1892, 1918), 'soteria.exceptions.BoogieUnknownError', 'BoogieUnknownError', (['result'], {}), '(result)\n', (1910, 1918), False, 'from soteria.exceptions import BoogieParseError, BoogieTypeError, BoogieVerificationError, BoogieUnknownError\n'), ((1968, 1991), 're.compile', 're.compile', (['"""\\\\d error"""'], {}), "('\\\\d error')\n", (1978, 1991), False, 'import re\n'), ((1200, 1231), 'soteria.exceptions.BoogieParseError', 'BoogieParseError', (["(result + '\\n')"], {}), "(result + '\\n')\n", (1216, 1231), False, 'from soteria.exceptions import BoogieParseError, BoogieTypeError, BoogieVerificationError, BoogieUnknownError\n'), ((1304, 1334), 'soteria.exceptions.BoogieTypeError', 'BoogieTypeError', (["(result + '\\n')"], {}), "(result + '\\n')\n", (1319, 1334), False, 'from soteria.exceptions import BoogieParseError, BoogieTypeError, BoogieVerificationError, BoogieUnknownError\n'), ((2049, 2066), 're.compile', 're.compile', (['"""\\\\d"""'], {}), "('\\\\d')\n", (2059, 2066), False, 'import re\n'), ((1631, 1641), 'soteria.debug_support.debugger.Debugger', 'Debugger', ([], {}), '()\n', (1639, 1641), False, 'from soteria.debug_support.debugger import Debugger\n'), ((1763, 1819), 'soteria.exceptions.BoogieVerificationError', 'BoogieVerificationError', (["(name + '::::::\\n' + info + '\\n')"], {}), "(name + '::::::\\n' + info + '\\n')\n", (1786, 1819), False, 'from soteria.exceptions import BoogieParseError, BoogieTypeError, BoogieVerificationError, BoogieUnknownError\n')]
|
'''
ilf - compiler
'''
import os
import json
from .parse import parse
from .core import Ip4Filter, Ival
# -- GLOBALS
# (re)initialized by compile_file
GROUPS = {} # grp-name -> set([networks,.. , services, ..])
# -- AST = [(pos, [type, id, value]), ..]
def ast_iter(ast, types=None):
'iterate across statements of requested types'
types = [] if types is None else types
yield_all = len(types) == 0
for pos, stmt in ast:
if yield_all or stmt[0] in types:
yield (pos, stmt)
def ast_enum(ast, types=None):
'enumerate across statements of requested types'
types = [] if types is None else types
yield_all = len(types) == 0
for idx, (pos, stmt) in enumerate(ast):
if yield_all or stmt[0] in types:
yield (idx, pos, stmt)
def ast_errmsg(pos, err_type, stmt_type, msg):
'small helper to easily create ERROR/WARNING stmts'
return (pos, [err_type, stmt_type, msg])
def ast_includes(ast):
'expand include-statements in-place'
seen = {}
idx = -1
while idx+1 < len(ast): # while loop since ast is expanding
idx += 1
(fname, linenr, col), stmt = ast[idx]
if stmt[0] != 'INCLUDE':
continue
absname = os.path.realpath(os.path.normpath(
os.path.join(os.path.dirname(fname), stmt[1])))
if absname in seen:
ast[idx] = ast_errmsg(
(fname, linenr, 1),
'ERROR', stmt[0],
'{} already included at {}'.format(absname, seen[absname]))
continue
seen[absname] = '{}:{}:{}'.format(fname, linenr, col) # record include
try:
with open(absname, 'r') as fhdl:
include_ast = parse(fhdl) # possibly includes new includes(..)
except (IOError, OSError):
ast[idx] = ast_errmsg(
(fname, linenr, 1),
'ERROR', stmt[0],
'cannot find/read {}'.format(absname))
continue
ast[idx:idx+1] = include_ast # replace include(file) with its stmts
return ast
def _ivalify(lst, *types):
'turn a list of tokens (IP, PORTSTR, STR) into a list of Ivals'
global GROUPS
rv, errs = [], [] # in case of errors
for elm in lst:
try:
if elm[0] == 'IP':
rv.append(Ival.ip_pfx(elm[1]))
elif elm[0] == 'PORTSTR':
rv.append(Ival.port_str(elm[1]))
elif elm[0] == 'STR':
# rv.extend(GROUPS[elm[1]])
rv.extend(GROUPS.get(elm[1], []))
except (ValueError, KeyError):
errs.append(elm[1])
if len(errs):
msg = 'Invalid item(s): {}'.format(', '.join(errs))
raise ValueError(msg)
return [i for i in rv if i.type in types]
def ast_ivalify(ast):
'turn IP- and PORTSTR-values into Ival-s'
for idx, pos, stmt in ast_enum(ast, ['GROUP', 'RULE', 'RULEPLUS']):
try:
if stmt[0] == 'GROUP':
ivals = Ival.summary(_ivalify(stmt[2], Ival.IP, Ival.PORTSTR))
ast[idx] = (pos, (stmt[0], stmt[1], ivals))
elif stmt[0] == 'RULEPLUS':
scope = Ival.PORTSTR if stmt[1] == '@' else Ival.IP
ivals = Ival.summary(_ivalify(stmt[2]), scope)
ast[idx] = (pos, (stmt[0], stmt[1], ivals))
elif stmt[0] == 'RULE':
srcs = Ival.summary(_ivalify(stmt[2], Ival.IP))
dsts = Ival.summary(_ivalify(stmt[4], Ival.IP))
srvs = Ival.summary(_ivalify(stmt[5], Ival.PORTSTR))
ast[idx] = (pos, (stmt[0], stmt[1], srcs, stmt[3],
dsts, srvs, *stmt[6:]))
else:
raise ValueError('{} invalid stmt for ast_ivalify'.format(
stmt[0]))
except ValueError as e:
ast[idx] = ast_errmsg(pos, 'ERROR', stmt[0], '{}'.format((e)))
return ast
def ast_jsonify(ast):
'turn a rule\'s json string into a python dict'
# only RULE tuple's have json string (or None) as last element
for idx, pos, stmt in ast_enum(ast, ['RULE']):
try:
dta = None if stmt[-1] is None else json.loads(stmt[-1])
ast[idx] = (pos, (*stmt[0:-1], dta))
except (TypeError, json.decoder.JSONDecodeError) as e:
print('could not decode ', stmt[-1])
ast[idx] = ast_errmsg(pos, 'ERROR', stmt[0],
'json-error: {}'.format((e)))
return ast
def expand_refs(dct):
'return an expanded member list from a, possibly, recursive definition'
# dct is {name} -> set([name, ..]), which may refer to other names
for target, mbrs in dct.items():
        heap = list(mbrs)  # group names referenced by this target
seen, dct[target] = [target], set([])
while heap:
nxt = heap.pop()
if nxt in seen: # circular reference
continue
seen.append(nxt)
if nxt in dct:
heap.extend(list(dct[nxt]))
dct[target].add(nxt)
return dct
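# Editor's sketch (illustrative): recursive (even circular) references flatten,
#   expand_refs({'a': set(['b']), 'b': set(['a', 'c'])})
# returns {'a': {'b', 'c'}, 'b': {'a', 'c'}}.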
def ast_symbol_table(ast):
'Build the symbol table for the ast'
# need 2 passes, since forward referencing is allowed
global GROUPS
# (re-)initialise symbol table
GROUPS = {'any': set([Ival.ip_pfx('any')]),
'any/any': set([Ival.port_str('any/any')])}
TODO = {} # GROUP-name -> [group-names to include]
# 1st pass, collect direct IP/PORTSTR's per groupname and
# defer group references till phase2
for idx, pos, stmt in ast_enum(ast, ['GROUP']):
_, grpname, mbrs = stmt
refs = [t[1] for t in mbrs if t[0] == 'STR'] # only the name
TODO.setdefault(grpname, set()).update(refs) # defer named ref's
grpdef = GROUPS.setdefault(grpname, set()) # always define symbol
try:
ivals = _ivalify([m for m in mbrs if m[0] != 'STR'],
Ival.IP, Ival.PORTSTR)
grpdef.update(ivals) # add straight IP/PORTSTR's to symbol def.
except ValueError as e:
ast[idx] = (pos, ('ERROR', 'GROUP', e.args[0]))
# 2nd pass, expand delayed references
for name, mbrs in expand_refs(TODO).items():
for mbr in mbrs:
xtra = GROUPS.get(mbr, [])
if len(xtra) == 0:
print('empty ref', mbr, 'for group', name)
GROUPS.setdefault(name, set()).update(xtra)
return GROUPS
def ast_rules(ast):
'expand elements of the defined rules'
# ('RULE', <name>, [src], DIR, [dst], [srv], ('ACTION',act), <json-str>)
rules = []
for pos, stmt in ast_iter(ast, ['RULE', 'RULEPLUS']):
if stmt[0] == 'RULE':
rules.append(list(stmt[1:]))
elif stmt[0] == 'RULEPLUS':
if len(rules) == 0:
raise ValueError('dangling:{}'.format(str(stmt)))
if '@' == stmt[1]:
rules[-1][4].extend(stmt[2])
if '<' in stmt[1]:
rules[-1][1].extend(stmt[2])
if '>' in stmt[1]:
rules[-1][3].extend(stmt[2])
else:
raise ValueError('ast_rules cannot handle stmt {!r}'.format(stmt))
    # process direction of rules
# rule := [name, src, dst, srv, action, json-str]
rv = []
for rule in rules:
direction = rule[2] # capture direction and remove field
del rule[2]
rule[1] = Ival.summary(rule[1]) # summarize src
rule[2] = Ival.summary(rule[2]) # summarize dst
rule[3] = Ival.summary(rule[3]) # summarize srv
if direction == '>':
rv.append(rule)
elif direction == '<':
rule[1], rule[2] = rule[2], rule[1]
rv.append(rule)
else:
rv.append(rule.copy())
if rule[1] != rule[2]:
rule[1], rule[2] = rule[2], rule[1]
rv.append(rule)
return rv
# -- SEMANTICS
def ast_semantics(ast):
'run all chk_ast_funcs on ast'
# all chk_xyz(ast) -> must return an (un)modified, valid ast
for check in [x for x in globals() if x.startswith('chk_')]:
semantics = globals()[check]
# XXX: log on informational level to console
print('semantics:', semantics.__doc__)
ast = semantics(ast)
return ast
def chk_ast_dangling(ast):
'checking RULE(PLUS) scopes'
scope = None # determines current scope (if any)
for idx, pos, stmt in ast_enum(ast):
if stmt[0] == 'BLANK':
continue
if stmt[0] == 'RULEPLUS' and scope not in ['RULE', 'RULEPLUS']:
ast[idx] = (pos, ('ERROR', 'RULEPLUS',
'not in scope of a RULE'))
scope = stmt[1] if stmt[0] in ['ERROR', 'WARNING'] else stmt[0]
return ast
def chk_ast_refs(ast):
'check group references'
global GROUPS
def undefined_refs(lst):
return [x[1] for x in lst if x[0] == 'STR' and x[1] not in GROUPS]
def empty_refs(lst):
return [x[1] for x in lst if x[0] == 'STR' and x[1] in GROUPS and len(
GROUPS.get(x[1], [])) == 0]
for idx, pos, stmt in ast_enum(ast, ['GROUP', 'RULE', 'RULEPLUS']):
unrefs = undefined_refs(stmt[2]) # unknown group-references
emptyrefs = empty_refs(stmt[2]) # undefined group-references
if stmt[0] == 'RULE':
unrefs += undefined_refs(stmt[4]) # add unknown dsts
emptyrefs += empty_refs(stmt[4])
unrefs += undefined_refs(stmt[5]) # add unknown srvs
emptyrefs += empty_refs(stmt[5])
if len(unrefs) and len(emptyrefs):
msg = 'has empty ref: {} and undefined refs: {}'.format(
', '.join(emptyrefs), ', '.join(unrefs))
elif len(unrefs):
msg = 'has undefined references: {}'.format(unrefs)
elif len(emptyrefs):
msg = 'has empty references: {}'.format(emptyrefs)
else:
continue # all is ok
ast[idx] = (pos, ('ERROR', stmt[0], msg))
return ast
def chk_ast_args(ast):
'checking argument validity'
# RULEPLUS @ has STR's or PORTSTR's, else its an ERROR
# RULEPLUS <,>,<> has STR's or IP's, else its an ERROR
# RULE, same checks for src, dst and services
NETARGS = ('IP', 'STR')
SRVARGS = ('PORTSTR', 'STR')
ALLARGS = set([*NETARGS, *SRVARGS])
for idx, pos, stmt in ast_enum(ast, ['GROUP', 'RULE', 'RULEPLUS']):
illegal = []
if stmt[0] == 'GROUP':
illegal = [x[1] for x in stmt[2] if x[0] not in ALLARGS]
elif stmt[0] == 'RULE':
illegal = [x[1] for x in stmt[2] if x[0] not in NETARGS]
illegal.extend(x[1] for x in stmt[4] if x[0] not in NETARGS)
illegal.extend(x[1] for x in stmt[5] if x[0] not in SRVARGS)
elif stmt[0] == 'RULEPLUS':
if stmt[1] == '@':
illegal = [x[1] for x in stmt[2] if x[0] not in SRVARGS]
else:
illegal = [x[1] for x in stmt[2] if x[0] not in NETARGS]
else:
raise ValueError('stmt args check: unknown stmt type {}'.format(
stmt[1]))
if len(illegal):
msg = 'illegal args: {}'.format(', '.join(str(i) for i in illegal))
ast[idx] = (pos, ('ERROR', stmt[0], msg))
return ast
# -- Compile
def print_ast(ast):
'print out the abstract syntax tree'
for pos, stmt in ast:
print('{}:{}:{}'.format(os.path.relpath(pos[0]), pos[1], pos[2]),
*(elm for elm in stmt))
def compile(src):
'compile file or script text into IP4Filter object'
global GROUPS
try:
fhdl = open(src, "rt") # either a filename
except (IOError, OSError):
import io # or text
fhdl = io.StringIO(src)
ast = parse(fhdl)
ast = ast_includes(ast) # include & parse include(files)
GROUPS = ast_symbol_table(ast) # create new symbol table
ast = ast_semantics(ast) # check validity of ast
ast = ast_ivalify(ast) # turn IP, PORTSTR strings into Ival's
ast = ast_jsonify(ast) # turn json str into python object
errors = list(ast_iter(ast, 'ERROR'))
warnings = list(ast_iter(ast, 'WARNING'))
for pos, msg in errors:
print('Error:{}:{}'.format(pos, msg))
for pos, msg in warnings:
print('Warning:{}:{}'.format(pos, msg))
print('Score: E{}, W{}'.format(len(errors), len(warnings)))
if len(errors):
print_ast(ast)
raise SystemExit('Filter script contains errors')
# TODO:
# - maybe return (errors, warnings, ip4f)
rules = ast_rules(ast)
ip4f = Ip4Filter()
for rid, (name, srcs, dsts, ports, action, obj) in enumerate(rules):
ip4f._add(rid, srcs, dsts, ports, name, action, obj)
return ip4f
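# Usage sketch: compile() accepts either a filename or literal script text
# (the io.StringIO fallback above covers the latter); the filename and rule
# syntax here are illustrative only.
# ip4f = compile('policy.rules')   # from a file on disk
# ip4f = compile(script_text)      # or from an in-memory script string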
|
[
"os.path.relpath",
"io.StringIO",
"os.path.dirname",
"json.loads"
] |
[((11909, 11925), 'io.StringIO', 'io.StringIO', (['src'], {}), '(src)\n', (11920, 11925), False, 'import io\n'), ((4226, 4246), 'json.loads', 'json.loads', (['stmt[-1]'], {}), '(stmt[-1])\n', (4236, 4246), False, 'import json\n'), ((11579, 11602), 'os.path.relpath', 'os.path.relpath', (['pos[0]'], {}), '(pos[0])\n', (11594, 11602), False, 'import os\n'), ((1302, 1324), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (1317, 1324), False, 'import os\n')]
|
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Test for hook of session run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from distutils.version import LooseVersion as Version
import six
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import test
from tensorflow.python.framework.versions import __version__
import epl
from epl.parallel.hooks import _append_replicated_fetches
# pylint: disable=missing-docstring,unused-argument,unused-variable
class RunHookTest(test.TestCase):
def test_for_append_replicated_fetches(self):
epl.init(config=epl.Config({"communication.gradients_reduce_method": "sum"}))
with epl.Cluster(worker_hosts="127.0.0.1:8001", worker_index=0):
with epl.replicate(device_count=1):
num_x = np.random.randint(0, 10, (500, 20)).astype(dtype=np.float32)
num_y = np.random.randint(0, 10, 500).astype(dtype=np.int64)
dataset = tf.data.Dataset.from_tensor_slices((num_x, num_y)) \
.batch(10).repeat(1)
iterator = dataset.make_initializable_iterator()
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
iterator.initializer)
x, labels = iterator.get_next()
logits = tf.layers.dense(x, 2)
logits = tf.layers.dense(logits, 10)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels,
logits=logits)
epl.add_to_collection(loss, epl.GraphKeys.GLOBAL_MEAN_OBJECTS)
global_step = tf.train.get_or_create_global_step()
optimizer = tf.train.MomentumOptimizer(learning_rate=0.001,
momentum=0.9)
train_op = optimizer.minimize(loss, global_step=global_step)
tf.train.MonitoredTrainingSession()
# Test for a single operation/tensor.
fetches = loss
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches.name, "EPL_PARALLEL_STRATEGY/truediv:0")
self.assertListEqual(replicas, [
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0"
])
fetches = train_op
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
# Test for nvidia-tf(1.15.4) and deeprec(1.15.5).
if Version(__version__) >= Version("1.15.4") and Version(__version__) < Version("2.0"):
suffix = "/group_deps"
else:
suffix = ""
self.assertEqual(fetches.name, "Momentum" + suffix)
self.assertEqual(replicas, [
"EPL_REPLICA_1/Momentum" + suffix, "EPL_REPLICA_2/Momentum" +
suffix, "EPL_REPLICA_3/Momentum" + suffix
])
# Test for list fetches.
fetches = [loss, train_op]
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
fetches = [fetch.name for fetch in fetches]
replicas = [rep.name for rep in replicas]
self.assertListEqual(
fetches, ["EPL_PARALLEL_STRATEGY/truediv:0", "Momentum" + suffix])
self.assertListEqual(replicas, [
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0", "EPL_REPLICA_1/Momentum" +
suffix, "EPL_REPLICA_2/Momentum" + suffix,
"EPL_REPLICA_3/Momentum" + suffix
])
# Test for type of dict.
fetches = {"loss": loss, "train_op": train_op}
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches["loss"].name,
"EPL_PARALLEL_STRATEGY/truediv:0")
self.assertEqual(fetches["train_op"].name, "Momentum" + suffix)
if six.PY2:
self.assertListEqual(replicas, [
"EPL_REPLICA_1/Momentum" + suffix, "EPL_REPLICA_2/Momentum" +
suffix, "EPL_REPLICA_3/Momentum" + suffix,
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0"
])
else:
self.assertListEqual(replicas, [
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0", "EPL_REPLICA_1/Momentum" +
suffix, "EPL_REPLICA_2/Momentum" + suffix,
"EPL_REPLICA_3/Momentum" + suffix
])
# Test for type of OrderedDict
fetches = collections.OrderedDict()
fetches["loss"] = loss
fetches["train_op"] = train_op
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches["loss"].name,
"EPL_PARALLEL_STRATEGY/truediv:0")
self.assertEqual(fetches["train_op"].name, "Momentum" + suffix)
self.assertListEqual(replicas, [
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0", "EPL_REPLICA_1/Momentum" +
suffix, "EPL_REPLICA_2/Momentum" + suffix,
"EPL_REPLICA_3/Momentum" + suffix
])
# Test for type of tuple.
fetches = (loss, train_op)
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches[0].name, "EPL_PARALLEL_STRATEGY/truediv:0")
self.assertEqual(fetches[1].name, "Momentum" + suffix)
self.assertListEqual(replicas, [
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0", "EPL_REPLICA_1/Momentum" +
suffix, "EPL_REPLICA_2/Momentum" + suffix,
"EPL_REPLICA_3/Momentum" + suffix
])
# Test for type of namedtuple.
fetch_type = collections.namedtuple("fetch_type", ["loss", "train_op"])
fetches = fetch_type(loss=loss, train_op=train_op)
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches.loss.name, "EPL_PARALLEL_STRATEGY/truediv:0")
self.assertEqual(fetches.train_op.name, "Momentum" + suffix)
self.assertListEqual(replicas, [
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0", "EPL_REPLICA_1/Momentum" +
suffix, "EPL_REPLICA_2/Momentum" + suffix,
"EPL_REPLICA_3/Momentum" + suffix
])
# Test for nested list fetches.
def _flatten(li):
return sum(
([x] if not isinstance(x, list) else _flatten(x) for x in li), [])
fetches = [labels, [train_op, logits, [loss, global_step]]]
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
fetches = _flatten(fetches)
fetches = [fetch.name for fetch in fetches]
replicas = [rep.name for rep in replicas]
self.assertListEqual(fetches, [
"IteratorGetNext:1", "Momentum" + suffix, "dense_1/BiasAdd:0",
"EPL_PARALLEL_STRATEGY/truediv:0", "global_step:0"
])
self.assertListEqual(replicas, [
"EPL_REPLICA_1/IteratorGetNext:1",
"EPL_REPLICA_2/IteratorGetNext:1",
"EPL_REPLICA_3/IteratorGetNext:1", "EPL_REPLICA_1/Momentum" +
suffix, "EPL_REPLICA_2/Momentum" + suffix,
"EPL_REPLICA_3/Momentum" + suffix,
"EPL_REPLICA_1/dense_1/BiasAdd:0",
"EPL_REPLICA_2/dense_1/BiasAdd:0",
"EPL_REPLICA_3/dense_1/BiasAdd:0",
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0",
"EPL_REPLICA_1/global_step:0",
"EPL_REPLICA_2/global_step:0",
"EPL_REPLICA_3/global_step:0"
])
# Test for nested list with dict.
fetches = [labels, {"loss": loss}]
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches[0].name, "IteratorGetNext:1")
self.assertEqual(fetches[1]["loss"].name,
"EPL_PARALLEL_STRATEGY/truediv:0")
self.assertListEqual(replicas, [
"EPL_REPLICA_1/IteratorGetNext:1",
"EPL_REPLICA_2/IteratorGetNext:1",
"EPL_REPLICA_3/IteratorGetNext:1",
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0"
])
# Test for nested list with tuple.
fetches = [labels, (loss, global_step)]
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches[0].name, "IteratorGetNext:1")
self.assertEqual(fetches[1][0].name, "EPL_PARALLEL_STRATEGY/truediv:0")
self.assertEqual(fetches[1][1].name, "global_step:0")
self.assertListEqual(replicas, [
"EPL_REPLICA_1/IteratorGetNext:1",
"EPL_REPLICA_2/IteratorGetNext:1",
"EPL_REPLICA_3/IteratorGetNext:1",
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0",
"EPL_REPLICA_1/global_step:0",
"EPL_REPLICA_2/global_step:0",
"EPL_REPLICA_3/global_step:0"
])
# pylint: enable=missing-docstring,unused-argument,unused-variable
if __name__ == "__main__":
test.main()
|
[
"tensorflow.python.platform.test.main",
"tensorflow.train.MonitoredTrainingSession",
"tensorflow.losses.sparse_softmax_cross_entropy",
"epl.add_to_collection",
"distutils.version.LooseVersion",
"tensorflow.layers.dense",
"tensorflow.train.get_or_create_global_step",
"epl.replicate",
"tensorflow.add_to_collection",
"tensorflow.data.Dataset.from_tensor_slices",
"epl.Cluster",
"epl.parallel.hooks._append_replicated_fetches",
"numpy.random.randint",
"tensorflow.train.MomentumOptimizer",
"collections.namedtuple",
"collections.OrderedDict",
"epl.Config"
] |
[((10439, 10450), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (10448, 10450), False, 'from tensorflow.python.platform import test\n'), ((8955, 9000), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (8981, 9000), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((9631, 9676), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (9657, 9676), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((1387, 1445), 'epl.Cluster', 'epl.Cluster', ([], {'worker_hosts': '"""127.0.0.1:8001"""', 'worker_index': '(0)'}), "(worker_hosts='127.0.0.1:8001', worker_index=0)\n", (1398, 1445), False, 'import epl\n'), ((2526, 2561), 'tensorflow.train.MonitoredTrainingSession', 'tf.train.MonitoredTrainingSession', ([], {}), '()\n', (2559, 2561), True, 'import tensorflow as tf\n'), ((2664, 2709), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (2690, 2709), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((3080, 3125), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (3106, 3125), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((3714, 3759), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (3740, 3759), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((4397, 4442), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (4423, 4442), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((5402, 5427), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (5425, 5427), False, 'import collections\n'), ((5530, 5575), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (5556, 5575), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((6212, 6257), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (6238, 6257), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((6812, 6870), 'collections.namedtuple', 'collections.namedtuple', (['"""fetch_type"""', "['loss', 'train_op']"], {}), "('fetch_type', ['loss', 'train_op'])\n", (6834, 6870), False, 'import collections\n'), ((6964, 7009), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (6990, 7009), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((7780, 7825), 'epl.parallel.hooks._append_replicated_fetches', '_append_replicated_fetches', (['fetches', 'replicas'], {}), '(fetches, replicas)\n', (7806, 7825), False, 'from epl.parallel.hooks import _append_replicated_fetches\n'), ((1316, 1376), 'epl.Config', 'epl.Config', (["{'communication.gradients_reduce_method': 'sum'}"], {}), "({'communication.gradients_reduce_method': 'sum'})\n", (1326, 1376), False, 'import epl\n'), ((1458, 1487), 'epl.replicate', 'epl.replicate', ([], {'device_count': '(1)'}), '(device_count=1)\n', (1471, 1487), False, 'import epl\n'), ((1824, 1899), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['tf.GraphKeys.TABLE_INITIALIZERS', 'iterator.initializer'], {}), '(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)\n', (1844, 1899), True, 'import tensorflow as tf\n'), ((1987, 2008), 'tensorflow.layers.dense', 'tf.layers.dense', (['x', '(2)'], {}), '(x, 2)\n', (2002, 2008), True, 'import tensorflow as tf\n'), ((2026, 2053), 'tensorflow.layers.dense', 'tf.layers.dense', (['logits', '(10)'], {}), '(logits, 10)\n', (2041, 2053), True, 'import tensorflow as tf\n'), ((2069, 2137), 'tensorflow.losses.sparse_softmax_cross_entropy', 'tf.losses.sparse_softmax_cross_entropy', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (2107, 2137), True, 'import tensorflow as tf\n'), ((2200, 2262), 'epl.add_to_collection', 'epl.add_to_collection', (['loss', 'epl.GraphKeys.GLOBAL_MEAN_OBJECTS'], {}), '(loss, epl.GraphKeys.GLOBAL_MEAN_OBJECTS)\n', (2221, 2262), False, 'import epl\n'), ((2285, 2321), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (2319, 2321), True, 'import tensorflow as tf\n'), ((2342, 2403), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', ([], {'learning_rate': '(0.001)', 'momentum': '(0.9)'}), '(learning_rate=0.001, momentum=0.9)\n', (2368, 2403), True, 'import tensorflow as tf\n'), ((3239, 3259), 'distutils.version.LooseVersion', 'Version', (['__version__'], {}), '(__version__)\n', (3246, 3259), True, 'from distutils.version import LooseVersion as Version\n'), ((3263, 3280), 'distutils.version.LooseVersion', 'Version', (['"""1.15.4"""'], {}), "('1.15.4')\n", (3270, 3280), True, 'from distutils.version import LooseVersion as Version\n'), ((3285, 3305), 'distutils.version.LooseVersion', 'Version', (['__version__'], {}), '(__version__)\n', (3292, 3305), True, 'from distutils.version import LooseVersion as Version\n'), ((3308, 3322), 'distutils.version.LooseVersion', 'Version', (['"""2.0"""'], {}), "('2.0')\n", (3315, 3322), True, 'from distutils.version import LooseVersion as Version\n'), ((1505, 1540), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(500, 20)'], {}), '(0, 10, (500, 20))\n', (1522, 1540), True, 'import numpy as np\n'), ((1582, 1611), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(500)'], {}), '(0, 10, 500)\n', (1599, 1611), True, 'import numpy as np\n'), ((1653, 1703), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(num_x, num_y)'], {}), '((num_x, num_y))\n', (1687, 1703), True, 'import tensorflow as tf\n')]
|
from collections import deque # Implement Mathematiques Stacks
# from main_terminalFunctions import
from os import get_terminal_size
from main_terminalGetKey import getKey
def readfile(file):
    # Bold, Italic, Strike, code, Mcode, Highlight
# 0** 1* 2__ 3_ 4~~ 5` 6``` 7==
etat = [False, False, False, False, False, False, False, False]
to_printfile = []
with open(file, "r") as f:
a = f.readlines()
for i in a:
            current_ligne = i.rstrip()  # strip trailing whitespace only, to keep leading \t
if current_ligne == "---" or current_ligne == "___" or current_ligne == "***":
current_ligne = get_terminal_size()[0] * "─"
elif current_ligne[0:6] == "######":
current_ligne = "\033[33mh6\u2588\u2588\u2588\u2588" + current_ligne[6:] + "\033[0m"
elif current_ligne[0:5] == "#####":
current_ligne = "\033[33mh5\u2588\u2588\u2588" + current_ligne[5:] + "\033[0m"
elif current_ligne[0:4] == "####":
current_ligne = "\033[33mH4\u2588\u2588" + current_ligne[4:] + "\033[0m"
elif current_ligne[0:3] == "###":
current_ligne = "\033[32m\033[1m" + (' ' + current_ligne[3:] + " ").center(get_terminal_size()[0],
".") + "\033[0m" # "\033[32m\033[3m3\u2588\u2588"+ current_ligne[3:] +"\033[0m"
elif current_ligne[0:2] == "##":
current_ligne = "\033[34m\033[1m" + (' ' + current_ligne[2:] + " ").center(get_terminal_size()[0],
"─") + "\033[0m"
elif current_ligne[0:1] == "#":
current_ligne = "\033[31m\033[1m\033[4m" + (' ' + current_ligne[1:] + " ").center(get_terminal_size()[0],
"\u2588") + "\033[0m"
# While "**" or "~~" or "*" or "==" or "__" not i current line
if "**" in current_ligne and not etat[0]:
etat[0] = True
current_ligne = current_ligne.replace("**", "\033[1m\033[91m", 1)
if "**" in current_ligne and etat[0]:
etat[0] = False
current_ligne = current_ligne.replace("**", "\033[0m", 1)
if "__" in current_ligne and not etat[2]:
etat[2] = True
current_ligne = current_ligne.replace("__", "\033[1m", 1)
if "__" in current_ligne and etat[2]:
etat[2] = False
current_ligne = current_ligne.replace("__", "\033[0m", 1)
if "==" in current_ligne and not etat[7]:
etat[7] = True
current_ligne = current_ligne.replace("==", "\033[103m\033[30m", 1)
if "==" in current_ligne and etat[7]:
etat[7] = False
current_ligne = current_ligne.replace("==", "\033[0m", 1)
to_printfile.append(current_ligne)
return to_printfile
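# Example of the toggle logic above: the line 'a **b** c' is returned as
# 'a \033[1m\033[91mb\033[0m c' -- the first ** switches bold/red on and the
# matching ** resets the attributes.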
def printontermnal(to_printfile, boucle=True):
ShowLines = False
Firstline = 0
ChosedLink = 0
Reapet = True
while Reapet:
for i in to_printfile:
print(i)
if boucle:
key = getKey(debug=True)
if key == "l":
ShowLines = not ShowLines
if key == "j": # DOWN
Firstline = Firstline + 1 # min(Firstline+1, len(to_printfile))
if key == "k": # Up
Firstline = Firstline - 1 # max(Firstline-1, 0)
if key == "Tab":
ChosedLink = ChosedLink + 1 # min(ChosedLink+1, len(alllink))
if key == "ShiftTab":
ChosedLink = ChosedLink - 1 # min(ChosedLink-1, 0)
if key == "\r": # ENTER
pass # TODO: Open browser with current link
else:
Reapet = False
if __name__ == "__main__": # Si tu le lance avec python3.10 main_readmereader.py
import sys
    args = sys.argv  # Retrieve the command-line arguments
if "--help" in args or "-h" in args:
print("""
        -l, --lines: display the line numbers
        -h, --help: display this help
        -c, --config: config file (feature)
        -i, --image : display images as ASCII with `https://dev.to/natamacm/terminal-image-with-python-44mh`
        -b, -blockcode : highlight code blocks
        -s, --size : set the output size
""")
if "--exec" in args:
printontermnal(readfile("resources/Readmereader/RM.md"), boucle=False)
|
[
"os.get_terminal_size",
"main_terminalGetKey.getKey"
] |
[((3204, 3222), 'main_terminalGetKey.getKey', 'getKey', ([], {'debug': '(True)'}), '(debug=True)\n', (3210, 3222), False, 'from main_terminalGetKey import getKey\n'), ((656, 675), 'os.get_terminal_size', 'get_terminal_size', ([], {}), '()\n', (673, 675), False, 'from os import get_terminal_size\n'), ((1219, 1238), 'os.get_terminal_size', 'get_terminal_size', ([], {}), '()\n', (1236, 1238), False, 'from os import get_terminal_size\n'), ((1539, 1558), 'os.get_terminal_size', 'get_terminal_size', ([], {}), '()\n', (1556, 1558), False, 'from os import get_terminal_size\n'), ((1801, 1820), 'os.get_terminal_size', 'get_terminal_size', ([], {}), '()\n', (1818, 1820), False, 'from os import get_terminal_size\n')]
|
from active_learning.oracles import UserOracle, FunctionalOracle
from active_learning.evaluation import Evaluator
from active_learning.active_learner import RandomSelectionAlgorithm, GPSelect_Algorithm, UncertaintySamplingAlgorithm
from active_learning.rating import length_based
import unittest
class ActiveLearningExperimentsTest(unittest.TestCase):
def test_evaluator(self):
algorithm = UncertaintySamplingAlgorithm
algo_params_length = {'hypothesis': 'Gaussian Process', 'hypothesis_params': {'transformation': 'length'}}
algo_params_tfidf = {'hypothesis': 'Gaussian Process', 'hypothesis_params': {'transformation': 'tfidf'}}
logs_path = '/home/freya/BP/32de-python/notebooks/active_learning/logs/testrun/f'
def rating_func_constant(c):
return lambda x: c
rating_func = rating_func_constant(1)
oracle = FunctionalOracle(**{'rating_func': rating_func})
res = Evaluator(algorithm=algorithm, algo_params=algo_params_tfidf,
oracle=oracle,
batch_size=1, dataset_name='Rotten Tomato', logs_path=logs_path).compute()
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"active_learning.oracles.FunctionalOracle",
"active_learning.evaluation.Evaluator"
] |
[((1171, 1186), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1184, 1186), False, 'import unittest\n'), ((886, 934), 'active_learning.oracles.FunctionalOracle', 'FunctionalOracle', ([], {}), "(**{'rating_func': rating_func})\n", (902, 934), False, 'from active_learning.oracles import UserOracle, FunctionalOracle\n'), ((950, 1095), 'active_learning.evaluation.Evaluator', 'Evaluator', ([], {'algorithm': 'algorithm', 'algo_params': 'algo_params_tfidf', 'oracle': 'oracle', 'batch_size': '(1)', 'dataset_name': '"""Rotten Tomato"""', 'logs_path': 'logs_path'}), "(algorithm=algorithm, algo_params=algo_params_tfidf, oracle=oracle,\n batch_size=1, dataset_name='Rotten Tomato', logs_path=logs_path)\n", (959, 1095), False, 'from active_learning.evaluation import Evaluator\n')]
|
import six
import numpy as np
import nutszebra_utility as nz
import sys
import pickle
def unpickle(file_name):
fp = open(file_name, 'rb')
if sys.version_info.major == 2:
data = pickle.load(fp)
elif sys.version_info.major == 3:
data = pickle.load(fp, encoding='latin-1')
fp.close()
return data
class Cifar10(object):
def __init__(self):
self.utility = nz.Utility()
self.output_name = 'cifar10.pkl'
self.url = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
self.downloaded_file = 'cifar-10-python.tar.gz'
self.untared_file = 'cifar-10-batches-py'
self.batch_train_file = ['data_batch_' + str(num) for num in six.moves.range(1, 6)]
self.batch_test_file = 'test_batch'
self.meta_file = 'batches.meta'
self.converted_name = 'cifar10.pkl'
def download_cifar_10(self):
# if already downloaded and processed, then return True
if self.converted_name in self.utility.find_files(self.utility.nutszebra_path, affix_flag=True):
print('Already downloaded')
return True
# download file
print('Downloading: ' + self.downloaded_file)
self.utility.download_file(self.url, self.utility.nutszebra_path, self.downloaded_file)
print('Done')
print('Uncompressing')
# untar
self.utility.untar_gz(self.utility.nutszebra_path + '/' + self.downloaded_file)
print('Done')
# delete tar.gz file
self.utility.remove_file(self.downloaded_file)
# load train file
print('Loading train data')
train_x = np.zeros((50000, 3, 32, 32), dtype=np.float32)
train_y = np.zeros((50000), dtype=np.int32)
for i, batch_file in enumerate(self.batch_train_file):
data = unpickle(self.untared_file + '/' + batch_file)
start = i * 10000
end = start + 10000
train_x[start:end] = data['data'].reshape(10000, 3, 32, 32)
train_y[start:end] = np.array(data['labels'], dtype=np.int32)
print('Done')
# load test file
print('Loading test data')
test_x = np.zeros((10000, 3, 32, 32), dtype=np.float32)
test_y = np.zeros((10000), dtype=np.int32)
data = unpickle(self.untared_file + '/' + self.batch_test_file)
test_x[:] = data['data'].reshape(10000, 3, 32, 32)
test_y[:] = np.array(data['labels'], dtype=np.int32)
print('Done')
# load meta file
data = unpickle(self.untared_file + '/' + self.meta_file)
meta = data['label_names']
# save loaded data
print('Saving')
data = {}
data['train_x'] = train_x
data['train_y'] = train_y
data['test_x'] = test_x
data['test_y'] = test_y
data['meta'] = meta
self.utility.save_pickle(data, self.utility.nutszebra_path + '/' + self.converted_name)
def check_overlap(self):
data = self.load_cifar10_data()
length = len(data['test_x'])
result = [0] * length
for i in six.moves.range(length):
result[i] = np.any(np.all(data['test_x'][i] == data['train_x']))
return (np.any(result), result)
def load_cifar10_data(self):
self.download_cifar_10()
return unpickle(self.utility.nutszebra_path + '/' + self.converted_name)
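# Usage sketch: downloading (only when needed) and loading are a single call.
# data = Cifar10().load_cifar10_data()
# data['train_x'].shape  # (50000, 3, 32, 32); also test_x, test_y and meta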
|
[
"six.moves.range",
"numpy.zeros",
"numpy.any",
"nutszebra_utility.Utility",
"pickle.load",
"numpy.array",
"numpy.all"
] |
[((195, 210), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (206, 210), False, 'import pickle\n'), ((404, 416), 'nutszebra_utility.Utility', 'nz.Utility', ([], {}), '()\n', (414, 416), True, 'import nutszebra_utility as nz\n'), ((1644, 1690), 'numpy.zeros', 'np.zeros', (['(50000, 3, 32, 32)'], {'dtype': 'np.float32'}), '((50000, 3, 32, 32), dtype=np.float32)\n', (1652, 1690), True, 'import numpy as np\n'), ((1709, 1740), 'numpy.zeros', 'np.zeros', (['(50000)'], {'dtype': 'np.int32'}), '(50000, dtype=np.int32)\n', (1717, 1740), True, 'import numpy as np\n'), ((2179, 2225), 'numpy.zeros', 'np.zeros', (['(10000, 3, 32, 32)'], {'dtype': 'np.float32'}), '((10000, 3, 32, 32), dtype=np.float32)\n', (2187, 2225), True, 'import numpy as np\n'), ((2243, 2274), 'numpy.zeros', 'np.zeros', (['(10000)'], {'dtype': 'np.int32'}), '(10000, dtype=np.int32)\n', (2251, 2274), True, 'import numpy as np\n'), ((2428, 2468), 'numpy.array', 'np.array', (["data['labels']"], {'dtype': 'np.int32'}), "(data['labels'], dtype=np.int32)\n", (2436, 2468), True, 'import numpy as np\n'), ((3096, 3119), 'six.moves.range', 'six.moves.range', (['length'], {}), '(length)\n', (3111, 3119), False, 'import six\n'), ((264, 299), 'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""latin-1"""'}), "(fp, encoding='latin-1')\n", (275, 299), False, 'import pickle\n'), ((2039, 2079), 'numpy.array', 'np.array', (["data['labels']"], {'dtype': 'np.int32'}), "(data['labels'], dtype=np.int32)\n", (2047, 2079), True, 'import numpy as np\n'), ((3214, 3228), 'numpy.any', 'np.any', (['result'], {}), '(result)\n', (3220, 3228), True, 'import numpy as np\n'), ((709, 730), 'six.moves.range', 'six.moves.range', (['(1)', '(6)'], {}), '(1, 6)\n', (724, 730), False, 'import six\n'), ((3152, 3196), 'numpy.all', 'np.all', (["(data['test_x'][i] == data['train_x'])"], {}), "(data['test_x'][i] == data['train_x'])\n", (3158, 3196), True, 'import numpy as np\n')]
|
#coding:utf-8
#
# id: bugs.core_5676
# title: Consider equivalence classes for index navigation
# description:
#                  Confirmed inefficiency on:
# 3.0.3.32837
# 4.0.0.800
# Checked on:
# 3.0.3.32852: OK, 1.250s.
# 4.0.0.830: OK, 1.375s.
#
# tracker_id: CORE-5676
# min_versions: ['3.0.3']
# versions: 3.0.3
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0.3
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
recreate table document(id int primary key using index pk_document);
recreate table doc_ip_doc(id int primary key using index pk_doc_ip_doc, name varchar(100));
insert into document (id) select row_number() over() from rdb$types,(select 1 i from rdb$types rows 10);
insert into doc_ip_doc (id) select row_number() over() from rdb$types;
commit;
set planonly;
select document.id, doc_ip_doc.name
from doc_ip_doc
join document on document.id=doc_ip_doc.id
order by doc_ip_doc.id;
--PLAN JOIN (DOC_IP_DOC ORDER PK_DOC_IP_DOC, DOCUMENT INDEX (PK_DOCUMENT))
select document.id, doc_ip_doc.name
from doc_ip_doc
join document on document.id=doc_ip_doc.id
order by document.id;
-- OLD: PLAN SORT (JOIN (DOC_IP_DOC NATURAL, DOCUMENT INDEX (PK_DOCUMENT)))
select doc_ip_doc.id, doc_ip_doc.name
from doc_ip_doc
join document on document.id=doc_ip_doc.id
order by id;
--PLAN JOIN (DOC_IP_DOC ORDER PK_DOC_IP_DOC, DOCUMENT INDEX (PK_DOCUMENT))
select document.id, doc_ip_doc.name
from doc_ip_doc
join document on document.id=doc_ip_doc.id
order by id;
-- OLD: PLAN SORT (JOIN (DOC_IP_DOC NATURAL, DOCUMENT INDEX (PK_DOCUMENT)))
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
PLAN JOIN (DOC_IP_DOC ORDER PK_DOC_IP_DOC, DOCUMENT INDEX (PK_DOCUMENT))
PLAN JOIN (DOC_IP_DOC ORDER PK_DOC_IP_DOC, DOCUMENT INDEX (PK_DOCUMENT))
PLAN JOIN (DOC_IP_DOC ORDER PK_DOC_IP_DOC, DOCUMENT INDEX (PK_DOCUMENT))
PLAN JOIN (DOC_IP_DOC ORDER PK_DOC_IP_DOC, DOCUMENT INDEX (PK_DOCUMENT))
"""
@pytest.mark.version('>=3.0.3')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
[
"pytest.mark.version",
"firebird.qa.isql_act",
"firebird.qa.db_factory"
] |
[((646, 691), 'firebird.qa.db_factory', 'db_factory', ([], {'sql_dialect': '(3)', 'init': 'init_script_1'}), '(sql_dialect=3, init=init_script_1)\n', (656, 691), False, 'from firebird.qa import db_factory, isql_act, Action\n'), ((1953, 2015), 'firebird.qa.isql_act', 'isql_act', (['"""db_1"""', 'test_script_1'], {'substitutions': 'substitutions_1'}), "('db_1', test_script_1, substitutions=substitutions_1)\n", (1961, 2015), False, 'from firebird.qa import db_factory, isql_act, Action\n'), ((2355, 2385), 'pytest.mark.version', 'pytest.mark.version', (['""">=3.0.3"""'], {}), "('>=3.0.3')\n", (2374, 2385), False, 'import pytest\n')]
|
from tqdm import tqdm
from MCTS import MCTS
from BinaryTree import BinaryTree
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(15)
def run_experiment(max_iterations, dynamic_c=False):
"""
Run a single experiment of a sequence of MCTS searches to find the optimal path.
:param max_iterations: Number of iterations to run the MCTS.
:param dynamic_c: Boolean indicating whether to use a dynamic c or not.
    :return: Hamming distance between the optimal path found by the search and the best leaf's address
"""
tree = BinaryTree(depth=12, b=20, tau=3)
best_leaf = max(tree.leaves)
mcts = MCTS(max_iterations=max_iterations, c=2)
optimal_path = ""
while tree.depth > 0:
# search the best direction
direction = mcts.search(tree, dynamic_c=dynamic_c, verbose=False)
optimal_path += direction
# update the tree
tree.update_root(direction)
# return the distance of the optimal path found from the search wrt the best leaf
return sum(1 for a, b in zip(optimal_path, best_leaf.address) if a != b)
def main():
# compute statistics for static c and dynamic c
n_iterations = np.logspace(0.7, 3, num=18, base=10, dtype=int)
values_static_c = [run_experiment(max_iterations=n, dynamic_c=False) for n in tqdm(n_iterations, desc='Execute MCTS with c=2', unit=' experiment')]
values_dynamic_c = [run_experiment(max_iterations=n, dynamic_c=True) for n in tqdm(n_iterations, desc='Execute MCTS with dynamic c', unit=' experiment')]
# plot the results
plt.figure(figsize=(8, 4))
plt.plot(n_iterations, values_dynamic_c, '-o', label="MCTS with dynamic c")
plt.plot(n_iterations, values_static_c, '-o', label="MCTS with c=2")
plt.xlabel("Number of iterations")
plt.ylabel("Distance of the optimal path from the best leaf")
plt.title("Compare the value of the optimal path found by MCTS with and without dynamic c")
plt.grid(linestyle='--', linewidth=1)
plt.xscale("log")
plt.xticks(n_iterations, n_iterations)
plt.legend()
plt.show()
if __name__ == "__main__":
main()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.xscale",
"tqdm.tqdm",
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.logspace",
"matplotlib.pyplot.legend",
"MCTS.MCTS",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"BinaryTree.BinaryTree",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel"
] |
[((131, 149), 'numpy.random.seed', 'np.random.seed', (['(15)'], {}), '(15)\n', (145, 149), True, 'import numpy as np\n'), ((519, 552), 'BinaryTree.BinaryTree', 'BinaryTree', ([], {'depth': '(12)', 'b': '(20)', 'tau': '(3)'}), '(depth=12, b=20, tau=3)\n', (529, 552), False, 'from BinaryTree import BinaryTree\n'), ((597, 637), 'MCTS.MCTS', 'MCTS', ([], {'max_iterations': 'max_iterations', 'c': '(2)'}), '(max_iterations=max_iterations, c=2)\n', (601, 637), False, 'from MCTS import MCTS\n'), ((1143, 1190), 'numpy.logspace', 'np.logspace', (['(0.7)', '(3)'], {'num': '(18)', 'base': '(10)', 'dtype': 'int'}), '(0.7, 3, num=18, base=10, dtype=int)\n', (1154, 1190), True, 'import numpy as np\n'), ((1529, 1555), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (1539, 1555), True, 'import matplotlib.pyplot as plt\n'), ((1560, 1635), 'matplotlib.pyplot.plot', 'plt.plot', (['n_iterations', 'values_dynamic_c', '"""-o"""'], {'label': '"""MCTS with dynamic c"""'}), "(n_iterations, values_dynamic_c, '-o', label='MCTS with dynamic c')\n", (1568, 1635), True, 'import matplotlib.pyplot as plt\n'), ((1640, 1708), 'matplotlib.pyplot.plot', 'plt.plot', (['n_iterations', 'values_static_c', '"""-o"""'], {'label': '"""MCTS with c=2"""'}), "(n_iterations, values_static_c, '-o', label='MCTS with c=2')\n", (1648, 1708), True, 'import matplotlib.pyplot as plt\n'), ((1713, 1747), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of iterations"""'], {}), "('Number of iterations')\n", (1723, 1747), True, 'import matplotlib.pyplot as plt\n'), ((1752, 1813), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Distance of the optimal path from the best leaf"""'], {}), "('Distance of the optimal path from the best leaf')\n", (1762, 1813), True, 'import matplotlib.pyplot as plt\n'), ((1818, 1919), 'matplotlib.pyplot.title', 'plt.title', (['"""Compare the value of the optimal path found by MCTS with and without dynamic c"""'], {}), "(\n 'Compare the value of the optimal path found by MCTS with and without dynamic c'\n )\n", (1827, 1919), True, 'import matplotlib.pyplot as plt\n'), ((1914, 1951), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'linestyle': '"""--"""', 'linewidth': '(1)'}), "(linestyle='--', linewidth=1)\n", (1922, 1951), True, 'import matplotlib.pyplot as plt\n'), ((1956, 1973), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (1966, 1973), True, 'import matplotlib.pyplot as plt\n'), ((1978, 2016), 'matplotlib.pyplot.xticks', 'plt.xticks', (['n_iterations', 'n_iterations'], {}), '(n_iterations, n_iterations)\n', (1988, 2016), True, 'import matplotlib.pyplot as plt\n'), ((2021, 2033), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2031, 2033), True, 'import matplotlib.pyplot as plt\n'), ((2038, 2048), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2046, 2048), True, 'import matplotlib.pyplot as plt\n'), ((1273, 1341), 'tqdm.tqdm', 'tqdm', (['n_iterations'], {'desc': '"""Execute MCTS with c=2"""', 'unit': '""" experiment"""'}), "(n_iterations, desc='Execute MCTS with c=2', unit=' experiment')\n", (1277, 1341), False, 'from tqdm import tqdm\n'), ((1425, 1499), 'tqdm.tqdm', 'tqdm', (['n_iterations'], {'desc': '"""Execute MCTS with dynamic c"""', 'unit': '""" experiment"""'}), "(n_iterations, desc='Execute MCTS with dynamic c', unit=' experiment')\n", (1429, 1499), False, 'from tqdm import tqdm\n')]
|
import argparse
from utils.data_loader import DataLoader
from algorithms.OFDClean import OFDClean
if __name__ == '__main__':
threshold = 20
sense_dir = ['sense2/', 'sense4/', 'sense6/', 'sense8/', 'sense10/']
sense_path = 'clinical' # sense_dir[1]
err_data_path = ['data_err3', 'data_err6', 'data_err9', 'data_err12', 'data_err15']
size_data_path = ['data_size20', 'data_size40', 'data_size60', 'data_size80', 'data_size100']
data_path = 'clinical'
# data_path = err_data_path[0]
# data_path = size_data_path[4]
config = {
'data': 'datasets/data/' + data_path + '.csv',
'ofds': 'datasets/ofds/' + 'clinical.csv',
'senses': 'datasets/senses/' + sense_path + '/', # sense name should be the same as column name
}
Loader = DataLoader(config)
data = Loader.read_data()
# print('data:\n', data)
ofds, right_attrs = Loader.read_ofds()
print('ofds:\n', ofds)
# print('right_attrs:\n', right_attrs)
senses, ssets = Loader.read_senses(right_attrs)
print('senses:\n', senses)
# print('ssets:\n', ssets)
Cleaner = OFDClean(data, ofds, senses, right_attrs, ssets, threshold)
Cleaner.run()
|
[
"algorithms.OFDClean.OFDClean",
"utils.data_loader.DataLoader"
] |
[((795, 813), 'utils.data_loader.DataLoader', 'DataLoader', (['config'], {}), '(config)\n', (805, 813), False, 'from utils.data_loader import DataLoader\n'), ((1115, 1174), 'algorithms.OFDClean.OFDClean', 'OFDClean', (['data', 'ofds', 'senses', 'right_attrs', 'ssets', 'threshold'], {}), '(data, ofds, senses, right_attrs, ssets, threshold)\n', (1123, 1174), False, 'from algorithms.OFDClean import OFDClean\n')]
|
######################################################################
# Author: <NAME>
# Username: rakhimovb
# Assignment: A03: A Pair of Fully Functional Gitty Psychedelic Robotic Turtles
######################################################################
import turtle
def draw_rectangle(t, h, c):
"""
This function draws a rectangle
:param t: turtle name
:param h: height of the rectangle
:param c: turtle color
:return:
"""
for i in range(2):
t.color(c)
t.begin_fill()
t.forward(h)
t.left(90)
t.forward(480)
t.left(90)
t.end_fill()
def draw_flag(t):
"""
    This function draws two thirds of the flag
:param t: turtle name
:return:
"""
for i in ["#6fff01", "white"]:
draw_rectangle(t, 80, i)
t.fd(-15)
draw_rectangle(t, 15, "red")
t.fd(-80)
def moon(t):
"""
This function draws a moon on the top left corner of the flag
:param t: turtle name
:return:
"""
t.begin_fill()
t.circle(20, 180)
t.circle(20, -130)
t.end_fill()
def change_pos(t, x, y):
"""
This function changes the position of the turtle
:param t: turtle name
:param x: x coordinate
:param y: y coordinate
:return:
"""
t.penup()
t.setpos(x, y)
t.pendown()
def star_line(t, n, y):
"""
This function draws one line of stars in front of the moon
:param t: turtle name
:param n: number of stars on the line
:param y: y coordinate to move the stars
:return:
"""
x = -115
for b in range(n):
t.begin_fill()
for i in range(5):
t.fd(10)
t.right(144)
change_pos(t, x, y)
x = x - 15
t.end_fill()
def draw_stars(t):
"""
This function draws three lines of stars with different number of stars in each line
:param t: turtle name
:return:
"""
y = 110
n = 3
for i in [95, 80, 65]:
star_line(t, n, y)
change_pos(t, -100, i)
y = y - 15
n = n + 1
def main():
wn = turtle.Screen()
wn.bgpic("samarkand-196923_1920.png")
ttl1 = turtle.Turtle()
change_pos(ttl1, -250, -50) # Change position of the turtle to start drawing
ttl1.setheading(-90)
draw_flag(ttl1) # Draw the whole flag
draw_rectangle(ttl1, 80, "#0abeff")
ttl1.color("white") # Draw a moon in white on the top left corner of the flag
ttl1.pensize(3)
change_pos(ttl1, -200, 115)
ttl1.right(80)
moon(ttl1)
change_pos(ttl1, -100, 110) # Draw stars in front of the moon
ttl1.pensize(1)
ttl1.right(170)
draw_stars(ttl1)
change_pos(ttl1, -250, -210) # Write "UZBEKISTAN" under the flag
ttl1.color("#ff1100")
ttl1.write("UZBEKISTAN", font=("Blackadder ITC", 45, "normal"))
wn.exitonclick()
main()
|
[
"turtle.Screen",
"turtle.Turtle"
] |
[((2112, 2127), 'turtle.Screen', 'turtle.Screen', ([], {}), '()\n', (2125, 2127), False, 'import turtle\n'), ((2181, 2196), 'turtle.Turtle', 'turtle.Turtle', ([], {}), '()\n', (2194, 2196), False, 'import turtle\n')]
|
import numpy as np
import time
import keyboard
import math
import threading
def attack_mob(boxes,classes):
"""
    receives the detection boxes and classes, finds the player and the closest mob, then moves the player towards the mob and attacks it
"""
#midpoints X1 and X2
player, closestmob = calculate_distance(boxes,classes)
#vertical movement y axis
if player[0]<closestmob[0]:
keyboard.teledown()
else:
keyboard.teleup()
# horizontal movement, i messed up the coordinates while creating the tuple index 1 is x, index 0 is y
if player[1]<closestmob[1]:
        # mob is to the right: move right and attack
print("player coord:"+str(player[0])+" "+str(player[1]))
print("\n mob coord:"+str(closestmob[0])+" "+str(closestmob[1]))
keyboard.moveRight()
keyboard.moveRight()
# keyboard.moveRight()
keyboard.attackFiveTimes()
keyboard.loot()
else:
        # mob is to the left: move left and attack
print("player coord:"+str(player[0])+" "+str(player[1]))
print("\n mob coord:"+str(closestmob[0])+" "+str(closestmob[1]))
keyboard.moveLeft()
keyboard.moveLeft()
# keyboard.moveLeft()
keyboard.attackFiveTimes()
keyboard.loot()
def filter(detections):
"""
    takes the first five detections; returns boxes, scores and classes as numpy arrays, plus a flag indicating whether a player (class 2) was detected
"""
#get first five predictions
boxes = detections['detection_boxes'][0].numpy()[:5]
scores = detections['detection_scores'][0].numpy()[:5]
classes = (detections['detection_classes'][0].numpy() + 1).astype(int)[:5]
isTherePlayer = False
if 2 in classes[:]:
isTherePlayer = True
return boxes, scores, classes, isTherePlayer
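# Note: `detections` is assumed to follow the TF2 Object Detection API output
# format, i.e. detections['detection_boxes'] has shape [1, N, 4] holding
# normalized [ymin, xmin, ymax, xmax] boxes sorted by descending score.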
def calculate_distance(boxes,classes):
"""
    calculates the distance between the player and the detected mobs; returns the player midpoint and the midpoint of the closest mob
"""
#get the index of the player, returns a numpy array containing the index
itemindex = np.where(classes==2)
#get the midpoints, list of tuples
midpoints =[]
for i in range(np.shape(boxes)[0]):
midpoints.append(getBoxesMidpoint(boxes[i]))
#calculate the distance between the player and the mobs
distance=np.zeros(5,dtype=np.float32)
for i in range(np.shape(boxes)[0]):
if i == itemindex[0][0]:
distance[i]= 99999.0
else:
distance[i]=distance_2points(midpoints[i],midpoints[itemindex[0][0]])
#get the min index, and return the player coord and mob coord.
minindex = np.argmin(distance)
return midpoints[itemindex[0][0]],midpoints[minindex]
def getBoxesMidpoint(box):
"""
    takes in normalized box coordinates of the (roughly 800x600, here 806x629) game window; TF detection boxes are ordered [ymin, xmin, ymax, xmax]
    returns the midpoint as a (y, x) tuple
"""
#denormalize them
normalized_coord = np.array([box[0]*806,box[1]*629,box[2]*806,box[3]*629],dtype=np.float32)
#offset from the origin
return (((normalized_coord[2]-normalized_coord[0])/2)+normalized_coord[0],((((normalized_coord[3]-normalized_coord[1])/2))+normalized_coord[1]))
def distance_2points(pt1,pt2):
"""
returns distance between two points pt1(x1,y1),pt2(x2,y2). points as tuples.
"""
return math.hypot(pt2[0]-pt1[0], pt2[1]-pt1[1])
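# Example: distance_2points((0.0, 0.0), (3.0, 4.0)) returns 5.0 (a 3-4-5
# right triangle), since math.hypot computes sqrt(dx**2 + dy**2).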
def autobuff(stop_event):
starttime = time.time()
while not stop_event.wait(1):
print("Buffing!")
keyboard.buff()
keyboard.buff()
keyboard.buff()
time.sleep(65.0 - ((time.time() - starttime) % 65.0))
def autocc(stop_event):
starttime = time.time()
while not stop_event.wait(1):
print("CC'ing!")
keyboard.cc()
time.sleep(90.0 - ((time.time() - starttime) % 90.0))
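# Usage sketch: both loops are meant to run on background threads and stop
# together through a shared Event (hence the `threading` import above):
# stop = threading.Event()
# threading.Thread(target=autobuff, args=(stop,), daemon=True).start()
# threading.Thread(target=autocc, args=(stop,), daemon=True).start()
# ...
# stop.set()  # wait(1) then returns True and both loops exit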
if __name__ == "__main__":
pass
|
[
"math.hypot",
"numpy.zeros",
"keyboard.moveRight",
"numpy.argmin",
"time.time",
"keyboard.teledown",
"keyboard.loot",
"keyboard.moveLeft",
"numpy.where",
"numpy.array",
"keyboard.cc",
"keyboard.teleup",
"keyboard.buff",
"numpy.shape",
"keyboard.attackFiveTimes"
] |
[((1979, 2001), 'numpy.where', 'np.where', (['(classes == 2)'], {}), '(classes == 2)\n', (1987, 2001), True, 'import numpy as np\n'), ((2228, 2257), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'np.float32'}), '(5, dtype=np.float32)\n', (2236, 2257), True, 'import numpy as np\n'), ((2546, 2565), 'numpy.argmin', 'np.argmin', (['distance'], {}), '(distance)\n', (2555, 2565), True, 'import numpy as np\n'), ((2845, 2934), 'numpy.array', 'np.array', (['[box[0] * 806, box[1] * 629, box[2] * 806, box[3] * 629]'], {'dtype': 'np.float32'}), '([box[0] * 806, box[1] * 629, box[2] * 806, box[3] * 629], dtype=np\n .float32)\n', (2853, 2934), True, 'import numpy as np\n'), ((3236, 3280), 'math.hypot', 'math.hypot', (['(pt2[0] - pt1[0])', '(pt2[1] - pt1[1])'], {}), '(pt2[0] - pt1[0], pt2[1] - pt1[1])\n', (3246, 3280), False, 'import math\n'), ((3320, 3331), 'time.time', 'time.time', ([], {}), '()\n', (3329, 3331), False, 'import time\n'), ((3567, 3578), 'time.time', 'time.time', ([], {}), '()\n', (3576, 3578), False, 'import time\n'), ((391, 410), 'keyboard.teledown', 'keyboard.teledown', ([], {}), '()\n', (408, 410), False, 'import keyboard\n'), ((429, 446), 'keyboard.teleup', 'keyboard.teleup', ([], {}), '()\n', (444, 446), False, 'import keyboard\n'), ((762, 782), 'keyboard.moveRight', 'keyboard.moveRight', ([], {}), '()\n', (780, 782), False, 'import keyboard\n'), ((791, 811), 'keyboard.moveRight', 'keyboard.moveRight', ([], {}), '()\n', (809, 811), False, 'import keyboard\n'), ((851, 877), 'keyboard.attackFiveTimes', 'keyboard.attackFiveTimes', ([], {}), '()\n', (875, 877), False, 'import keyboard\n'), ((886, 901), 'keyboard.loot', 'keyboard.loot', ([], {}), '()\n', (899, 901), False, 'import keyboard\n'), ((1099, 1118), 'keyboard.moveLeft', 'keyboard.moveLeft', ([], {}), '()\n', (1116, 1118), False, 'import keyboard\n'), ((1127, 1146), 'keyboard.moveLeft', 'keyboard.moveLeft', ([], {}), '()\n', (1144, 1146), False, 'import keyboard\n'), ((1185, 1211), 'keyboard.attackFiveTimes', 'keyboard.attackFiveTimes', ([], {}), '()\n', (1209, 1211), False, 'import keyboard\n'), ((1220, 1235), 'keyboard.loot', 'keyboard.loot', ([], {}), '()\n', (1233, 1235), False, 'import keyboard\n'), ((3400, 3415), 'keyboard.buff', 'keyboard.buff', ([], {}), '()\n', (3413, 3415), False, 'import keyboard\n'), ((3424, 3439), 'keyboard.buff', 'keyboard.buff', ([], {}), '()\n', (3437, 3439), False, 'import keyboard\n'), ((3448, 3463), 'keyboard.buff', 'keyboard.buff', ([], {}), '()\n', (3461, 3463), False, 'import keyboard\n'), ((3646, 3659), 'keyboard.cc', 'keyboard.cc', ([], {}), '()\n', (3657, 3659), False, 'import keyboard\n'), ((2076, 2091), 'numpy.shape', 'np.shape', (['boxes'], {}), '(boxes)\n', (2084, 2091), True, 'import numpy as np\n'), ((2276, 2291), 'numpy.shape', 'np.shape', (['boxes'], {}), '(boxes)\n', (2284, 2291), True, 'import numpy as np\n'), ((3492, 3503), 'time.time', 'time.time', ([], {}), '()\n', (3501, 3503), False, 'import time\n'), ((3688, 3699), 'time.time', 'time.time', ([], {}), '()\n', (3697, 3699), False, 'import time\n')]
|
import os
import io
import httpretty
class APIMock():
"""
Responses should be a {method: filename} map
"""
def __init__(self, mock_url, mock_dir, responses):
self.mock_url = mock_url
self.responses = responses
self.mock_dir = mock_dir
def request_callback(self, request, uri, headers):
method = request.parsed_body[u'method'][0]
filename = self.responses[method]
with io.open(os.path.join(self.mock_dir, filename), 'r') as f:
contents = f.read()
return (200, headers, contents)
def __enter__(self):
httpretty.enable()
httpretty.register_uri(httpretty.POST, self.mock_url,
body=self.request_callback)
def __exit__(self, type, value, traceback):
httpretty.disable()
httpretty.reset()
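# Usage sketch (URL and fixture names are illustrative): mock an RPC-style
# endpoint whose POST body carries a 'method' field mapped to a fixture file.
# mock = APIMock('http://api.example.com/rpc', 'tests/fixtures',
#                {'getUser': 'get_user.json'})
# with mock:
#     ...  # code under test POSTs method=getUser and receives get_user.json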
|
[
"httpretty.register_uri",
"httpretty.disable",
"httpretty.reset",
"httpretty.enable",
"os.path.join"
] |
[((603, 621), 'httpretty.enable', 'httpretty.enable', ([], {}), '()\n', (619, 621), False, 'import httpretty\n'), ((630, 716), 'httpretty.register_uri', 'httpretty.register_uri', (['httpretty.POST', 'self.mock_url'], {'body': 'self.request_callback'}), '(httpretty.POST, self.mock_url, body=self.\n request_callback)\n', (652, 716), False, 'import httpretty\n'), ((800, 819), 'httpretty.disable', 'httpretty.disable', ([], {}), '()\n', (817, 819), False, 'import httpretty\n'), ((828, 845), 'httpretty.reset', 'httpretty.reset', ([], {}), '()\n', (843, 845), False, 'import httpretty\n'), ((447, 484), 'os.path.join', 'os.path.join', (['self.mock_dir', 'filename'], {}), '(self.mock_dir, filename)\n', (459, 484), False, 'import os\n')]
|
## Copyright (c) 2020 AT&T Intellectual Property. All rights reserved.
import sys
from load_db import load_graph
from load_db import intermediate
from load_db import svr_pkgs
from load_db import svr_cve_pkgs
from load_db import pkg_cve_supr
from load_db import pkg_cve_cvss_threshold
from load_db import pkgs_with_no_cve
from sbom_helpers import get_gdbpath
from sbom_helpers import mypprint
from sbom_helpers import validate_file_access
if( len(sys.argv) != 3 ):
print("There should be two arguments")
print("first arg is date eg 2019.03.16")
print("2nd arg is server eg 84a421cd887f11e887244dfe08192208")
exit()
else:
d = sys.argv[1]
svr = sys.argv[2]
gfile = get_gdbpath() + d + '.gdb'
#validate gdb file exists
validate_file_access([gfile])
graphdata = load_graph(gfile)
print("+++ file {0} ".format(gfile))
print("++++ svr {0}".format(svr))
svr_grp = intermediate(graphdata, 'type_group', svr)
print("++++ grp {0}".format(svr_grp))
hostname = intermediate(graphdata, 'type_hostname', svr)
print("++++ hostname {0}".format(hostname))
(num_pkg_vers,
num_pkgs,
pkg_ver_dict,
pkg_multiver_dict) = svr_pkgs(graphdata, svr)
print("+++++ {0} package/versions".format(num_pkg_vers))
print("+++++ {0} packages".format(num_pkgs))
mypprint(pkg_multiver_dict)
    ## print suppressed cves
    print("suppressed cve's:")
scp = svr_cve_pkgs(graphdata, svr)
sup_cves = pkg_cve_supr(scp)
mypprint(sup_cves)
## print bins of cvss
no_cves = len(pkgs_with_no_cve(scp))
print("{0} packages with no cve's:".format(no_cves))
ten_cves = pkg_cve_cvss_threshold(scp, 10, 100)
l_ten_cves = len(ten_cves)
print("{0} packages with worst cve of cvss=10:".format(l_ten_cves))
mypprint(ten_cves)
seven_cves = pkg_cve_cvss_threshold(scp, 7, 10)
l_seven_cves = len(seven_cves)
print("{0} packages with cvss <10 and >=7".format(l_seven_cves))
mypprint(seven_cves)
five_cves = pkg_cve_cvss_threshold(scp, 5, 7)
l_five_cves = len(five_cves)
print("{0} packages with cvss <7 and >=5".format(l_five_cves))
mypprint(five_cves)
low_cves = pkg_cve_cvss_threshold(scp, 0, 5)
l_low_cves = len(low_cves)
print("{0} packages with cvss <5 and >=0".format(l_low_cves))
mypprint(low_cves)
|
[
"load_db.pkg_cve_cvss_threshold",
"load_db.svr_cve_pkgs",
"load_db.pkg_cve_supr",
"load_db.intermediate",
"sbom_helpers.mypprint",
"sbom_helpers.validate_file_access",
"load_db.svr_pkgs",
"load_db.pkgs_with_no_cve",
"load_db.load_graph",
"sbom_helpers.get_gdbpath"
] |
[((736, 765), 'sbom_helpers.validate_file_access', 'validate_file_access', (['[gfile]'], {}), '([gfile])\n', (756, 765), False, 'from sbom_helpers import validate_file_access\n'), ((779, 796), 'load_db.load_graph', 'load_graph', (['gfile'], {}), '(gfile)\n', (789, 796), False, 'from load_db import load_graph\n'), ((880, 922), 'load_db.intermediate', 'intermediate', (['graphdata', '"""type_group"""', 'svr'], {}), "(graphdata, 'type_group', svr)\n", (892, 922), False, 'from load_db import intermediate\n'), ((973, 1018), 'load_db.intermediate', 'intermediate', (['graphdata', '"""type_hostname"""', 'svr'], {}), "(graphdata, 'type_hostname', svr)\n", (985, 1018), False, 'from load_db import intermediate\n'), ((1127, 1151), 'load_db.svr_pkgs', 'svr_pkgs', (['graphdata', 'svr'], {}), '(graphdata, svr)\n', (1135, 1151), False, 'from load_db import svr_pkgs\n'), ((1254, 1281), 'sbom_helpers.mypprint', 'mypprint', (['pkg_multiver_dict'], {}), '(pkg_multiver_dict)\n', (1262, 1281), False, 'from sbom_helpers import mypprint\n'), ((1339, 1367), 'load_db.svr_cve_pkgs', 'svr_cve_pkgs', (['graphdata', 'svr'], {}), '(graphdata, svr)\n', (1351, 1367), False, 'from load_db import svr_cve_pkgs\n'), ((1379, 1396), 'load_db.pkg_cve_supr', 'pkg_cve_supr', (['scp'], {}), '(scp)\n', (1391, 1396), False, 'from load_db import pkg_cve_supr\n'), ((1397, 1415), 'sbom_helpers.mypprint', 'mypprint', (['sup_cves'], {}), '(sup_cves)\n', (1405, 1415), False, 'from sbom_helpers import mypprint\n'), ((1540, 1576), 'load_db.pkg_cve_cvss_threshold', 'pkg_cve_cvss_threshold', (['scp', '(10)', '(100)'], {}), '(scp, 10, 100)\n', (1562, 1576), False, 'from load_db import pkg_cve_cvss_threshold\n'), ((1672, 1690), 'sbom_helpers.mypprint', 'mypprint', (['ten_cves'], {}), '(ten_cves)\n', (1680, 1690), False, 'from sbom_helpers import mypprint\n'), ((1705, 1739), 'load_db.pkg_cve_cvss_threshold', 'pkg_cve_cvss_threshold', (['scp', '(7)', '(10)'], {}), '(scp, 7, 10)\n', (1727, 1739), False, 'from load_db import pkg_cve_cvss_threshold\n'), ((1836, 1856), 'sbom_helpers.mypprint', 'mypprint', (['seven_cves'], {}), '(seven_cves)\n', (1844, 1856), False, 'from sbom_helpers import mypprint\n'), ((1870, 1903), 'load_db.pkg_cve_cvss_threshold', 'pkg_cve_cvss_threshold', (['scp', '(5)', '(7)'], {}), '(scp, 5, 7)\n', (1892, 1903), False, 'from load_db import pkg_cve_cvss_threshold\n'), ((1996, 2015), 'sbom_helpers.mypprint', 'mypprint', (['five_cves'], {}), '(five_cves)\n', (2004, 2015), False, 'from sbom_helpers import mypprint\n'), ((2028, 2061), 'load_db.pkg_cve_cvss_threshold', 'pkg_cve_cvss_threshold', (['scp', '(0)', '(5)'], {}), '(scp, 0, 5)\n', (2050, 2061), False, 'from load_db import pkg_cve_cvss_threshold\n'), ((2151, 2169), 'sbom_helpers.mypprint', 'mypprint', (['low_cves'], {}), '(low_cves)\n', (2159, 2169), False, 'from sbom_helpers import mypprint\n'), ((1453, 1474), 'load_db.pkgs_with_no_cve', 'pkgs_with_no_cve', (['scp'], {}), '(scp)\n', (1469, 1474), False, 'from load_db import pkgs_with_no_cve\n'), ((683, 696), 'sbom_helpers.get_gdbpath', 'get_gdbpath', ([], {}), '()\n', (694, 696), False, 'from sbom_helpers import get_gdbpath\n')]
|
# -*- coding: utf-8 -*-
# Created on Sat Jun 05 2021
# Last modified on Mon Jun 07 2021
# Copyright (c) CaMOS Development Team. All Rights Reserved.
# Distributed under a MIT License. See LICENSE for more info.
import numpy as np
from camos.tasks.analysis import Analysis
from camos.utils.generategui import NumericInput, DatasetInput
from camos.utils.units import get_time
class BurstClean(Analysis):
analysis_name = "Clean Events"
required = ["dataset"]
def __init__(self, *args, **kwargs):
super(BurstClean, self).__init__(*args, **kwargs)
def _run(
self,
duration: NumericInput("Total Duration ({})".format(get_time()), 100),
_filter_min: NumericInput("Minimum Events/{}".format(get_time()), 1),
_filter_max: NumericInput("Maximum Events/{}".format(get_time()), 50),
_i_data: DatasetInput("Source dataset", 0),
):
output_type = [("CellID", "int"), ("Active", "float")]
# data should be provided in format summary (active events)
data = self.signal.data[_i_data]
self.dataname = self.signal.names[_i_data]
if not ("Active" in data.dtype.names):
raise ValueError("The dataset does not have the expected shape")
# Calculates the MFR, could be given as an input?
unique, counts = np.unique(data[:]["CellID"], return_counts=True)
active = data[:]["Active"]
IDs = data[:]["CellID"]
IDs_include = unique[
np.where(
(counts >= _filter_min * duration) & (counts <= _filter_max * duration)
)
]
idx = np.isin(IDs, IDs_include)
active_filter = active[idx]
IDs_filter = IDs[idx]
# Calculate mean firing rate per cell
self.output = np.zeros(shape=(len(active_filter), 1), dtype=output_type)
self.output[:]["CellID"] = IDs_filter.reshape(-1, 1)
self.output[:]["Active"] = active_filter.reshape(-1, 1)
self.output = self.output[1:]
self.foutput = self.output
# self.notify(
# "{}: Events Before = {}; Events After = {}".format(
# self.analysis_name, len(data), len(self.output)
# ),
# "INFO",
# )
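    # Note (assumption): the annotated defaults in _run's signature
    # (NumericInput/DatasetInput) are presumably what camos.utils.generategui
    # uses to auto-build the parameter dialog for this analysis task.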
def connectComponents(self, fields):
# Changing the input data to update the duration
fields["_i_data"].connect(
lambda x: fields["duration"].widget.setText(
str(int(self.signal.properties[x]["duration"]))
)
)
|
[
"camos.utils.generategui.DatasetInput",
"numpy.isin",
"numpy.where",
"camos.utils.units.get_time",
"numpy.unique"
] |
[((1324, 1372), 'numpy.unique', 'np.unique', (["data[:]['CellID']"], {'return_counts': '(True)'}), "(data[:]['CellID'], return_counts=True)\n", (1333, 1372), True, 'import numpy as np\n'), ((1618, 1643), 'numpy.isin', 'np.isin', (['IDs', 'IDs_include'], {}), '(IDs, IDs_include)\n', (1625, 1643), True, 'import numpy as np\n'), ((850, 883), 'camos.utils.generategui.DatasetInput', 'DatasetInput', (['"""Source dataset"""', '(0)'], {}), "('Source dataset', 0)\n", (862, 883), False, 'from camos.utils.generategui import NumericInput, DatasetInput\n'), ((1482, 1567), 'numpy.where', 'np.where', (['((counts >= _filter_min * duration) & (counts <= _filter_max * duration))'], {}), '((counts >= _filter_min * duration) & (counts <= _filter_max *\n duration))\n', (1490, 1567), True, 'import numpy as np\n'), ((657, 667), 'camos.utils.units.get_time', 'get_time', ([], {}), '()\n', (665, 667), False, 'from camos.utils.units import get_time\n'), ((737, 747), 'camos.utils.units.get_time', 'get_time', ([], {}), '()\n', (745, 747), False, 'from camos.utils.units import get_time\n'), ((815, 825), 'camos.utils.units.get_time', 'get_time', ([], {}), '()\n', (823, 825), False, 'from camos.utils.units import get_time\n')]
|
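A minimal, self-contained sketch (plain NumPy, made-up event data) of the rate-filtering idea used in BurstClean._run above: count events per cell with np.unique, keep the cells whose count per unit time lies in the [_filter_min, _filter_max] band, then mask individual events with np.isin.

import numpy as np

# Hypothetical event stream: one entry per event, labelled by cell ID.
cell_ids = np.array([0, 0, 0, 1, 2, 2, 2, 2, 2, 2])
duration = 2.0                 # total recording time
rate_min, rate_max = 1.0, 2.0  # allowed events per time unit

ids, counts = np.unique(cell_ids, return_counts=True)
keep = ids[(counts >= rate_min * duration) & (counts <= rate_max * duration)]
mask = np.isin(cell_ids, keep)  # boolean mask over individual events
print(cell_ids[mask])         # -> [0 0 0]; cells 1 and 2 fall outside the band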
from __future__ import print_function
import mxnet as mx
from mxnet.gluon import nn
from mxnet.gluon.model_zoo.custom_layers import HybridConcurrent, Identity
from mxnet.gluon.model_zoo.vision import get_model
def test_concurrent():
model = HybridConcurrent(concat_dim=1)
model.add(nn.Dense(128, activation='tanh', in_units=10))
model.add(nn.Dense(64, activation='tanh', in_units=10))
model.add(nn.Dense(32, in_units=10))
# symbol
x = mx.sym.var('data')
y = model(x)
assert len(y.list_arguments()) == 7
# ndarray
model.collect_params().initialize(mx.init.Xavier(magnitude=2.24))
x = model(mx.nd.zeros((32, 10)))
assert x.shape == (32, 224)
x.wait_to_read()
def test_identity():
model = Identity()
x = mx.nd.random_uniform(shape=(128, 33, 64))
mx.test_utils.assert_almost_equal(model(x).asnumpy(),
x.asnumpy())
def test_models():
all_models = ['resnet18_v1', 'resnet34_v1', 'resnet50_v1', 'resnet101_v1', 'resnet152_v1',
'resnet18_v2', 'resnet34_v2', 'resnet50_v2', 'resnet101_v2', 'resnet152_v2',
'vgg11', 'vgg13', 'vgg16', 'vgg19',
'vgg11_bn', 'vgg13_bn', 'vgg16_bn', 'vgg19_bn',
'alexnet', 'inceptionv3',
'densenet121', 'densenet161', 'densenet169', 'densenet201',
'squeezenet1.0', 'squeezenet1.1']
pretrained_to_test = set(['squeezenet1.1'])
for model_name in all_models:
test_pretrain = model_name in pretrained_to_test
model = get_model(model_name, pretrained=test_pretrain)
data_shape = (7, 3, 224, 224) if 'inception' not in model_name else (7, 3, 299, 299)
print(model)
if not test_pretrain:
model.collect_params().initialize()
model(mx.nd.random_uniform(shape=data_shape))
if __name__ == '__main__':
import nose
nose.runmodule()
|
[
"mxnet.gluon.nn.Dense",
"nose.runmodule",
"mxnet.gluon.model_zoo.custom_layers.Identity",
"mxnet.nd.random_uniform",
"mxnet.nd.zeros",
"mxnet.sym.var",
"mxnet.gluon.model_zoo.custom_layers.HybridConcurrent",
"mxnet.gluon.model_zoo.vision.get_model",
"mxnet.init.Xavier"
] |
[((247, 277), 'mxnet.gluon.model_zoo.custom_layers.HybridConcurrent', 'HybridConcurrent', ([], {'concat_dim': '(1)'}), '(concat_dim=1)\n', (263, 277), False, 'from mxnet.gluon.model_zoo.custom_layers import HybridConcurrent, Identity\n'), ((462, 480), 'mxnet.sym.var', 'mx.sym.var', (['"""data"""'], {}), "('data')\n", (472, 480), True, 'import mxnet as mx\n'), ((748, 758), 'mxnet.gluon.model_zoo.custom_layers.Identity', 'Identity', ([], {}), '()\n', (756, 758), False, 'from mxnet.gluon.model_zoo.custom_layers import HybridConcurrent, Identity\n'), ((767, 808), 'mxnet.nd.random_uniform', 'mx.nd.random_uniform', ([], {'shape': '(128, 33, 64)'}), '(shape=(128, 33, 64))\n', (787, 808), True, 'import mxnet as mx\n'), ((1922, 1938), 'nose.runmodule', 'nose.runmodule', ([], {}), '()\n', (1936, 1938), False, 'import nose\n'), ((292, 337), 'mxnet.gluon.nn.Dense', 'nn.Dense', (['(128)'], {'activation': '"""tanh"""', 'in_units': '(10)'}), "(128, activation='tanh', in_units=10)\n", (300, 337), False, 'from mxnet.gluon import nn\n'), ((353, 397), 'mxnet.gluon.nn.Dense', 'nn.Dense', (['(64)'], {'activation': '"""tanh"""', 'in_units': '(10)'}), "(64, activation='tanh', in_units=10)\n", (361, 397), False, 'from mxnet.gluon import nn\n'), ((413, 438), 'mxnet.gluon.nn.Dense', 'nn.Dense', (['(32)'], {'in_units': '(10)'}), '(32, in_units=10)\n', (421, 438), False, 'from mxnet.gluon import nn\n'), ((591, 621), 'mxnet.init.Xavier', 'mx.init.Xavier', ([], {'magnitude': '(2.24)'}), '(magnitude=2.24)\n', (605, 621), True, 'import mxnet as mx\n'), ((637, 658), 'mxnet.nd.zeros', 'mx.nd.zeros', (['(32, 10)'], {}), '((32, 10))\n', (648, 658), True, 'import mxnet as mx\n'), ((1579, 1626), 'mxnet.gluon.model_zoo.vision.get_model', 'get_model', (['model_name'], {'pretrained': 'test_pretrain'}), '(model_name, pretrained=test_pretrain)\n', (1588, 1626), False, 'from mxnet.gluon.model_zoo.vision import get_model\n'), ((1833, 1871), 'mxnet.nd.random_uniform', 'mx.nd.random_uniform', ([], {'shape': 'data_shape'}), '(shape=data_shape)\n', (1853, 1871), True, 'import mxnet as mx\n')]
|
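The assertion x.shape == (32, 224) in test_concurrent above is just concatenation arithmetic: HybridConcurrent joins the three Dense outputs along concat_dim=1, and 128 + 64 + 32 = 224. A framework-agnostic NumPy check of the same shape logic (the zero arrays are stand-ins for the branch outputs):

import numpy as np

batch = 32
branches = [np.zeros((batch, units)) for units in (128, 64, 32)]  # Dense branch stand-ins
out = np.concatenate(branches, axis=1)                       # concat_dim=1
assert out.shape == (32, 224)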
"""
Created on April 13, 2018
Edited on July 05, 2019
@author: <NAME> & <NAME>
Sony CSL Paris, France
Institute for Computational Perception, Johannes Kepler University, Linz
Austrian Research Institute for Artificial Intelligence, Vienna
"""
import numpy as np
import librosa
import torch.utils.data as data
import torch
import logging
import PIL
from scipy.signal import get_window
from torchvision.transforms import Resize, ToPILImage, ToTensor, Compose, \
CenterCrop
from complex_auto.util import to_numpy, cached
LOGGER = logging.getLogger(__name__)
def standardize_(ngram):
ngram = ngram - ngram.mean()
std = ngram.std()
if std > 1e-8:
ngram = .1 * ngram / std
return ngram
class Data(object):
def __init__(self, data_x, data_y, standardize=False):
self.data_x = data_x
self.data_y = data_y
def __getitem__(self, index):
return [standardize_(torch.FloatTensor(self.data_x[index])),
standardize_(torch.FloatTensor(self.data_y[index])),
-1, -1, -1]
def __len__(self):
return len(self.data_x)
class DataSampler(object):
def __init__(self, data_x, length_ngram, samples_epoch, standard=True,
shifts=[24, 24], scales=[1., 0], shuffle=True,
transform=(0, 1, 2), emph_onset=0, random_pairs=False):
"""
Returns random ngrams from data, can shift and scale data in two
dimensions
:param data_x: data (2d)
:param length_ngram: length of sampled ngrams
:param samples_epoch: number of samples per epoch
:param standard: if instances should be standardized
:param shifts: 2-tuple, maximal random shifts in two dimensions
:param scales: 2-tuple, maximal random scaling in two dimensions
:param shuffle: instances are returned in random order
:param transform: iterable; which transforms should be applied.
pitch_shift (0), time shift (1), tempo-change (2)
:param emph_onset: onsets are emphasized
:param random_pairs: a pair is sampled using two random (unrelated)
instances
"""
self.data_x = data_x
self.length_ngram = length_ngram
self.samples_epoch = samples_epoch
self.standard = standard
self.max_x = shifts[0]
self.max_y = shifts[1]
self.scale_x = scales[0]
self.scale_y = scales[1]
self.shuffle = shuffle
self.transform = transform
self.emph_onset = emph_onset
self.random_pairs = random_pairs
self.check_lengths()
def check_lengths(self):
delete = []
for i, song in enumerate(self.data_x):
max_ = song.shape[1] - self.length_ngram - self.max_x
if not self.max_x < max_:
print(f"Warning: Song number {i} is too short to be used "
f"with ngram length {self.length_ngram} and maximal "
f"time shift of {self.max_x} (will be ignored)!")
delete.append(i)
self.data_x = [i for j, i in enumerate(self.data_x) if j not in
delete]
def __len__(self):
if not self.shuffle:
return self.get_ngram_count()
return self.samples_epoch
def __getitem__(self, index):
# Transform: pitch_shift (0), time shift (1), tempo-change (2)
if self.transform is None:
# random transform
transform = np.random.randint(0, 3)
else:
transform = np.random.choice(self.transform)
if self.random_pairs:
# song_id, start, end = self.get_random_ngram()
# ngram = self.data_x[song_id][:, start:end].copy()
# song_id, start, end = self.get_random_ngram()
# ngram_trans = self.data_x[song_id][:, start:end].copy()
if np.random.randint(2) == 0:
[ngram, ngram_trans], song_id = self.get_pairs_same_song()
label = -1
transform = -1 # skips transformation codes
else:
song_id, start, end = self.get_ngram_by_idx(index)
ngram = self.data_x[song_id][:, start:end].copy()
elif self.shuffle:
song_id, start, end = self.get_random_ngram()
ngram = self.data_x[song_id][:, start:end].copy()
else:
song_id, start, end = self.get_ngram_by_idx(index)
ngram = self.data_x[song_id][:, start:end].copy()
# Normalization needed for PIL image processing (scale)
ngram -= ngram.min()
if ngram.max() > 1e-6:
ngram /= ngram.max()
assert ngram.shape[1] != 0, f"{start}, {end}," \
f"{self.data_x[song_id].shape[1]}, " \
f"{self.max_x}"
if transform == 1:
if self.max_x == 0:
shiftx = 0
else:
shiftx = np.random.randint(-self.max_x, self.max_x)
ngram_trans = self.trans_time_shift(end, song_id, start,
shiftx)
label = "shiftx" + str(shiftx)
if transform == 0:
if self.max_y == 0:
shifty = 0
else:
shifty = np.random.randint(-self.max_y, self.max_y)
ngram_trans = self.trans_pitch_shift(ngram, shifty)
label = "shifty" + str(shifty)
if transform == 2:
scale_x = 1 + self.scale_x * np.random.rand()
ngram, ngram_trans, minus = self.trans_speed_change(ngram, scale_x)
label = scale_x if not minus else -scale_x
label = "scale" + str(label)
ngram = to_numpy(ngram)
ngram_trans = to_numpy(ngram_trans)
ngram_onset = np.diff(np.concatenate((ngram[:, 0:1], ngram), axis=1),
axis=1)
ngram_trans_onset = np.diff(np.concatenate((ngram_trans[:, 0:1],
ngram_trans), axis=1), axis=1)
ngram_onset[ngram_onset < 0] = 0
ngram_trans_onset[ngram_trans_onset < 0] = 0
ngram = ngram + ngram_onset * self.emph_onset
ngram_trans = ngram_trans + ngram_trans_onset * self.emph_onset
if self.standard:
ngram = self.standardize(ngram)
ngram_trans = self.standardize(ngram_trans)
ngram = torch.FloatTensor(ngram).view(-1)
ngram_trans = torch.FloatTensor(ngram_trans).view(-1)
return ngram+1e-8, ngram_trans+1e-8, transform, song_id, label
def get_ngram_count(self):
count = 0
count_data = len(self.data_x)
for i in range(count_data):
len_data = self.data_x[i].shape[1]
startmin = 2 * self.max_x
startmax = len_data - self.length_ngram - 2 * self.max_x
count += startmax - startmin
return count
def get_ngram_by_idx(self, index):
count = 0
count_data = len(self.data_x)
for i in range(count_data):
len_data = self.data_x[i].shape[1]
startmin = 2 * self.max_x
startmax = len_data - self.length_ngram - 2 * self.max_x
if index >= count and index + startmin < count + startmax:
song_id = i
start = index - count + startmin
break
count += startmax - startmin
end = start + self.length_ngram
return song_id, start, end
def get_random_ngram(self):
count_data = len(self.data_x)
song_id = np.random.randint(0, count_data)
len_data = self.data_x[song_id].shape[1]
start = np.random.randint(self.max_x,
len_data - self.length_ngram - self.max_x)
end = start + self.length_ngram
return song_id, start, end
def get_pairs_same_song(self):
count_data = len(self.data_x)
song_id = np.random.randint(0, count_data)
len_data = self.data_x[song_id].shape[1]
pairs = []
for i in range(2):
start = np.random.randint(2 * self.max_x,
len_data - self.length_ngram - 2 * self.max_x)
end = start + self.length_ngram
ngram = self.data_x[song_id][:, start:end].copy()
pairs.append(ngram)
return pairs, song_id
def trans_speed_change(self, ngram, scale_x):
size1 = ngram.shape[1]
size0 = ngram.shape[0]
new_size_t_x = int(scale_x * size1)
new_size_t_y = ngram.shape[0]
transform_out = Compose([
ToPILImage(),
Resize((new_size_t_y, new_size_t_x),
interpolation=PIL.Image.NEAREST),
CenterCrop((size0, size1)),
ToTensor()
])
ngram_trans = transform_out(torch.FloatTensor(ngram).unsqueeze(0))
minus = False
if np.random.randint(0, 2) == 1:
ngram_ = ngram
ngram = ngram_trans
ngram_trans = ngram_
minus = True
return ngram, ngram_trans, minus
def trans_pitch_shift(self, ngram, shifty):
return to_numpy(self.transp0(torch.FloatTensor(ngram), shifty))
def trans_time_shift(self, end, song_id, start, shiftx):
return self.data_x[song_id][:, start + shiftx:end + shiftx]
def standardize(self, ngram):
ngram = ngram - ngram.mean()
std = ngram.std()
ngram = .1 * ngram / (std + 1e-8)
return ngram
def transp0(self, x, shift):
"""
Transposes axis 0 (zero-based) of x by [shift] steps.
Missing information is padded with zeros.
:param x: the array to transpose
:param shift: the transposition distance
:return: x transposed
"""
if shift == 0:
return x
pad = torch.zeros(abs(shift), x.size(1))
if shift < 0:
return torch.cat([pad, x[:-abs(shift), :]], dim=0)
return torch.cat([x[abs(shift):, :], pad], dim=0)
def transp1(self, x, shift):
"""
Transposes axis 1 (zero-based) of x by [shift] steps.
Missing information is padded with zeros.
:param x: the array to transpose
:param shift: the transposition distance
:return: x transposed
"""
if shift == 0:
return x
pad = torch.zeros(x.size(1), abs(shift))
if shift < 0:
return torch.cat([pad, x[:, :-abs(shift)]], dim=1)
return torch.cat([x[:, abs(shift):], pad], dim=1)
class Signal(data.Dataset):
def __init__(self, filelist, sr="22050", trg_shift=0, block_size=1024,
refresh_cache=False, cache_fn="signal_cache.pyc.bz",
allow_diff_shapes=False, padded=False, random_shift=0,
samples_epoch=1000, window='hann'):
"""
Constructor for 1D signal dataset
:param filelist: list of audio file names (str)
:param sr: desired sample rate
:param trg_shift: target == input shifted by [-trg_shift] steps,
blocks are shortened accordingly
:param block_size: length of one instance in a batch
:param refresh_cache: when True recalculate and save to cache file
when False loads from cache file when available
:param cache_fn: filename of cache file
"""
self.trg_shift = trg_shift
self.block_size = block_size
self.sr = sr
self.allow_diff_shapes = allow_diff_shapes
self.padded = padded
self.random_shift = random_shift
self.window = window
self.samples_epoch = samples_epoch
self.signals = cached(cache_fn, self.load_files, (filelist,),
refresh_cache=refresh_cache)
def __getitem__(self, index):
rand_inst = np.random.randint(len(self.signals))
if self.random_shift > 0:
shift = np.random.randint(-self.random_shift, self.random_shift)
else:
shift = self.trg_shift
rand_pos = np.random.randint(abs(shift),
len(self.signals[rand_inst]) -
abs(shift) - self.block_size)
w = get_window(self.window, self.block_size)
x = self.signals[rand_inst][rand_pos:rand_pos+self.block_size]
y = self.signals[rand_inst][rand_pos+shift:
rand_pos+shift+self.block_size, :]
x = torch.FloatTensor(x.squeeze() * w)
y = torch.FloatTensor(y.squeeze() * w)
x = self.standardize(x)
y = self.standardize(y)
return x, y, -1, -1, -1
def standardize(self, signal):
ngram = signal - signal.mean()
std = ngram.std()
if std > 1e-6:
ngram = ngram / std
else: # prevent empty input
ngram = ngram + 1e-8
return ngram
def __len__(self):
return self.samples_epoch
def load_files(self, filelist):
data_all = []
for file in filelist:
file = file.strip('\n')
print(f"loading file {file}")
signal = librosa.load(file)[0][:, None]
data_all.append(signal)
if len(data_all) == 0:
LOGGER.warning("No data added to Signal Dataset!")
return data_all
|
[
"complex_auto.util.cached",
"numpy.concatenate",
"scipy.signal.get_window",
"torch.FloatTensor",
"torchvision.transforms.ToPILImage",
"torchvision.transforms.ToTensor",
"complex_auto.util.to_numpy",
"numpy.random.randint",
"librosa.load",
"numpy.random.choice",
"numpy.random.rand",
"torchvision.transforms.CenterCrop",
"logging.getLogger",
"torchvision.transforms.Resize"
] |
[((541, 568), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (558, 568), False, 'import logging\n'), ((7686, 7718), 'numpy.random.randint', 'np.random.randint', (['(0)', 'count_data'], {}), '(0, count_data)\n', (7703, 7718), True, 'import numpy as np\n'), ((7784, 7856), 'numpy.random.randint', 'np.random.randint', (['self.max_x', '(len_data - self.length_ngram - self.max_x)'], {}), '(self.max_x, len_data - self.length_ngram - self.max_x)\n', (7801, 7856), True, 'import numpy as np\n'), ((8058, 8090), 'numpy.random.randint', 'np.random.randint', (['(0)', 'count_data'], {}), '(0, count_data)\n', (8075, 8090), True, 'import numpy as np\n'), ((11906, 11981), 'complex_auto.util.cached', 'cached', (['cache_fn', 'self.load_files', '(filelist,)'], {'refresh_cache': 'refresh_cache'}), '(cache_fn, self.load_files, (filelist,), refresh_cache=refresh_cache)\n', (11912, 11981), False, 'from complex_auto.util import to_numpy, cached\n'), ((12463, 12503), 'scipy.signal.get_window', 'get_window', (['self.window', 'self.block_size'], {}), '(self.window, self.block_size)\n', (12473, 12503), False, 'from scipy.signal import get_window\n'), ((3513, 3536), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (3530, 3536), True, 'import numpy as np\n'), ((3575, 3607), 'numpy.random.choice', 'np.random.choice', (['self.transform'], {}), '(self.transform)\n', (3591, 3607), True, 'import numpy as np\n'), ((5781, 5796), 'complex_auto.util.to_numpy', 'to_numpy', (['ngram'], {}), '(ngram)\n', (5789, 5796), False, 'from complex_auto.util import to_numpy, cached\n'), ((5823, 5844), 'complex_auto.util.to_numpy', 'to_numpy', (['ngram_trans'], {}), '(ngram_trans)\n', (5831, 5844), False, 'from complex_auto.util import to_numpy, cached\n'), ((5876, 5922), 'numpy.concatenate', 'np.concatenate', (['(ngram[:, 0:1], ngram)'], {'axis': '(1)'}), '((ngram[:, 0:1], ngram), axis=1)\n', (5890, 5922), True, 'import numpy as np\n'), ((6037, 6095), 'numpy.concatenate', 'np.concatenate', (['(ngram_trans[:, 0:1], ngram_trans)'], {'axis': '(1)'}), '((ngram_trans[:, 0:1], ngram_trans), axis=1)\n', (6051, 6095), True, 'import numpy as np\n'), ((8206, 8291), 'numpy.random.randint', 'np.random.randint', (['(2 * self.max_x)', '(len_data - self.length_ngram - 2 * self.max_x)'], {}), '(2 * self.max_x, len_data - self.length_ngram - 2 * self.max_x\n )\n', (8223, 8291), True, 'import numpy as np\n'), ((9034, 9057), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (9051, 9057), True, 'import numpy as np\n'), ((12159, 12215), 'numpy.random.randint', 'np.random.randint', (['(-self.random_shift)', 'self.random_shift'], {}), '(-self.random_shift, self.random_shift)\n', (12176, 12215), True, 'import numpy as np\n'), ((923, 960), 'torch.FloatTensor', 'torch.FloatTensor', (['self.data_x[index]'], {}), '(self.data_x[index])\n', (940, 960), False, 'import torch\n'), ((992, 1029), 'torch.FloatTensor', 'torch.FloatTensor', (['self.data_y[index]'], {}), '(self.data_y[index])\n', (1009, 1029), False, 'import torch\n'), ((3908, 3928), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (3925, 3928), True, 'import numpy as np\n'), ((5007, 5049), 'numpy.random.randint', 'np.random.randint', (['(-self.max_x)', 'self.max_x'], {}), '(-self.max_x, self.max_x)\n', (5024, 5049), True, 'import numpy as np\n'), ((5349, 5391), 'numpy.random.randint', 'np.random.randint', (['(-self.max_y)', 'self.max_y'], {}), '(-self.max_y, self.max_y)\n', (5366, 5391), True, 'import numpy as np\n'), ((6516, 6540), 'torch.FloatTensor', 'torch.FloatTensor', (['ngram'], {}), '(ngram)\n', (6533, 6540), False, 'import torch\n'), ((6572, 6602), 'torch.FloatTensor', 'torch.FloatTensor', (['ngram_trans'], {}), '(ngram_trans)\n', (6589, 6602), False, 'import torch\n'), ((8735, 8747), 'torchvision.transforms.ToPILImage', 'ToPILImage', ([], {}), '()\n', (8745, 8747), False, 'from torchvision.transforms import Resize, ToPILImage, ToTensor, Compose, CenterCrop\n'), ((8761, 8830), 'torchvision.transforms.Resize', 'Resize', (['(new_size_t_y, new_size_t_x)'], {'interpolation': 'PIL.Image.NEAREST'}), '((new_size_t_y, new_size_t_x), interpolation=PIL.Image.NEAREST)\n', (8767, 8830), False, 'from torchvision.transforms import Resize, ToPILImage, ToTensor, Compose, CenterCrop\n'), ((8863, 8889), 'torchvision.transforms.CenterCrop', 'CenterCrop', (['(size0, size1)'], {}), '((size0, size1))\n', (8873, 8889), False, 'from torchvision.transforms import Resize, ToPILImage, ToTensor, Compose, CenterCrop\n'), ((8903, 8913), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (8911, 8913), False, 'from torchvision.transforms import Resize, ToPILImage, ToTensor, Compose, CenterCrop\n'), ((9309, 9333), 'torch.FloatTensor', 'torch.FloatTensor', (['ngram'], {}), '(ngram)\n', (9326, 9333), False, 'import torch\n'), ((5568, 5584), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5582, 5584), True, 'import numpy as np\n'), ((8961, 8985), 'torch.FloatTensor', 'torch.FloatTensor', (['ngram'], {}), '(ngram)\n', (8978, 8985), False, 'import torch\n'), ((13382, 13400), 'librosa.load', 'librosa.load', (['file'], {}), '(file)\n', (13394, 13400), False, 'import librosa\n')]
|
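A standalone sketch of the zero-padded shift implemented by DataSampler.transp0 above, reproduced here so the padding direction is easy to verify on a tiny tensor:

import torch

def transp0(x, shift):
    # Shift rows of x by `shift` steps; vacated rows are filled with zeros.
    if shift == 0:
        return x
    pad = torch.zeros(abs(shift), x.size(1))
    if shift < 0:
        return torch.cat([pad, x[:-abs(shift), :]], dim=0)
    return torch.cat([x[abs(shift):, :], pad], dim=0)

x = torch.arange(6.0).reshape(3, 2)
print(transp0(x, 1))   # rows move up; the last row becomes zeros
print(transp0(x, -1))  # rows move down; the first row becomes zeros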
import os
import errno
import itertools
directory = 'C:/Users/Jan/Dropbox/Bachelorarbeit/Programm/Testdaten/Raw DataSet/'
# listdir = [file for file in os.listdir(directory) if file not in ['capa.txt', 'capb.txt', 'capc.txt']]
# for d in listdir:
# print('Opening dir: ', directory+'/'+d)
# with open(directory+'/'+d) as f:
# firstLine = True
# numLocs = 0
# numCusts = 0
# text = ''
# f_i = []
# d_j = []
# b_i = []
# first_line = f.readline()
# numLocs = int(first_line.split()[0])
# numCusts = int(first_line.split()[1])
# c_ij = [[(0, 0, 0) for cus in range(numCusts)] for loc in range(numLocs) ]
# for i, line in enumerate(f):
# if i < numLocs:
# b_i.append((i, float(line.split()[0])))
# f_i.append((i, float(line.split()[1])))
# else:
# for number in line.split():
# text += ' ' + number
# text = text[1:]
# for index, item in enumerate(text.split(' ')):
# if index % (numLocs+1) == 0:
# d_j.append((index, float(item)))
# text = [val for index, val in enumerate(text.split(' ')) if index % (numLocs+1) != 0]
# for customer in range(numCusts):
# firstLine = True
# actual_allocating_costs = []
# for index, val in enumerate(text[:numLocs]):
# c_ij[index][customer] = (index, customer, float(val))
# if len(text) > numLocs:
# text = text[numLocs:]
# directory = "C:/Users/Jan/Dropbox/Bachelorarbeit/Programm/Testdaten"
# for d in listdir:
# files_to_save = ['cij.txt', 'dj.txt', 'bi.txt', 'fi.txt']
# for file, data in zip(files_to_save, [c_ij, d_j, b_i, f_i]):
# print(directory+'/'+d.split('.')[0]+'/'+file)
# os.makedirs(os.path.dirname(directory+'/'+d.split('.')[0]+'/'+file), exist_ok=True)
# with open(directory+'/'+d.split('.')[0]+'/'+file, "w") as f:
# if len(data[0]) == 2:
# for val in data:
# f.write(' '.join(map(str, val))+'\n')
# else:
# for val_i in data:
# for val_j in val_i:
# f.write(' '.join(map(str, val_j)) + '\n')
#
correct = [56, 94, 89]
listdir = [file for file in os.listdir(directory) if file in ['capa.txt', 'capb.txt', 'capc.txt']]
capacity_amount = 8000.0
for c, d in zip(range(len(listdir)), listdir):
print('Opening dir: ', directory+d)
with open(directory+'/'+d) as f:
#Init Vars
firstLine = True
numLocs = 0
numCusts = 0
text = ''
f_i = []
d_j = []
b_i = []
first_line = f.readline()
numLocs = int(first_line.split()[0])
numCusts = int(first_line.split()[1])
c_ij = []
# Start Parsing
for i, line in enumerate(f):
if i <= numLocs and i > 0:
b_i.append(capacity_amount)
f_i.append(line.split(" ")[2])
else:
for number in line.split(" "):
text += " " + number
text_list = [item for index, item in enumerate(text.split()) if item != " " and item != "\n" and item != "capacity"]
text_list[c] = correct[c]
for item,counter in zip(text_list, range(len(text_list))):
if counter % (numLocs+1) == 0:
d_j.append(item)
else:
if " " in item:
c_ij.append(item.split(" ")[-1])
else:
c_ij.append(item)
directory = "C:/Users/Jan/Dropbox/Bachelorarbeit/Programm/Testdaten"
for d in listdir:
files_to_save = ['cij.txt', 'dj.txt', 'bi.txt', 'fi.txt']
for file, data in zip(files_to_save, [c_ij, d_j, b_i, f_i]):
print(directory+'/'+d.split('.')[0]+'/'+file)
os.makedirs(os.path.dirname(directory+'/'+d.split('.')[0]+'/'+file), exist_ok=True)
with open(directory+'/'+d.split('.')[0]+'/'+file, "w") as f:
for val in data:
f.write(str(val)+'\n')
|
[
"os.listdir"
] |
[((2396, 2417), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (2406, 2417), False, 'import os\n')]
|
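The capacity-file parsing above relies on each customer's row being flattened into a token stream of length numLocs + 1, where the leading token is the demand d_j and the remaining tokens are allocation costs c_ij. A toy reconstruction of that stride logic with made-up numbers:

num_locs = 3
tokens = ["10", "1.5", "2.5", "3.5", "20", "4.5", "5.5", "6.5"]  # two customers

d_j, c_ij = [], []
for i, tok in enumerate(tokens):
    if i % (num_locs + 1) == 0:
        d_j.append(float(tok))   # first token of each row is the demand
    else:
        c_ij.append(float(tok))  # remaining tokens are assignment costs
assert d_j == [10.0, 20.0] and len(c_ij) == 2 * num_locs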
import schedule
import time
from gql import Main
import configparser
import json
from jsondiff import diff
from writedb import writedb
import pandas as pd
from pandas import DataFrame
config = configparser.RawConfigParser()
config.read('refresh_time.cfg')
interval = config.getint('Main','time')
t = int(interval)
response = Main.git_activities()
new = response['data']['repository']
namewithowner = new['nameWithOwner']
watchers = new['watchers']['totalCount']
fork = new['forkCount']
stars = new['stargazers']['totalCount']
commit_count = new['object']['history']['totalCount']
commit = new['object']['history']['edges']
writedb.write_repo(namewithowner,fork,stars,watchers,commit_count)
for each in commit:
date = each['node']['committedDate']
committer = each['node']['committer']['name']
message = each['node']['messageHeadline']
writedb.write_commit(committer,date,message,namewithowner)
def job():
response1 = Main.git_activities()
time.sleep(300)
response2 =Main.git_activities()
r = diff(response1,response2)
df = DataFrame(response1)
df2 = DataFrame(response2)
ne = (df != df2).any(1)
print(ne)
if r=={}:
print('no difference in git activites from last response')
else:
print('difference')
new = response2['data']['repository']
namewithowner = new['nameWithOwner']
watchers = new['watchers']['totalCount']
fork = new['forkCount']
stars = new['stargazers']['totalCount']
commit_count = new['object']['history']['totalCount']
commit = new['object']['history']['edges']
writedb.update_repo(namewithowner,fork,stars,watchers,commit_count)
for each in commit:
date = each['node']['committedDate']
committer = each['node']['committer']['name']
message = each['node']['messageHeadline']
writedb.insert_new(committer,date,message,namewithowner)
while True:
job()
|
[
"pandas.DataFrame",
"jsondiff.diff",
"gql.Main.git_activities",
"configparser.RawConfigParser",
"writedb.writedb.write_repo",
"time.sleep",
"writedb.writedb.update_repo",
"writedb.writedb.insert_new",
"writedb.writedb.write_commit"
] |
[((195, 225), 'configparser.RawConfigParser', 'configparser.RawConfigParser', ([], {}), '()\n', (223, 225), False, 'import configparser\n'), ((328, 349), 'gql.Main.git_activities', 'Main.git_activities', ([], {}), '()\n', (347, 349), False, 'from gql import Main\n'), ((628, 698), 'writedb.writedb.write_repo', 'writedb.write_repo', (['namewithowner', 'fork', 'stars', 'watchers', 'commit_count'], {}), '(namewithowner, fork, stars, watchers, commit_count)\n', (646, 698), False, 'from writedb import writedb\n'), ((850, 911), 'writedb.writedb.write_commit', 'writedb.write_commit', (['committer', 'date', 'message', 'namewithowner'], {}), '(committer, date, message, namewithowner)\n', (870, 911), False, 'from writedb import writedb\n'), ((939, 960), 'gql.Main.git_activities', 'Main.git_activities', ([], {}), '()\n', (958, 960), False, 'from gql import Main\n'), ((963, 978), 'time.sleep', 'time.sleep', (['(300)'], {}), '(300)\n', (973, 978), False, 'import time\n'), ((992, 1013), 'gql.Main.git_activities', 'Main.git_activities', ([], {}), '()\n', (1011, 1013), False, 'from gql import Main\n'), ((1020, 1046), 'jsondiff.diff', 'diff', (['response1', 'response2'], {}), '(response1, response2)\n', (1024, 1046), False, 'from jsondiff import diff\n'), ((1053, 1073), 'pandas.DataFrame', 'DataFrame', (['response1'], {}), '(response1)\n', (1062, 1073), False, 'from pandas import DataFrame\n'), ((1082, 1102), 'pandas.DataFrame', 'DataFrame', (['response2'], {}), '(response2)\n', (1091, 1102), False, 'from pandas import DataFrame\n'), ((1557, 1628), 'writedb.writedb.update_repo', 'writedb.update_repo', (['namewithowner', 'fork', 'stars', 'watchers', 'commit_count'], {}), '(namewithowner, fork, stars, watchers, commit_count)\n', (1576, 1628), False, 'from writedb import writedb\n'), ((1799, 1858), 'writedb.writedb.insert_new', 'writedb.insert_new', (['committer', 'date', 'message', 'namewithowner'], {}), '(committer, date, message, namewithowner)\n', (1817, 1858), False, 'from writedb import writedb\n')]
|
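A minimal illustration of the jsondiff check in job() above: diff() returns an empty mapping when two responses are identical, which is exactly the condition the script uses to decide whether the database needs updating (toy dictionaries below, not real GraphQL responses):

from jsondiff import diff

old = {"repository": {"forkCount": 3, "stargazers": {"totalCount": 10}}}
new = {"repository": {"forkCount": 3, "stargazers": {"totalCount": 11}}}

print(diff(old, old) == {})  # True -> no git activity since the last poll
print(diff(old, new))        # nested mapping pointing at the changed star count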
from dataclasses import dataclass
from typing import Any, Dict
from urllib.error import HTTPError
from urllib.request import urlopen
import requests
import srsly
from huggingface_hub import cached_download, hf_hub_url
from embeddings.utils.loggers import get_logger
_logger = get_logger(__name__)
@dataclass
class StaticModelHubConfig:
repo_id: str
@property
def model_type_reference(self) -> str:
reference = self._load_hub_json("module.json")["type"]
if isinstance(reference, str):
return reference
else:
raise ValueError(f"Wrong format of import reference {reference}.")
@property
def default_config(self) -> Dict[str, Any]:
config = self._load_hub_json("default_config.json")
if isinstance(config, dict):
return config
else:
raise ValueError(f"Wrong format of default config {config}.")
def _load_hub_json(self, filename: str) -> Any:
url = self._get_file_hf_hub_url(filename)
try:
path = cached_download(url)
except requests.HTTPError:
raise EnvironmentError(
"Repository not found or wrong format of a given model (module.json not found)."
)
return srsly.read_json(path)
def _get_file_hf_hub_url(self, filename: str) -> str:
url: str = hf_hub_url(self.repo_id, filename=filename)
return url
def file_accessible(self, filename: str) -> bool:
try:
result: bool = urlopen(self._get_file_hf_hub_url(filename)).getcode() == 200
return result
except HTTPError:
return False
@dataclass
class SingleFileConfig(StaticModelHubConfig):
model_name: str
@property
def cached_model(self) -> str:
url: str = self._get_file_hf_hub_url(self.model_name)
path: str = cached_download(url)
return path
@dataclass
class GensimFileConfig(SingleFileConfig):
model_name: str
@property
def cached_model(self) -> str:
url: str = self._get_file_hf_hub_url(self.model_name)
path: str = cached_download(url)
npy_vectors_url: str = self._get_file_hf_hub_url(f"{self.model_name}.vectors.npy")
try:
cached_download(npy_vectors_url, force_filename=f"{path}.vectors.npy")
except requests.HTTPError:
_logger.info(f"{self.model_name}.vectors.npy not found, skipping it.")
return path
|
[
"embeddings.utils.loggers.get_logger",
"huggingface_hub.cached_download",
"srsly.read_json",
"huggingface_hub.hf_hub_url"
] |
[((279, 299), 'embeddings.utils.loggers.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (289, 299), False, 'from embeddings.utils.loggers import get_logger\n'), ((1267, 1288), 'srsly.read_json', 'srsly.read_json', (['path'], {}), '(path)\n', (1282, 1288), False, 'import srsly\n'), ((1367, 1410), 'huggingface_hub.hf_hub_url', 'hf_hub_url', (['self.repo_id'], {'filename': 'filename'}), '(self.repo_id, filename=filename)\n', (1377, 1410), False, 'from huggingface_hub import cached_download, hf_hub_url\n'), ((1875, 1895), 'huggingface_hub.cached_download', 'cached_download', (['url'], {}), '(url)\n', (1890, 1895), False, 'from huggingface_hub import cached_download, hf_hub_url\n'), ((2123, 2143), 'huggingface_hub.cached_download', 'cached_download', (['url'], {}), '(url)\n', (2138, 2143), False, 'from huggingface_hub import cached_download, hf_hub_url\n'), ((1049, 1069), 'huggingface_hub.cached_download', 'cached_download', (['url'], {}), '(url)\n', (1064, 1069), False, 'from huggingface_hub import cached_download, hf_hub_url\n'), ((2262, 2332), 'huggingface_hub.cached_download', 'cached_download', (['npy_vectors_url'], {'force_filename': 'f"""{path}.vectors.npy"""'}), "(npy_vectors_url, force_filename=f'{path}.vectors.npy')\n", (2277, 2332), False, 'from huggingface_hub import cached_download, hf_hub_url\n')]
|
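A usage sketch for the hub-config classes above. The repo id and file name are hypothetical placeholders, and the property accesses trigger real downloads via huggingface_hub, so treat this as illustrative rather than guaranteed-working:

config = SingleFileConfig(repo_id="some-org/some-static-embedding",  # hypothetical repo
                         model_name="model.bin")               # hypothetical file

if config.file_accessible("module.json"):
    print(config.model_type_reference)  # import path stored in module.json
    print(config.default_config)        # defaults from default_config.json

local_path = config.cached_model  # downloads (or reuses) the cached weights file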
from dataprovider import Date, Validator, RSSReader, StoryRSS
from models import ModelRSS
from threading import Thread
import logging
import schedule
import time
import json
import os
logging.basicConfig(filename=os.getenv("BIASIMPACTER_OUTPUT"),
level=logging.INFO,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
logging.getLogger().addHandler(logging.StreamHandler())
def set_up_mongo():
try:
mongo_host = os.getenv("BIASIMPACTER_DC_MONGO_HOST")
mongo_port = os.getenv("MONGO_PORT")
mongo_db = os.getenv("APP_MONGO_DB")
mongo_user = os.getenv("APP_MONGO_USER")
mongo_pw = os.getenv("APP_MONGO_PASS")
mongo_uri = "mongodb://{}:{}@{}:{}/{}".format(
mongo_user, mongo_pw, mongo_host, mongo_port, mongo_db)
logging.info(mongo_uri)
return mongo_uri
    except Exception as e:
logging.error(e)
def read_source(datapath=os.path.join(os.path.dirname(os.path.dirname(__file__)), "source.txt")):
with open(datapath, 'r') as f:
return [line.rstrip().split(", ") for line in f]
def main():
uri = set_up_mongo()
mongo_rss = ModelRSS(uri)
urls = read_source()
for name, url in urls:
try:
logging.info("Reading story: {}".format(name))
story = StoryRSS(name, url, mongo_rss)
story.save_story()
except Exception as e:
logging.error(e)
if __name__ == "__main__":
main()
|
[
"logging.error",
"os.path.dirname",
"logging.StreamHandler",
"logging.info",
"dataprovider.StoryRSS",
"models.ModelRSS",
"os.getenv",
"logging.getLogger"
] |
[((404, 427), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (425, 427), False, 'import logging\n'), ((1187, 1200), 'models.ModelRSS', 'ModelRSS', (['uri'], {}), '(uri)\n', (1195, 1200), False, 'from models import ModelRSS\n'), ((215, 247), 'os.getenv', 'os.getenv', (['"""BIASIMPACTER_OUTPUT"""'], {}), "('BIASIMPACTER_OUTPUT')\n", (224, 247), False, 'import os\n'), ((373, 392), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (390, 392), False, 'import logging\n'), ((481, 520), 'os.getenv', 'os.getenv', (['"""BIASIMPACTER_DC_MONGO_HOST"""'], {}), "('BIASIMPACTER_DC_MONGO_HOST')\n", (490, 520), False, 'import os\n'), ((542, 565), 'os.getenv', 'os.getenv', (['"""MONGO_PORT"""'], {}), "('MONGO_PORT')\n", (551, 565), False, 'import os\n'), ((585, 610), 'os.getenv', 'os.getenv', (['"""APP_MONGO_DB"""'], {}), "('APP_MONGO_DB')\n", (594, 610), False, 'import os\n'), ((632, 659), 'os.getenv', 'os.getenv', (['"""APP_MONGO_USER"""'], {}), "('APP_MONGO_USER')\n", (641, 659), False, 'import os\n'), ((679, 706), 'os.getenv', 'os.getenv', (['"""APP_MONGO_PASS"""'], {}), "('APP_MONGO_PASS')\n", (688, 706), False, 'import os\n'), ((838, 861), 'logging.info', 'logging.info', (['mongo_uri'], {}), '(mongo_uri)\n', (850, 861), False, 'import logging\n'), ((925, 941), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (938, 941), False, 'import logging\n'), ((997, 1022), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1012, 1022), False, 'import os\n'), ((1345, 1375), 'dataprovider.StoryRSS', 'StoryRSS', (['name', 'url', 'mongo_rss'], {}), '(name, url, mongo_rss)\n', (1353, 1375), False, 'from dataprovider import Date, Validator, RSSReader, StoryRSS\n'), ((1450, 1466), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (1463, 1466), False, 'import logging\n')]
|
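read_source above expects one feed per line in source.txt, formatted as "name, url". A tiny sketch of that round-trip with a hypothetical line:

line = "Example Feed, https://example.org/rss.xml\n"
name, url = line.rstrip().split(", ")
assert (name, url) == ("Example Feed", "https://example.org/rss.xml")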
import json
from math import ceil, floor
import requests
from .packet import PacketList
from .packet.base import Packet
from .transaction import Transaction
from .usage import (UsageMessage, UsageRecord, UsageResponse, UsageResponseError,
FailedUsageResponse, UsageStatus)
"""AMIE client and Usage Client classes"""
class AMIERequestError(requests.RequestException):
pass
class AMIEClient(object):
"""
AMIE Client.
Args:
site_name (str): Name of the client site.
api_key (str): API key secret
amie_url (str): Base URL for the XSEDE AMIE api
Examples:
>>> psc_client = amieclient.AMIEClient(site_name='PSC', api_key=some_secrets_store['amie_api_key'])
You can also override the amie_url and usage_url parameters, if you're
doing local development or testing out a new version.
    >>> psc_alt_base_client = amieclient.AMIEClient(site_name='PSC', api_key='test_api_key', amie_url='https://amieclient.xsede.org/v0.20_beta/')
"""
def __init__(self, site_name, api_key,
amie_url='https://amieclient.xsede.org/v0.10/'):
if not amie_url.endswith('/'):
self.amie_url = amie_url + '/'
else:
self.amie_url = amie_url
self.site_name = site_name
amie_headers = {
'XA-API-KEY': api_key,
'XA-SITE': site_name
}
s = requests.Session()
s.headers.update(amie_headers)
self._session = s
def __enter__(self):
return self
def __exit__(self, *args):
self._session.close()
@staticmethod
def _join_list(things):
if things is not None and things != []:
# If we're given a list, join it with commas
return ','.join(things)
elif things == []:
# if we're given an empty list, return None
return None
else:
# If we're given anything else, i.e. None or some other single
# thing, give it back
return things
@staticmethod
def _dt_range(start, end):
if start is None and end is None:
time_str = None
else:
start_str = start.isoformat() if start else ""
end_str = end.isoformat() if end else ""
time_str = "{},{}".format(start_str, end_str)
return time_str
def get_transaction(self, *, transaction_or_id):
"""
Given a single transaction record id, fetches the related transaction.
See the :swagger:`Swagger documentation <AMIE_Client/get_transactions__site_name___amie_transaction_id__packets/>` for more details.
Args:
transaction_or_id: The transaction or transaction record ID.
Returns:
amieclient.Transaction
"""
if isinstance(transaction_or_id, Transaction):
tx_id = transaction_or_id.trans_rec_id
else:
tx_id = transaction_or_id
url = self.amie_url + 'transactions/{}/{}/packets'.format(self.site_name, tx_id)
r = self._session.get(url)
response = r.json()
if r.status_code > 200:
message = response.get('message', 'Server did not provide an error message')
raise AMIERequestError(message, response=r)
return Transaction.from_dict(response)
def set_transaction_failed(self, *, transaction_or_id):
"""
        Given a single transaction or transaction record id, marks it failed.
See the :swagger:`Swagger documentation <AMIE_Client/put_transactions__site_name___amie_transaction_id__state_failed>` for more details.
Args:
transaction_or_id: The transaction or transaction record ID.
"""
if isinstance(transaction_or_id, Transaction):
tx_id = transaction_or_id.trans_rec_id
else:
tx_id = transaction_or_id
url = self.amie_url + 'transactions/{}/{}/state/failed'.format(self.site_name, tx_id)
r = self._session.put(url)
response = r.json()
if r.status_code > 200:
message = response.get('message', 'Server did not provide an error message')
raise AMIERequestError(message, response=r)
return r
def get_packet(self, *, packet_rec_id):
"""
Given a single packet record id, fetches the packet.
See the :swagger:`Swagger documentation <AMIE_Client/get_packets__site_name_>` for more details.
Args:
packet_rec_id: The transaction record ID.
Returns:
amieclient.Packet
"""
url = self.amie_url + 'packets/{}/{}'.format(self.site_name, packet_rec_id)
r = self._session.get(url)
response = r.json()
if r.status_code > 200:
message = response.get('message', 'Server did not provide an error message')
raise AMIERequestError(message, response=r)
return Packet.from_dict(response['result'])
def list_packets(self, *, trans_rec_ids=None, outgoing=None,
update_time_start=None, update_time_until=None,
states=None, client_states=None, transaction_states=None,
incoming=None):
"""
Fetches a list of packets based on the provided search parameters
See the :swagger:`Swagger documentation <AMIE_Client/get_packets__site_name_>` for more details.
Args:
trans_rec_ids (list): Searches for packets with these transaction record IDs.
states (list): Searches for packets with the provided states.
update_time_start (datetime.Datetime): Searches for packets updated since this time.
update_time_until (datetime.Datetime): Searches for packets updated before this time.
states (list): Searches for packets in the provided states.
client_states (list): Searches for packets in the provided client states.
transaction_states (list): Searches for packets in the provided client states.
incoming (bool): If true, search is limited to incoming packets.
Returns:
amieclient.PacketList: a list of packets matching the provided parameters.
"""
trans_rec_ids_str = self._join_list(trans_rec_ids)
states_str = self._join_list(states)
client_states_str = self._join_list(client_states)
transaction_states_str = self._join_list(transaction_states)
time_str = self._dt_range(update_time_start, update_time_until)
# Build a dict of parameters. Requests skips any with a None value,
# so no need to weed them out
params = {
'trans_rec_id': trans_rec_ids_str,
'outgoing': outgoing,
'update_time': time_str,
'states': states_str,
'client_state': client_states_str,
'transaction_state': transaction_states_str,
'incoming': incoming
}
# Get the list of packets
url = self.amie_url + 'packets/{}'.format(self.site_name)
r = self._session.get(url, params=params)
response = r.json()
if r.status_code > 200:
message = response.get('message', 'Server did not provide an error message')
raise AMIERequestError(message, response=r)
return PacketList.from_dict(response)
def send_packet(self, packet, skip_validation=False):
"""
Send a packet
See the :swagger:`Swagger documentation <AMIE_Client/post_packets__site_name_>` for more details.
Args:
packet (amieclient.Packet): The packet to send.
Returns:
requests.Response: The response from the AMIE API.
"""
if not skip_validation:
packet.validate_data(raise_on_invalid=True)
url = self.amie_url + 'packets/{}'.format(self.site_name)
r = self._session.post(url, json=packet.as_dict())
response = r.json()
if r.status_code > 200:
message = response.get('message', 'Server did not provide an error message')
raise AMIERequestError(message, response=r)
return r
def set_packet_client_state(self, packet_or_id, state):
"""
Set the client state on the server of the packet corresponding to the given
packet_or_id.
See the :swagger:`Swagger documentation <AMIE_Client/put_packets__site_name___packet_rec_id__client_state__client_state_>` for more details.
Args:
packet_or_id (Packet, int): The packet or packet_rec_id to set state on.
state (str): The state to set
"""
if isinstance(packet_or_id, Packet):
pkt_id = packet_or_id.packet_rec_id
else:
pkt_id = packet_or_id
url = self.amie_url + 'packets/{}/{}/client_state/{}'.format(self.site_name,
pkt_id, state)
r = self._session.put(url)
response = r.json()
if r.status_code > 200:
message = response.get('message', 'Server did not provide an error message')
raise AMIERequestError(message, response=r)
return r
def clear_packet_client_state(self, packet_or_id):
"""
Clears the client state on the server of the packet corresponding to the given
packet_or_id.
See the :swagger:`Swagger documentation <AMIE_Client/delete_packets__site_name___packet_rec_id__client_state_>` for more details.
Args:
packet_or_id (Packet, int): The packet or packet_rec_id to clear client_state on.
"""
if isinstance(packet_or_id, Packet):
pkt_id = packet_or_id.packet_rec_id
else:
pkt_id = packet_or_id
url = self.amie_url + 'packets/{}/{}/client_state'.format(self.site_name, pkt_id)
r = self._session.delete(url)
response = r.json()
if r.status_code > 200:
message = response.get('message', 'Server did not provide an error message')
raise AMIERequestError(message, response=r)
return r
def set_packet_client_json(self, packet_or_id, client_json):
"""
Set the client JSON on the server of the packet corresponding to the given
packet_or_id.
See the :swagger:`Swagger documentation <AMIE_Client/put_packets__site_name___packet_rec_id__client_json>` for more details.
Args:
packet_or_id (Packet, int): The packet or packet_rec_id to set client_json on.
client_json: The json to set. Can be any serializable object or a string of
JSON.
"""
if isinstance(packet_or_id, Packet):
pkt_id = packet_or_id.packet_rec_id
else:
pkt_id = packet_or_id
url = self.amie_url + 'packets/{}/{}/client_json'.format(self.site_name, pkt_id)
if isinstance(client_json, str):
# Best to parse the json here. Ensures it's valid and that everything
# serializes back properly when we do the PUT
client_json = json.loads(client_json)
r = self._session.put(url, json=client_json)
response = r.json()
if r.status_code > 200:
message = response.get('message', 'Server did not provide an error message')
raise AMIERequestError(message, response=r)
return r
def clear_packet_client_json(self, packet_or_id):
"""
Clears the client JSON on the server of the packet corresponding to the given
packet_or_id.
See the :swagger:`Swagger documentation <AMIE_Client/delete_packets__site_name___packet_rec_id__client_json>` for more details.
Args:
packet_or_id (Packet, int): The packet or packet_rec_id to clear client_json on.
"""
if isinstance(packet_or_id, Packet):
pkt_id = packet_or_id.packet_rec_id
else:
pkt_id = packet_or_id
url = self.amie_url + 'packets/{}/{}/client_json'.format(self.site_name, pkt_id)
r = self._session.delete(url)
response = r.json()
if r.status_code > 200:
message = response.get('message', 'Server did not provide an error message')
raise AMIERequestError(message, response=r)
return r
class UsageClient:
"""
AMIE Usage Client.
Args:
site_name (str): Name of the client site.
api_key (str): API key secret
usage_url (str): Base URL for the XSEDE Usage api
Examples:
>>> psc_client = amieclient.UsageClient(site_name='PSC', api_key=some_secrets_store['amie_api_key'])
You can also override the amie_url and usage_url parameters, if you're
doing local development or testing out a new version.
    >>> psc_alt_base_client = amieclient.UsageClient(site_name='PSC', api_key='test_api_key', usage_url='https://amieclient.xsede.org/v0.20_beta/')
"""
def __init__(self, site_name, api_key,
usage_url='https://usage.xsede.org/api/v1'):
if not usage_url.endswith('/'):
self.usage_url = usage_url + '/'
else:
self.usage_url = usage_url
self.site_name = site_name
amie_headers = {
'XA-API-KEY': api_key,
'XA-SITE': site_name
}
s = requests.Session()
s.headers.update(amie_headers)
self._session = s
def __enter__(self):
return self
def __exit__(self, *args):
self._session.close()
def send(self, usage_packets):
"""
Sends a usage update to the Usage API host. This function accepts
individual UsageMessages, lists of UsageRecords, or even a single
UsageRecord. Returns a list of UsageResponses
The API currently has a request size limit of 1024KiB. We get
ample room for overhead that may be added by intermediate layers
(reverse proxies, etc) by capping the size of the request we send
to 768KiB. This happens automatically, no need to chunk your usage
packets yourself. But this potential chunking means that we may get
more than one response, so for the sake of consistency this method
will return a list of responses.
Args:
usage_packets (UsageMessage, [UsageRecord], UsageRecord):
A UsageMessage object, list of UsageRecords, or a single
UsageRecord to send.
Returns:
list of responses
"""
if isinstance(usage_packets, UsageRecord):
pkt_list = UsageMessage([usage_packets])
elif isinstance(usage_packets, list):
pkt_list = UsageMessage(usage_packets)
elif isinstance(usage_packets, UsageMessage):
pkt_list = usage_packets
url = self.usage_url + 'usage/'
# prepare the request
req = requests.Request('POST', url, json=pkt_list.as_dict())
prepped_req = self._session.prepare_request(req)
# Get the size of the content
content_length = int(prepped_req.headers.get('Content-Length'))
# Cap content_length at 786432 bytes
if content_length >= 786432:
# Get the safe number of safe chunks:
number_of_chunks = ceil(content_length / 786432)
# Get the size of those chunks
chunk_size = floor(len(pkt_list) / number_of_chunks)
results = list()
for chunk in pkt_list._chunked(chunk_size=chunk_size):
# Send each chunk
                r = self.send(chunk)
results.extend(r)
return results
r = self._session.send(prepped_req)
if r.status_code == 200:
resp = UsageResponse.from_dict(r.json())
elif r.status_code == 400:
            # Get the message if we're given one; otherwise use a placeholder
msg = r.json().get('error', 'Bad Request, but error not specified by server')
raise UsageResponseError(msg)
else:
r.raise_for_status()
return [resp]
def summary(self):
"""
Gets a usage summary
Not implemented yet
"""
raise NotImplementedError("Usage summaries are not yet implemented in the AMIE Usage api")
def get_failed_records(self):
"""
Gets all failed records
Takes no arguments
"""
url = self.usage_url + 'usage/failed'
r = self._session.get(url)
if r.status_code > 200:
# Get the message if we're given one; otherwise placeholder
msg = r.json().get('error', 'Bad Request, but error not specified by server')
raise UsageResponseError(msg)
return FailedUsageResponse.from_dict(r.json())
def clear_failed_records(self, failed_records_or_ids):
"""
Tells the server to clear the failed records given
Args:
failed_records_or_ids ([FailedUsageRecord], [int]):
A list of FailedUsageRecords, or plain FailedRecordIds, to unmark as
failed
"""
def _get_id(fr):
if hasattr(fr, 'failed_record_id'):
return str(fr.failed_record_id)
else:
return str(fr)
if isinstance(failed_records_or_ids, list):
failed_ids = map(_get_id, failed_records_or_ids)
else:
failed_ids = [_get_id(failed_records_or_ids)]
fids = ','.join(failed_ids)
        url = self.usage_url + 'usage/failed/{fids}'.format(fids=fids)
r = self._session.delete(url)
r.raise_for_status()
return True
def status(self, from_time=None, to_time=None):
"""
Gets the status of records processed from the queue in the provided interval.
Args:
from_date (Datetime): Start date and time
to_date (Datetime): End date and time
"""
from_iso = from_time.isoformat() if from_time is not None else None
to_iso = to_time.isoformat() if to_time is not None else None
p = {'FromTime': from_iso, 'ToTime': to_iso}
url = self.usage_url + 'usage/status'
r = self._session.get(url, params=p)
if r.status_code > 200:
            # Get the message if we're given one; otherwise use a placeholder
msg = r.json().get('error', 'Bad Request, but error not specified by server')
raise UsageResponseError(msg)
return UsageStatus.from_list(r.json())
|
[
"requests.Session",
"json.loads",
"math.ceil"
] |
[((1429, 1447), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1445, 1447), False, 'import requests\n'), ((13415, 13433), 'requests.Session', 'requests.Session', ([], {}), '()\n', (13431, 13433), False, 'import requests\n'), ((11161, 11184), 'json.loads', 'json.loads', (['client_json'], {}), '(client_json)\n', (11171, 11184), False, 'import json\n'), ((15360, 15389), 'math.ceil', 'ceil', (['(content_length / 786432)'], {}), '(content_length / 786432)\n', (15364, 15389), False, 'from math import ceil, floor\n')]
|
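A hedged end-to-end sketch of the client above, using only methods defined in this file; the site name, API key and record ids are placeholders and the calls hit the live AMIE API:

with AMIEClient(site_name="PSC", api_key="not-a-real-key") as client:
    tx = client.get_transaction(transaction_or_id=12345)   # keyword-only argument
    pkt = client.get_packet(packet_rec_id=67890)
    client.set_packet_client_state(pkt, "processed")       # accepts a Packet or a raw id
    client.set_packet_client_json(pkt, {"note": "handled locally"})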
"""Xetra ETL Component"""
import logging
from datetime import datetime
from typing import NamedTuple
import pandas as pd
from xetra.common.s3 import S3BucketConnector
from xetra.common.meta_process import MetaProcess
class XetraSourceConfig(NamedTuple):
"""
Class for source configuration data
src_first_extract_date: determines the date for extracting the source
src_columns: source column names
src_col_date: column name for date in source
src_col_isin: column name for isin in source
src_col_time: column name for time in source
src_col_start_price: column name for starting price in source
src_col_min_price: column name for minimum price in source
src_col_max_price: column name for maximum price in source
src_col_traded_vol: column name for traded volumne in source
"""
src_first_extract_date: str
src_columns: list
src_col_date: str
src_col_isin: str
src_col_time: str
src_col_start_price: str
src_col_min_price: str
src_col_max_price: str
src_col_traded_vol: str
class XetraTargetConfig(NamedTuple):
"""
Class for target configuration data
trg_col_isin: column name for isin in target
trg_col_date: column name for date in target
trg_col_op_price: column name for opening price in target
trg_col_clos_price: column name for closing price in target
trg_col_min_price: column name for minimum price in target
trg_col_max_price: column name for maximum price in target
trg_col_dail_trad_vol: column name for daily traded volume in target
trg_col_ch_prev_clos: column name for change to previous day's closing price in target
trg_key: basic key of target file
trg_key_date_format: date format of target file key
trg_format: file format of the target file
"""
trg_col_isin: str
trg_col_date: str
trg_col_op_price: str
trg_col_clos_price: str
trg_col_min_price: str
trg_col_max_price: str
trg_col_dail_trad_vol: str
trg_col_ch_prev_clos: str
trg_key: str
trg_key_date_format: str
trg_format: str
class XetraETL():
"""
Reads the Xetra data, transforms and writes the transformed to target
"""
def __init__(self, s3_bucket_src: S3BucketConnector,
s3_bucket_trg: S3BucketConnector, meta_key: str,
src_args: XetraSourceConfig, trg_args: XetraTargetConfig):
"""
Constructor for XetraTransformer
:param s3_bucket_src: connection to source S3 bucket
:param s3_bucket_trg: connection to target S3 bucket
:param meta_key: used as self.meta_key -> key of meta file
        :param src_args: NamedTuple class with source configuration data
        :param trg_args: NamedTuple class with target configuration data
"""
self._logger = logging.getLogger(__name__)
self.s3_bucket_src = s3_bucket_src
self.s3_bucket_trg = s3_bucket_trg
self.meta_key = meta_key
self.src_args = src_args
self.trg_args = trg_args
self.extract_date, self.extract_date_list = MetaProcess.return_date_list(
self.src_args.src_first_extract_date, self.meta_key, self.s3_bucket_trg)
self.meta_update_list = [date for date in self.extract_date_list\
if date >= self.extract_date]
def extract(self):
"""
Read the source data and concatenates them to one Pandas DataFrame
:returns:
data_frame: Pandas DataFrame with the extracted data
"""
self._logger.info('Extracting Xetra source files started...')
files = [key for date in self.extract_date_list\
for key in self.s3_bucket_src.list_files_in_prefix(date)]
if not files:
data_frame = pd.DataFrame()
else:
data_frame = pd.concat([self.s3_bucket_src.read_csv_to_df(file)\
for file in files], ignore_index=True)
self._logger.info('Extracting Xetra source files finished.')
return data_frame
def transform_report1(self, data_frame: pd.DataFrame):
"""
Applies the necessary transformation to create report 1
:param data_frame: Pandas DataFrame as Input
:returns:
data_frame: Transformed Pandas DataFrame as Output
"""
if data_frame.empty:
self._logger.info('The dataframe is empty. No transformations will be applied.')
return data_frame
self._logger.info('Applying transformations to Xetra source data for report 1 started...')
# Filtering necessary source columns
data_frame = data_frame.loc[:, self.src_args.src_columns]
# Removing rows with missing values
data_frame.dropna(inplace=True)
# Calculating opening price per ISIN and day
data_frame[self.trg_args.trg_col_op_price] = data_frame\
.sort_values(by=[self.src_args.src_col_time])\
.groupby([
self.src_args.src_col_isin,
self.src_args.src_col_date
])[self.src_args.src_col_start_price]\
.transform('first')
# Calculating closing price per ISIN and day
data_frame[self.trg_args.trg_col_clos_price] = data_frame\
.sort_values(by=[self.src_args.src_col_time])\
.groupby([
self.src_args.src_col_isin,
self.src_args.src_col_date
])[self.src_args.src_col_start_price]\
.transform('last')
# Renaming columns
data_frame.rename(columns={
self.src_args.src_col_min_price: self.trg_args.trg_col_min_price,
self.src_args.src_col_max_price: self.trg_args.trg_col_max_price,
self.src_args.src_col_traded_vol: self.trg_args.trg_col_dail_trad_vol
}, inplace=True)
# Aggregating per ISIN and day -> opening price, closing price,
# minimum price, maximum price, traded volume
data_frame = data_frame.groupby([
self.src_args.src_col_isin,
self.src_args.src_col_date], as_index=False)\
.agg({
self.trg_args.trg_col_op_price: 'min',
self.trg_args.trg_col_clos_price: 'min',
self.trg_args.trg_col_min_price: 'min',
self.trg_args.trg_col_max_price: 'max',
self.trg_args.trg_col_dail_trad_vol: 'sum'})
# Change of current day's closing price compared to the
# previous trading day's closing price in %
data_frame[self.trg_args.trg_col_ch_prev_clos] = data_frame\
.sort_values(by=[self.src_args.src_col_date])\
.groupby([self.src_args.src_col_isin])[self.trg_args.trg_col_op_price]\
.shift(1)
data_frame[self.trg_args.trg_col_ch_prev_clos] = (
data_frame[self.trg_args.trg_col_op_price] \
- data_frame[self.trg_args.trg_col_ch_prev_clos]
) / data_frame[self.trg_args.trg_col_ch_prev_clos ] * 100
# Rounding to 2 decimals
data_frame = data_frame.round(decimals=2)
# Removing the day before extract_date
data_frame = data_frame[data_frame.Date >= self.extract_date].reset_index(drop=True)
self._logger.info('Applying transformations to Xetra source data finished...')
return data_frame
def load(self, data_frame: pd.DataFrame):
"""
Saves a Pandas DataFrame to the target
:param data_frame: Pandas DataFrame as Input
"""
# Creating target key
target_key = (
f'{self.trg_args.trg_key}'
f'{datetime.today().strftime(self.trg_args.trg_key_date_format)}.'
f'{self.trg_args.trg_format}'
)
# Writing to target
self.s3_bucket_trg.write_df_to_s3(data_frame, target_key, self.trg_args.trg_format)
self._logger.info('Xetra target data successfully written.')
# Updating meta file
MetaProcess.update_meta_file(self.meta_update_list, self.meta_key, self.s3_bucket_trg)
self._logger.info('Xetra meta file successfully updated.')
return True
def etl_report1(self):
"""
Extract, transform and load to create report 1
"""
# Extraction
data_frame = self.extract()
# Transformation
data_frame = self.transform_report1(data_frame)
# Load
self.load(data_frame)
return True
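# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes
# the enclosing ETL class is exposed as e.g. `XetraETL`, and that the two
# S3 bucket connectors and the source/target NamedTuple configs referenced
# in the constructor docstring are already built; all names below are
# placeholders.
#
#   etl = XetraETL(s3_bucket_src, s3_bucket_trg,
#                  'meta_file.csv', src_args, trg_args)
#   etl.etl_report1()   # extract -> transform_report1 -> load
# ----------------------------------------------------------------------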
|
[
"pandas.DataFrame",
"datetime.datetime.today",
"xetra.common.meta_process.MetaProcess.update_meta_file",
"xetra.common.meta_process.MetaProcess.return_date_list",
"logging.getLogger"
] |
[((2825, 2852), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2842, 2852), False, 'import logging\n'), ((3090, 3196), 'xetra.common.meta_process.MetaProcess.return_date_list', 'MetaProcess.return_date_list', (['self.src_args.src_first_extract_date', 'self.meta_key', 'self.s3_bucket_trg'], {}), '(self.src_args.src_first_extract_date, self.\n meta_key, self.s3_bucket_trg)\n', (3118, 3196), False, 'from xetra.common.meta_process import MetaProcess\n'), ((8054, 8145), 'xetra.common.meta_process.MetaProcess.update_meta_file', 'MetaProcess.update_meta_file', (['self.meta_update_list', 'self.meta_key', 'self.s3_bucket_trg'], {}), '(self.meta_update_list, self.meta_key, self.\n s3_bucket_trg)\n', (8082, 8145), False, 'from xetra.common.meta_process import MetaProcess\n'), ((3779, 3793), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3791, 3793), True, 'import pandas as pd\n'), ((7712, 7728), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (7726, 7728), False, 'from datetime import datetime\n')]
|
"""
Implementation using CuPy acceleration.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
from time import time
import cupy as cp
from cupyx.scipy import fft as cufft
def powerspectrum(*u, average=True, diagnostics=False,
kmin=None, kmax=None, npts=None,
compute_fft=True, compute_sqr=True,
double=True, bench=False, **kwargs):
"""
See the documentation for the :ref:`CPU version<powerspectrum>`.
Parameters
----------
u : `np.ndarray`
Scalar or vector field.
If vector data, pass arguments as ``u1, u2, ..., un``
where ``ui`` is the ith vector component.
Each ``ui`` can be 1D, 2D, or 3D, and all must have the
same ``ui.shape`` and ``ui.dtype``.
average : `bool`, optional
If ``True``, average over values in a given
bin and multiply by the bin volume.
If ``False``, compute the sum.
diagnostics : `bool`, optional
Return the standard deviation and number of points
in a particular radial bin.
kmin : `int` or `float`, optional
Minimum wavenumber in power spectrum bins.
If ``None``, ``kmin = 1``.
kmax : `int` or `float`, optional
Maximum wavenumber in power spectrum bins.
If ``None``, ``kmax = max(u.shape)//2``.
npts : `int`, optional
Number of modes between ``kmin`` and ``kmax``,
inclusive.
If ``None``, ``npts = kmax-kmin+1``.
compute_fft : `bool`, optional
If ``False``, do not take the FFT of the input data.
FFTs should not be passed with the zero-frequency
component in the center.
compute_sqr : `bool`, optional
If ``False``, sum the real part of the FFT. This can be
useful for purely real FFTs, where the sign of the
FFT is useful information. If ``True``, take the square
as usual.
double : `bool`, optional
If ``False``, calculate FFTs in single precision.
Useful for saving memory.
bench : `bool`, optional
Print message for time of calculation.
kwargs
Additional keyword arguments passed to
``cupyx.scipy.fft.fftn`` or ``cupyx.scipy.fft.rfftn``.
Returns
-------
spectrum : `np.ndarray`, shape `(npts,)`
Radially averaged power spectrum :math:`P(k)`.
kn : `np.ndarray`, shape `(npts,)`
Left edges of radial bins :math:`k`.
counts : `np.ndarray`, shape `(npts,)`, optional
Number of points :math:`N_k` in each bin.
vol : `np.ndarray`, shape `(npts,)`, optional
Volume :math:`V_k` of each bin.
stdev : `np.ndarray`, shape `(npts,)`, optional
Standard deviation multiplied with :math:`V_k`
in each bin.
"""
if bench:
t0 = time()
shape = u[0].shape
ndim = u[0].ndim
ncomp = len(u)
N = max(u[0].shape)
if np.issubdtype(u[0].dtype, np.floating):
real = True
dtype = cp.float64 if double else cp.float32
else:
real = False
dtype = cp.complex128 if double else cp.complex64
if ndim not in [1, 2, 3]:
raise ValueError("Dimension of image must be 1, 2, or 3.")
# Get memory pools
mempool = cp.get_default_memory_pool()
pinned_mempool = cp.get_default_pinned_memory_pool()
    # Compute power spectral density with memory efficiency
density = None
comp = cp.empty(shape, dtype=dtype)
for i in range(ncomp):
temp = cp.asarray(u[i], dtype=dtype)
comp[...] = temp
del temp
if compute_fft:
fft = _cufftn(comp, **kwargs)
else:
fft = comp
if density is None:
fftshape = fft.shape
density = cp.zeros(fft.shape)
if compute_sqr:
density[...] += _mod_squared(fft)
else:
density[...] += cp.real(fft)
del fft
mempool.free_all_blocks()
pinned_mempool.free_all_blocks()
# Need to double count if using rfftn
if real and compute_fft:
density[...] *= 2
# Get radial coordinates
kr = cp.asarray(_kmag_sampling(fftshape, real=real).astype(np.float32))
# Flatten arrays
kr = kr.ravel()
density = density.ravel()
# Get minimum and maximum k for binning if not given
if kmin is None:
kmin = 1
if kmax is None:
kmax = int(N/2)
if npts is None:
npts = kmax-kmin+1
# Generate bins
kn = cp.linspace(kmin, kmax, npts, endpoint=True) # Left edges of bins
dk = kn[1] - kn[0]
# Radially average power spectral density
if ndim == 1:
fac = 2*np.pi
elif ndim == 2:
fac = 4*np.pi
elif ndim == 3:
fac = 4./3.*np.pi
spectrum = cp.zeros_like(kn)
stdev = cp.zeros_like(kn)
vol = cp.zeros_like(kn)
counts = cp.zeros(kn.shape, dtype=np.int64)
for i, ki in enumerate(kn):
ii = cp.where(cp.logical_and(kr >= ki, kr < ki+dk))
samples = density[ii]
vk = fac*cp.pi*((ki+dk)**ndim-(ki)**ndim)
if average:
spectrum[i] = vk*cp.mean(samples)
else:
spectrum[i] = cp.sum(samples)
if diagnostics:
Nk = samples.size
stdev[i] = vk * cp.std(samples, ddof=1)
vol[i] = vk
counts[i] = Nk
del density, kr
mempool.free_all_blocks()
pinned_mempool.free_all_blocks()
if bench:
print(f"Time: {time() - t0:.04f} s")
result = [spectrum.get(), kn.get()]
if diagnostics:
result.extend([counts.get(), vol.get(), stdev.get()])
return tuple(result)
def _cufftn(data, overwrite_input=False, **kwargs):
"""
Calculate the N-dimensional fft of an image
with memory efficiency
"""
# Get memory pools
mempool = cp.get_default_memory_pool()
pinned_mempool = cp.get_default_pinned_memory_pool()
# Real vs. Complex data
if data.dtype in [cp.float32, cp.float64]:
value_type = 'R2C'
fftn = cufft.rfftn
elif data.dtype in [cp.complex64, cp.complex128]:
value_type = 'C2C'
fftn = cufft.fftn
else:
raise ValueError(f"{data.dtype} is unrecognized data type.")
# Get plan for computing fft
plan = cufft.get_fft_plan(data, value_type=value_type)
# Compute fft
with plan:
fft = fftn(data, overwrite_x=overwrite_input, **kwargs)
# Release memory
del plan
mempool.free_all_blocks()
pinned_mempool.free_all_blocks()
return fft
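# Illustrative sketch of calling the helper directly (not part of the
# original module). For real-valued input an R2C plan is selected, so the
# last axis of the output is halved to n//2 + 1:
#
#   data = cp.random.random((64, 64), dtype=cp.float32)
#   fft = _cufftn(data)
#   assert fft.shape == (64, 33)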
@cp.fuse(kernel_name='mod_squared')
def _mod_squared(a):
return cp.real(a*cp.conj(a))
def _kmag_sampling(shape, real=True):
"""
Generates the |k| coordinate system.
"""
if real:
freq = np.fft.rfftfreq
s = list(shape)
s[-1] = (s[-1]-1)*2
shape = s
else:
freq = np.fft.fftfreq
ndim = len(shape)
kmag = np.zeros(shape)
ksqr = []
for i in range(ndim):
ni = shape[i]
sample = freq(ni) if i == ndim - 1 else np.fft.fftfreq(ni)
if real:
sample = np.abs(sample)
k1d = sample * ni
ksqr.append(k1d * k1d)
if ndim == 1:
ksqr = ksqr[0]
elif ndim == 2:
ksqr = np.add.outer(ksqr[0], ksqr[1])
elif ndim == 3:
ksqr = np.add.outer(np.add.outer(ksqr[0], ksqr[1]), ksqr[2])
kmag = np.sqrt(ksqr)
return kmag
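def _example_kmag_sampling():
    """Illustrative sanity check (not part of the original module): for a
    real transform of a length-8 signal, the rfft output has 5 bins and
    the |k| grid runs over the integers 0..4."""
    k = _kmag_sampling((5,), real=True)  # (5-1)*2 == 8 samples originally
    assert np.allclose(k, [0., 1., 2., 3., 4.])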
if __name__ == '__main__':
import pyFC
from matplotlib import pyplot as plt
dim = 100
fc = pyFC.LogNormalFractalCube(
ni=dim, nj=dim, nk=dim, kmin=10, mean=1, beta=-5/3)
fc.gen_cube()
data = fc.cube
psd, kn, stdev, vol, N = powerspectrum(data, diagnostics=True)
print(psd.mean())
def zero_log10(s):
"""
Takes logarithm of an array while retaining the zeros
"""
sp = np.where(s > 0., s, 1)
return np.log10(sp)
log_psd = zero_log10(psd)
log_kn = zero_log10(kn)
idxs = np.where(log_kn >= np.log10(fc.kmin))
m, b = np.polyfit(log_kn[idxs], log_psd[idxs], 1)
plt.errorbar(kn, psd,
label=rf'PSD, $\beta = {fc.beta}$', color='g')
plt.plot(log_kn[idxs], m*log_kn[idxs]+b,
label=rf'Fit, $\beta = {m}$', color='k')
plt.ylabel(r"$\log{P(k)}$")
plt.xlabel(r"$\log{k}$")
plt.legend(loc='upper right')
plt.show()
|
[
"numpy.abs",
"cupy.empty",
"cupy.zeros_like",
"numpy.polyfit",
"cupy.get_default_memory_pool",
"numpy.add.outer",
"cupy.fuse",
"cupy.std",
"numpy.fft.fftfreq",
"pyFC.LogNormalFractalCube",
"numpy.log10",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.show",
"cupy.zeros",
"cupy.real",
"matplotlib.pyplot.legend",
"cupy.conj",
"cupy.get_default_pinned_memory_pool",
"cupy.mean",
"matplotlib.pyplot.ylabel",
"numpy.issubdtype",
"cupyx.scipy.fft.get_fft_plan",
"matplotlib.pyplot.plot",
"cupy.asarray",
"numpy.zeros",
"cupy.sum",
"time.time",
"numpy.where",
"cupy.linspace",
"cupy.logical_and",
"matplotlib.pyplot.xlabel",
"numpy.sqrt"
] |
[((6522, 6556), 'cupy.fuse', 'cp.fuse', ([], {'kernel_name': '"""mod_squared"""'}), "(kernel_name='mod_squared')\n", (6529, 6556), True, 'import cupy as cp\n'), ((2898, 2936), 'numpy.issubdtype', 'np.issubdtype', (['u[0].dtype', 'np.floating'], {}), '(u[0].dtype, np.floating)\n', (2911, 2936), True, 'import numpy as np\n'), ((3236, 3264), 'cupy.get_default_memory_pool', 'cp.get_default_memory_pool', ([], {}), '()\n', (3262, 3264), True, 'import cupy as cp\n'), ((3286, 3321), 'cupy.get_default_pinned_memory_pool', 'cp.get_default_pinned_memory_pool', ([], {}), '()\n', (3319, 3321), True, 'import cupy as cp\n'), ((3414, 3442), 'cupy.empty', 'cp.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (3422, 3442), True, 'import cupy as cp\n'), ((4474, 4518), 'cupy.linspace', 'cp.linspace', (['kmin', 'kmax', 'npts'], {'endpoint': '(True)'}), '(kmin, kmax, npts, endpoint=True)\n', (4485, 4518), True, 'import cupy as cp\n'), ((4754, 4771), 'cupy.zeros_like', 'cp.zeros_like', (['kn'], {}), '(kn)\n', (4767, 4771), True, 'import cupy as cp\n'), ((4784, 4801), 'cupy.zeros_like', 'cp.zeros_like', (['kn'], {}), '(kn)\n', (4797, 4801), True, 'import cupy as cp\n'), ((4812, 4829), 'cupy.zeros_like', 'cp.zeros_like', (['kn'], {}), '(kn)\n', (4825, 4829), True, 'import cupy as cp\n'), ((4843, 4877), 'cupy.zeros', 'cp.zeros', (['kn.shape'], {'dtype': 'np.int64'}), '(kn.shape, dtype=np.int64)\n', (4851, 4877), True, 'import cupy as cp\n'), ((5808, 5836), 'cupy.get_default_memory_pool', 'cp.get_default_memory_pool', ([], {}), '()\n', (5834, 5836), True, 'import cupy as cp\n'), ((5858, 5893), 'cupy.get_default_pinned_memory_pool', 'cp.get_default_pinned_memory_pool', ([], {}), '()\n', (5891, 5893), True, 'import cupy as cp\n'), ((6255, 6302), 'cupyx.scipy.fft.get_fft_plan', 'cufft.get_fft_plan', (['data'], {'value_type': 'value_type'}), '(data, value_type=value_type)\n', (6273, 6302), True, 'from cupyx.scipy import fft as cufft\n'), ((6895, 6910), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (6903, 6910), True, 'import numpy as np\n'), ((7359, 7372), 'numpy.sqrt', 'np.sqrt', (['ksqr'], {}), '(ksqr)\n', (7366, 7372), True, 'import numpy as np\n'), ((7500, 7579), 'pyFC.LogNormalFractalCube', 'pyFC.LogNormalFractalCube', ([], {'ni': 'dim', 'nj': 'dim', 'nk': 'dim', 'kmin': '(10)', 'mean': '(1)', 'beta': '(-5 / 3)'}), '(ni=dim, nj=dim, nk=dim, kmin=10, mean=1, beta=-5 / 3)\n', (7525, 7579), False, 'import pyFC\n'), ((8008, 8050), 'numpy.polyfit', 'np.polyfit', (['log_kn[idxs]', 'log_psd[idxs]', '(1)'], {}), '(log_kn[idxs], log_psd[idxs], 1)\n', (8018, 8050), True, 'import numpy as np\n'), ((8056, 8124), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['kn', 'psd'], {'label': 'f"""PSD, $\\\\beta = {fc.beta}$"""', 'color': '"""g"""'}), "(kn, psd, label=f'PSD, $\\\\beta = {fc.beta}$', color='g')\n", (8068, 8124), True, 'from matplotlib import pyplot as plt\n'), ((8146, 8235), 'matplotlib.pyplot.plot', 'plt.plot', (['log_kn[idxs]', '(m * log_kn[idxs] + b)'], {'label': 'f"""Fit, $\\\\beta = {m}$"""', 'color': '"""k"""'}), "(log_kn[idxs], m * log_kn[idxs] + b, label=f'Fit, $\\\\beta = {m}$',\n    color='k')\n", (8154, 8235), True, 'from matplotlib import pyplot as plt\n'), ((8245, 8272), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\log{P(k)}$"""'], {}), "('$\\\\log{P(k)}$')\n", (8255, 8272), True, 'from matplotlib import pyplot as plt\n'), ((8277, 8301), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\log{k}$"""'], {}), "('$\\\\log{k}$')\n", (8287, 8301), True, 'from matplotlib import pyplot as plt\n'), ((8306, 8335), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (8316, 8335), True, 'from matplotlib import pyplot as plt\n'), ((8341, 8351), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8349, 8351), True, 'from matplotlib import pyplot as plt\n'), ((2795, 2801), 'time.time', 'time', ([], {}), '()\n', (2799, 2801), False, 'from time import time\n'), ((3485, 3514), 'cupy.asarray', 'cp.asarray', (['u[i]'], {'dtype': 'dtype'}), '(u[i], dtype=dtype)\n', (3495, 3514), True, 'import cupy as cp\n'), ((7838, 7861), 'numpy.where', 'np.where', (['(s > 0.0)', 's', '(1)'], {}), '(s > 0.0, s, 1)\n', (7846, 7861), True, 'import numpy as np\n'), ((7876, 7888), 'numpy.log10', 'np.log10', (['sp'], {}), '(sp)\n', (7884, 7888), True, 'import numpy as np\n'), ((3743, 3762), 'cupy.zeros', 'cp.zeros', (['fft.shape'], {}), '(fft.shape)\n', (3751, 3762), True, 'import cupy as cp\n'), ((3875, 3887), 'cupy.real', 'cp.real', (['fft'], {}), '(fft)\n', (3882, 3887), True, 'import cupy as cp\n'), ((4932, 4970), 'cupy.logical_and', 'cp.logical_and', (['(kr >= ki)', '(kr < ki + dk)'], {}), '(kr >= ki, kr < ki + dk)\n', (4946, 4970), True, 'import cupy as cp\n'), ((5156, 5171), 'cupy.sum', 'cp.sum', (['samples'], {}), '(samples)\n', (5162, 5171), True, 'import cupy as cp\n'), ((6599, 6609), 'cupy.conj', 'cp.conj', (['a'], {}), '(a)\n', (6606, 6609), True, 'import cupy as cp\n'), ((7021, 7039), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['ni'], {}), '(ni)\n', (7035, 7039), True, 'import numpy as np\n'), ((7078, 7092), 'numpy.abs', 'np.abs', (['sample'], {}), '(sample)\n', (7084, 7092), True, 'import numpy as np\n'), ((7227, 7257), 'numpy.add.outer', 'np.add.outer', (['ksqr[0]', 'ksqr[1]'], {}), '(ksqr[0], ksqr[1])\n', (7239, 7257), True, 'import numpy as np\n'), ((7978, 7995), 'numpy.log10', 'np.log10', (['fc.kmin'], {}), '(fc.kmin)\n', (7986, 7995), True, 'import numpy as np\n'), ((5099, 5115), 'cupy.mean', 'cp.mean', (['samples'], {}), '(samples)\n', (5106, 5115), True, 'import cupy as cp\n'), ((5254, 5277), 'cupy.std', 'cp.std', (['samples'], {'ddof': '(1)'}), '(samples, ddof=1)\n', (5260, 5277), True, 'import cupy as cp\n'), ((7306, 7336), 'numpy.add.outer', 'np.add.outer', (['ksqr[0]', 'ksqr[1]'], {}), '(ksqr[0], ksqr[1])\n', (7318, 7336), True, 'import numpy as np\n'), ((5455, 5461), 'time.time', 'time', ([], {}), '()\n', (5459, 5461), False, 'from time import time\n')]
|
from __future__ import absolute_import, division, print_function
from dials.algorithms.refinement.parameterisation.model_parameters import (
Parameter,
ModelParameterisation,
)
import abc
from scitbx.array_family import flex
from dials_refinement_helpers_ext import GaussianSmoother as GS
# reusable PHIL string for options affecting scan-varying parameterisation
phil_str = """
smoother
.help = "Options that affect scan-varying parameterisation"
.expert_level = 1
{
interval_width_degrees = 36.0
.help = "Width of scan between checkpoints in degrees. Can be set to Auto."
.type = float(value_min=0.)
absolute_num_intervals = None
.help = "Number of intervals between checkpoints if scan_varying"
"refinement is requested. If set, this overrides"
"interval_width_degrees"
.type = int(value_min=1)
}
"""
class ScanVaryingParameterSet(Parameter):
"""Testing a class for a scan-varying parameter, in which values at rotation
angle phi may be derived using smoothed interpolation between checkpoint
values stored here. Externally, this is presented as a set of parameters.
num_samples is the number of checkpoints. Other arguments are as Parameter.
"""
def __init__(
self,
value,
num_samples=5,
axis=None,
ptype=None,
name="ScanVaryingParameterSet",
):
assert num_samples >= 2 # otherwise use scan-independent parameterisation
value = [value] * num_samples
self._name_stem = name
name = [
e + "_sample%d" % i for i, e in enumerate([self._name_stem] * num_samples)
]
Parameter.__init__(self, value, axis, ptype, name)
self._esd = [None] * num_samples
self._num_samples = num_samples
return
def __len__(self):
return self._num_samples
@property
def value(self):
return self._value
@value.setter
def value(self, val):
assert len(val) == len(self)
self._value = val
self._esd = [None] * len(self)
@property
def name_stem(self):
return self._name_stem
def __str__(self):
msg = "ScanVaryingParameterSet " + self.name_stem + ":\n"
try:
msg += " Type: " + self.param_type + "\n"
except TypeError:
msg += " Type: " + str(self.param_type) + "\n"
try:
msg += " Axis: (%5.3f, %5.3f, %5.3f)" % tuple(self.axis) + "\n"
except TypeError:
msg += " Axis: " + str(self.axis) + "\n"
vals = ", ".join(["%5.3f"] * len(self)) % tuple(self.value)
msg += " Values: " + vals + "\n"
try:
sigs = ", ".join(["%5.3f"] * len(self)) % tuple(self.esd)
except TypeError:
sigs = ", ".join([str(e) for e in self.esd])
msg += " Sigmas: (" + sigs + ") \n"
return msg
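# Illustrative example (not part of the original module):
#
#   pset = ScanVaryingParameterSet(1.0, num_samples=5, name="dist")
#   len(pset)   # -> 5
#   pset.value  # -> [1.0, 1.0, 1.0, 1.0, 1.0]
#   # individual parameter names become "dist_sample0" ... "dist_sample4"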
# wrap the C++ GaussianSmoother, modifying return values to emulate the
# old Python version.
class GaussianSmoother(GS):
"""A Gaussian smoother for ScanVaryingModelParameterisations"""
def value_weight(self, x, param):
result = super(GaussianSmoother, self).value_weight(x, flex.double(param.value))
return (result.get_value(), result.get_weight(), result.get_sumweight())
def multi_value_weight(self, x, param):
result = super(GaussianSmoother, self).multi_value_weight(
flex.double(x), flex.double(param.value)
)
return (result.get_value(), result.get_weight(), result.get_sumweight())
def positions(self):
return list(super(GaussianSmoother, self).positions())
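# Illustrative pairing of the smoother with a parameter set (not part of
# the original module; the constructor signature of the C++ GaussianSmoother
# is assumed here to be (x_range, num_intervals)):
#
#   smoother = GaussianSmoother((0.0, 180.0), 5)
#   pset = ScanVaryingParameterSet(1.0, num_samples=smoother.num_values())
#   value, weight, sumweight = smoother.value_weight(90.0, pset)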
class ScanVaryingModelParameterisation(ModelParameterisation):
"""Extending ModelParameterisation to deal with ScanVaryingParameterSets.
For simplicity at this stage it is decreed that a
ScanVaryingModelParameterisation consists only of ScanVaryingParameterSets.
There is no combination with normal Parameters. This could be changed later,
but there may be no reason to do so, hence starting with this simpler
design"""
# The initial state is here equivalent to the initial state of the
# time static version of the parameterisation, as it is assumed that we
# start with a flat model wrt rotation angle.
__metaclass__ = abc.ABCMeta
def __init__(
self,
model,
initial_state,
param_sets,
smoother,
experiment_ids,
is_multi_state=False,
):
ModelParameterisation.__init__(
self, model, initial_state, param_sets, experiment_ids, is_multi_state
)
self._num_sets = len(self._param)
self._num_samples = len(param_sets[0])
self._total_len = self._num_samples * self._num_sets
# ensure all internal parameter sets have the same number of parameters
for param in self._param[1:]:
assert len(param) == self._num_samples
# Link up with an object that will perform the smoothing.
self._smoother = smoother
assert self._smoother.num_values() == self._num_samples
# define an attribute for caching the variance-covariance matrix of
# parameters
self._var_cov = None
return
def num_samples(self):
"""the number of samples of each parameter"""
return self._num_samples
def num_free(self):
"""the number of free parameters"""
if self._num_free is None:
self._num_free = (
sum(not x.get_fixed() for x in self._param) * self._num_samples
)
return self._num_free
# def num_total(self): inherited unchanged from ModelParameterisation
def num_sets(self):
"""the number of parameter sets"""
return self._num_sets
@abc.abstractmethod
def compose(self, t):
"""compose the model state at image number t from its initial state and
its parameter list. Also calculate the derivatives of the state wrt
each parameter in the list.
Unlike ModelParameterisation, does not automatically update the actual
model class. This should be done once refinement is complete."""
pass
def get_param_vals(self, only_free=True):
"""export the values of the internal list of parameters as a
sequence of floats.
If only_free, the values of fixed parameters are filtered from the
returned list. Otherwise all parameter values are returned"""
if only_free:
return [x for e in self._param if not e.get_fixed() for x in e.value]
else:
return [x for e in self._param for x in e.value]
def get_param_names(self, only_free=True):
"""export the names of the internal list of parameters
If only_free, the names of fixed parameters are filtered from the
returned list. Otherwise all parameter names are returned"""
# FIXME combine functionality with get_param_vals by returning a named,
# ordered list?
if only_free:
return [x for e in self._param if not e.get_fixed() for x in e.name]
else:
return [x for e in self._param for x in e.name]
def set_param_vals(self, vals):
"""set the values of the internal list of parameters from a
sequence of floats.
        First break the sequence into subsequences of the same length
        as _num_samples.
Only free parameter sets can have values assigned, therefore the
length of vals must equal the value of num_free"""
assert len(vals) == self.num_free()
i = 0
for p in self._param:
if not p.get_fixed(): # only set the free parameter sets
new_vals = vals[i : i + self._num_samples]
p.value = new_vals
i += self._num_samples
# compose with the new parameter values
# self.compose()
return
def set_param_esds(self, esds):
"""set the estimated standard deviations of the internal list of parameters
from a sequence of floats.
        First break the sequence into subsequences of the same length
        as _num_samples.
Only free parameters can be set, therefore the length of esds must equal
the value of num_free"""
assert len(esds) == self.num_free()
i = 0
for p in self._param:
if not p.get_fixed(): # only set the free parameter sets
new_esds = esds[i : i + self._num_samples]
p.esd = new_esds
i += self._num_samples
return
# def get_fixed(self): inherited unchanged from ModelParameterisation
# def set_fixed(self, fix): inherited unchanged from ModelParameterisation
# def get_state(self): inherited unchanged from ModelParameterisation
def get_ds_dp(self, only_free=True, use_none_as_null=False):
"""get a list of derivatives of the state wrt each parameter, as
a list in the same order as the internal list of parameters. Requires
compose to be called first at scan coordinate 't' so that each
scan-dependent parameter is evaluated at coordinate t, corresponding to
the original, unnormalised coordinates used to set up the smoother
(t will most likely be along the dimension of image number).
If only_free, the derivatives with respect to fixed parameters are
omitted from the returned list. Otherwise a list for all parameters is
returned, with null values for the fixed parameters.
The internal list of derivatives self._dstate_dp may use None for null
elements. By default these are converted to the null state, but
        optionally these may remain None to make them easier to detect and to
        avoid doing calculations on null elements
"""
if use_none_as_null:
null = None
else:
null = self._null_state
if only_free:
return [
null if ds_dp is None else ds_dp
for row, p in zip(self._dstate_dp, self._param)
if not p.get_fixed()
for ds_dp in row
]
else:
return [
null if p.get_fixed() or ds_dp is None else ds_dp
for row, p in zip(self._dstate_dp, self._param)
for ds_dp in row
]
def get_smoothed_parameter_value(self, t, pset):
"""export the smoothed value of a parameter set at image number 't'
using the smoother."""
return self._smoother.value_weight(t, pset)[0]
def calculate_state_uncertainties(self, var_cov=None):
"""Given a variance-covariance array for the parameters of this model,
propagate those estimated errors into the uncertainties of the model state
at every scan point"""
if var_cov is not None:
# first call, just cache the variance-covariance matrix
self._var_cov = var_cov
return None
# later calls, make sure it has been cached! Otherwise ESDs cannot be
# calculated, so return None
if self._var_cov is None:
return None
# later calls, assumes compose has been called at image number t, so that
# get_ds_dp will be specific for that image. Now call the base class method
# and return the result
return super(
ScanVaryingModelParameterisation, self
).calculate_state_uncertainties(self._var_cov)
def set_state_uncertainties(self, var_cov_list):
"""Send the calculated variance-covariance matrices for model state elements
for all scan points back to the model for storage alongside the model state
"""
pass
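# Minimal subclass sketch (illustrative only): a concrete scan-varying
# parameterisation must implement compose(t), e.g. by evaluating each
# parameter set at image number t via the smoother:
#
#   class MyParameterisation(ScanVaryingModelParameterisation):
#       def compose(self, t):
#           vals = [self.get_smoothed_parameter_value(t, p)
#                   for p in self._param]
#           # ... rebuild the model state and fill self._dstate_dp here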
|
[
"dials.algorithms.refinement.parameterisation.model_parameters.Parameter.__init__",
"dials.algorithms.refinement.parameterisation.model_parameters.ModelParameterisation.__init__",
"scitbx.array_family.flex.double"
] |
[((1665, 1715), 'dials.algorithms.refinement.parameterisation.model_parameters.Parameter.__init__', 'Parameter.__init__', (['self', 'value', 'axis', 'ptype', 'name'], {}), '(self, value, axis, ptype, name)\n', (1683, 1715), False, 'from dials.algorithms.refinement.parameterisation.model_parameters import Parameter, ModelParameterisation\n'), ((4523, 4629), 'dials.algorithms.refinement.parameterisation.model_parameters.ModelParameterisation.__init__', 'ModelParameterisation.__init__', (['self', 'model', 'initial_state', 'param_sets', 'experiment_ids', 'is_multi_state'], {}), '(self, model, initial_state, param_sets,\n experiment_ids, is_multi_state)\n', (4553, 4629), False, 'from dials.algorithms.refinement.parameterisation.model_parameters import Parameter, ModelParameterisation\n'), ((3214, 3238), 'scitbx.array_family.flex.double', 'flex.double', (['param.value'], {}), '(param.value)\n', (3225, 3238), False, 'from scitbx.array_family import flex\n'), ((3445, 3459), 'scitbx.array_family.flex.double', 'flex.double', (['x'], {}), '(x)\n', (3456, 3459), False, 'from scitbx.array_family import flex\n'), ((3461, 3485), 'scitbx.array_family.flex.double', 'flex.double', (['param.value'], {}), '(param.value)\n', (3472, 3485), False, 'from scitbx.array_family import flex\n')]
|
import json
import zipfile
import importlib
from functools import partial
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
import torchero
from torchero.utils.mixins import DeviceMixin
from torchero import meters
from torchero import SupervisedTrainer
class InputDataset(Dataset):
""" Simple Dataset wrapper
to transform input before giving it
to the dataloader
"""
def __init__(self, ds, transform):
self.ds = ds
self.transform = transform
def __getitem__(self, idx):
return self.transform(self.ds[idx])
def __len__(self):
return len(self.ds)
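# Example (illustrative): wrap a plain list so each item is transformed
# lazily when the DataLoader fetches it.
#
#   ds = InputDataset([1, 2, 3], transform=lambda x: x * 10)
#   ds[1]    # -> 20
#   len(ds)  # -> 3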
class ModelImportException(Exception):
pass
class ModelNotCompiled(Exception):
pass
class PredictionItem(object):
def __init__(self, preds):
self._preds = preds
def __repr__(self):
return '{}({})'.format(self.__class__.__name__,
repr(self._preds))
@property
def tensor(self):
return self._preds
def numpy(self):
return self._preds.cpu().numpy()
class PredictionsResult(object):
def __init__(self, preds, pred_class=PredictionItem):
self._preds = [
pred_class(pred) for pred in preds
]
@property
def tensor(self):
return torch.stack([pred.tensor for pred in self._preds])
def numpy(self):
return np.stack([pred.numpy() for pred in self._preds])
def __iter__(self):
return iter(self._preds)
def __len__(self):
return len(self._preds)
def __getitem__(self, idx):
return self._preds[idx]
def __repr__(self):
list_format = []
for pred in self._preds[:10]:
list_format.append(repr(pred))
if len(self._preds) > 10:
list_format.append('...')
format_string = '{}([{}])'.format(self.__class__.__name__,
'\n,'.join(list_format))
return format_string
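# Example (illustrative): wrapping a batch of raw prediction tensors.
#
#   res = PredictionsResult(torch.rand(4, 3))
#   res.numpy().shape   # -> (4, 3)
#   res[0]              # -> PredictionItem for the first sample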
class Model(DeviceMixin):
    """ Model class that wraps nn.Module models to add
    training, prediction, saving & loading capabilities
    """
@classmethod
def load_from_file(_, path_or_fp, net=None):
""" Load a saved model from disk an convert it to the desired type (ImageModel, TextModel, etc)
Arguments:
net (nn.Module): Neural network initialized in the same way as the saved one.
path_or_fp (file-like or str): Path to saved model
"""
with zipfile.ZipFile(path_or_fp, mode='r') as zip_fp:
with zip_fp.open('config.json', 'r') as fp:
config = json.loads(fp.read().decode('utf-8'))
model_type = config['torchero_model_type']
module = importlib.import_module(model_type['module'])
model_type = getattr(module, model_type['type'])
if net is None:
if 'net' not in config:
raise ModelImportException("Invalid network configuration json (Expected 'net' key)")
net_type = config['net']['type']
net_module = importlib.import_module(net_type['module'])
net_type = getattr(net_module, net_type['type'])
if 'config' not in config['net']:
raise ModelImportException("Network configuration not found in config.json ('net.config'). Create function passing an already initialized network")
if hasattr(net_type, 'from_config') and 'config' in config['net']:
net = net_type.from_config(config['net']['config'])
model = model_type(net)
model.load(path_or_fp)
return model
def __init__(self, model):
""" Constructor
Arguments:
model (nn.Module): Model to be wrapped
"""
super(Model, self).__init__()
self.model = model
self._trainer = None
def pred_class(self, preds):
return PredictionsResult(preds)
@property
def trainer(self):
if self._trainer is None:
raise ModelNotCompiled("Model hasn't been compiled with any trainer. Use model.compile first")
return self._trainer
def compile(self, optimizer, loss, metrics, hparams={}, callbacks=[], val_metrics=None):
""" Compile this model with a optimizer a loss and set of given metrics
Arguments:
optimizer (str or instance of torch.optim.Optimizer): Optimizer to train the model
loss (str or instance of torch.nn.Module): Loss (criterion) to be minimized
metrics (list or dict of `torchero.meters.BaseMeter`, optional): A list of metrics
or dictionary of metrics names and meters to record for training set
            hparams (list or dict, optional): A list of hyperparameters
                or dictionary of hyperparameter names and values to record
val_metrics (list or dict of `torchero.meters.BaseMeter`, optional): Same as metrics argument
                but only used for the validation set. If None it uses the same metrics as the `metrics` argument.
callbacks (list of `torchero.callbacks.Callback`): List of callbacks to use in trainings
"""
self._trainer = SupervisedTrainer(model=self.model,
criterion=loss,
optimizer=optimizer,
callbacks=callbacks,
acc_meters=metrics,
val_acc_meters=val_metrics,
hparams=hparams)
self._trainer.to(self.device)
return self
def input_to_tensor(self, *X):
""" Converts inputs to tensors
"""
return X
def _predict_batch(self, *X):
""" Generate output predictions for the input tensors
This method can be called with a single input or multiple (If the model has multiple inputs)
This method is not intended to be used directly. Use predict instead
"""
self.model.train(False)
with torch.no_grad():
# Converts each input tensor to the given device
X = list(map(self._convert_tensor, X))
return self.model(*X)
@property
def callbacks(self):
return self.trainer.callbacks
@property
def optimizer(self):
return self.trainer.optimizer
@optimizer.setter
    def optimizer(self, optimizer):
self.trainer.optimizer = optimizer
@property
def hparams(self):
return dict(self.trainer.hparams)
@property
def history(self):
return self.trainer.history
@property
def loss(self):
return self.trainer.criterion
@loss.setter
def loss(self, loss):
self.trainer.criterion = loss
def total_parameters(self):
""" Returns the total number of parameters
"""
parameters = self.model.parameters()
parameters = map(lambda p: p.numel(), parameters)
return sum(parameters)
def total_trainable_parameters(self):
""" Returns the total number of trainable parameters
"""
parameters = self.model.parameters()
parameters = filter(lambda p: p.requires_grad, parameters)
parameters = map(lambda p: p.numel(), parameters)
return sum(parameters)
def to(self, device):
""" Moves the model to the given device
Arguments:
device (str or torch.device)
"""
super(Model, self).to(device)
try:
self.trainer.to(device)
except ModelNotCompiled:
pass
def _combine_preds(self, preds):
""" Combines the list of predictions in a single tensor
"""
preds = torch.stack(preds)
return self.pred_class(preds)
def predict_on_dataloader(self, dl, has_targets=True):
""" Generate output predictions on an dataloader
Arguments:
dl (`torch.utils.data.DataLoader`): input DataLoader
            has_targets (bool): Whether the dataloader yields (input, target) pairs; targets are discarded
Notes:
* The dataloader batches should yield `torch.Tensor`'s
"""
preds = []
for X in dl:
if has_targets:
X, _ = X
if isinstance(X, tuple):
y = self._predict_batch(*X)
else:
y = self._predict_batch(X)
preds.extend(y)
preds = self._combine_preds(preds)
return preds
def predict(self,
ds,
batch_size=None,
to_tensor=True,
has_targets=False,
num_workers=0,
pin_memory=False,
prefetch_factor=2):
""" Generate output predictions
Arguments:
ds (* `torch.utils.data.Dataset`
* `torch.utils.data.DataLoader`
* `list`
* `np.array`): Input samples
batch_size (int or None): Number of samples per batch. If None is
passed it will default to 32.
to_tensor (bool): Set this to True to convert inputs to tensors first (default behaviour)
            has_targets (bool): Whether the samples already contain targets, which are omitted for prediction
num_workers (int, optional): Number of subprocesses to use for data
loading. 0 means that the data will be loaded in the main process.
pin_memory (bool): If True, the data loader will copy Tensors into
CUDA pinned memory before returning them. If your data elements are
a custom type, or your collate_fn returns a batch that is a custom
type, see the example below.
prefetch_factor (int, optional):
Number of samples loaded in advance by each worker. 2 means
there will be a total of 2 * num_workers samples prefetched
across all workers.
"""
dl = self._get_dataloader(ds,
shuffle=False,
batch_size=batch_size,
shallow_dl=to_tensor,
num_workers=num_workers,
pin_memory=pin_memory,
prefetch_factor=prefetch_factor)
return self.predict_on_dataloader(dl, has_targets=has_targets)
def train_on_dataloader(self, train_dl, val_dl=None, epochs=1):
""" Trains the model for a fixed number of epochs
Arguments:
train_ds (`torch.utils.data.DataLoader`): Train dataloader
val_ds (`torch.utils.data.Dataset`): Test dataloader
epochs (int): Number of epochs to train the model
"""
self.trainer.train(dataloader=train_dl,
valid_dataloader=val_dl,
epochs=epochs)
return self.trainer.history
def load_checkpoint(self, checkpoint=None):
        self.trainer.load_checkpoint(checkpoint=checkpoint)
def evaluate_on_dataloader(self,
dataloader,
metrics=None):
""" Evaluate metrics on a given dataloader
Arguments:
dataloader (`torch.utils.data.DataLoader`): Input Dataloader
metrics (list of mapping, optional): Metrics to evaluate. If None is passed
                it will use the metrics defined at the compile step
"""
return self.trainer.evaluate(dataloader=dataloader,
metrics=metrics)
def _create_dataloader(self, *args, **kwargs):
return DataLoader(*args, **kwargs)
def _get_dataloader(self,
ds,
batch_size=None,
shallow_dl=False,
**dl_kwargs):
if isinstance(ds, (Dataset, list)):
dl = self._create_dataloader(InputDataset(ds, self.input_to_tensor) if shallow_dl else ds,
batch_size=batch_size or 32,
**dl_kwargs)
elif isinstance(ds, DataLoader):
dl = ds
else:
raise TypeError("ds type not supported. Use Dataloader or Dataset instances")
return dl
def evaluate(self,
ds,
metrics=None,
batch_size=None,
collate_fn=None,
sampler=None,
num_workers=0,
pin_memory=False,
prefetch_factor=2):
""" Evaluate metrics
Arguments:
ds (* `torch.utils.data.Dataset`
* `torch.utils.data.DataLoader`
* `list`
* `np.array`): Input data
metrics (list of mapping, optional): Metrics to evaluate. If None is passed
                it will use the metrics defined at the compile step
batch_size (int or None): Number of samples per batch. If None is
passed it will default to 32. Only relevant for non dataloader data
collate_fn (callable, optional): merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a
map-style dataset. See `torch.utils.data.DataLoader`
sampler (Sampler or Iterable, optional): Defines the strategy to draw
samples from the dataset. Can be any ``Iterable`` with ``__len__``
implemented. If specified, :attr:`shuffle` must not be specified.
                See ``torch.utils.data.DataLoader``
num_workers (int, optional): Number of subprocesses to use for data
loading. 0 means that the data will be loaded in the main process.
pin_memory (bool): If True, the data loader will copy Tensors into
CUDA pinned memory before returning them. If your data elements are
a custom type, or your collate_fn returns a batch that is a custom
type, see the example below.
prefetch_factor (int, optional):
Number of samples loaded in advance by each worker. 2 means
there will be a total of 2 * num_workers samples prefetched
across all workers.
"""
dl = self._get_dataloader(ds,
batch_size=batch_size,
shuffle=False,
collate_fn=collate_fn,
sampler=sampler,
num_workers=num_workers,
pin_memory=pin_memory,
prefetch_factor=prefetch_factor)
return self.evaluate_on_dataloader(dl, metrics=metrics)
def fit(self,
train_ds,
val_ds=None,
epochs=1,
batch_size=None,
shuffle=True,
collate_fn=None,
sampler=None,
num_workers=0,
val_num_workers=None,
pin_memory=False,
val_pin_memory=False,
prefetch_factor=2,
val_prefetch_factor=None):
""" Trains the model for a fixed number of epochs
Arguments:
train_ds (* `torch.utils.data.Dataset`
* `torch.utils.data.DataLoader`
* `list`
* `np.array`): Train data
val_ds (* `torch.utils.data.Dataset`
* `torch.utils.data.DataLoader`
* `list`
* `np.array`): Validation data
batch_size (int or None): Number of samples per batch. If None is
passed it will default to 32. Only relevant for non dataloader data
epochs (int): Number of epochs to train the model
shuffle (bool): Set to ``True``to shuffle train dataset before every epoch. Only for
non dataloader train data.
collate_fn (callable, optional): merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a
map-style dataset. See `torch.utils.data.DataLoader`
sampler (Sampler or Iterable, optional): Defines the strategy to draw
samples from the dataset. Can be any ``Iterable`` with ``__len__``
implemented. If specified, :attr:`shuffle` must not be specified.
                See ``torch.utils.data.DataLoader``
num_workers (int, optional): Number of subprocesses to use for data
loading. 0 means that the data will be loaded in the main process.
val_num_workers (int, optional): Same as num_workers but for the validation dataset.
If not passed num_workers argument will be used
pin_memory (bool): If True, the data loader will copy Tensors into
CUDA pinned memory before returning them. If your data elements are
a custom type, or your collate_fn returns a batch that is a custom
type, see the example below.
val_pin_memory (bool): Same as pin_memory but for the validation dataset.
If not passed pin_memory argument will be used
prefetch_factor (int, optional):
Number of samples loaded in advance by each worker. 2 means
there will be a total of 2 * num_workers samples prefetched
across all workers.
val_prefetch_factor (int, optional): Same as prefetch_factor but for the validation dataset.
If not passed prefetch_factor argument will be used
"""
train_dl = self._get_dataloader(train_ds,
batch_size=batch_size,
shuffle=shuffle,
collate_fn=collate_fn,
sampler=sampler,
num_workers=num_workers,
pin_memory=pin_memory,
prefetch_factor=prefetch_factor)
if val_ds is None:
val_dl = None
else:
val_dl = self._get_dataloader(val_ds,
batch_size=batch_size,
shuffle=False,
collate_fn=collate_fn,
sampler=sampler,
num_workers=val_num_workers or num_workers,
pin_memory=val_pin_memory or pin_memory,
prefetch_factor=val_prefetch_factor or prefetch_factor)
return self.train_on_dataloader(train_dl,
val_dl,
epochs)
@property
def config(self):
config = {
'torchero_version': torchero.__version__,
'torchero_model_type': {'module': self.__class__.__module__,
'type': self.__class__.__name__},
'compiled': self._trainer is not None,
}
if hasattr(self.model, 'config'):
config.update({'net': {
'type': {'module': self.model.__class__.__module__,
'type': self.model.__class__.__name__},
'config': self.model.config
}})
return config
def init_from_config(self, config):
pass
def save(self, path_or_fp):
self.model.eval()
with zipfile.ZipFile(path_or_fp, mode='w') as zip_fp:
self._save_to_zip(zip_fp)
def _save_to_zip(self, zip_fp):
with zip_fp.open('model.pth', 'w') as fp:
torch.save(self.model.state_dict(), fp)
with zip_fp.open('config.json', 'w') as fp:
fp.write(json.dumps(self.config, indent=4).encode())
try:
self.trainer._save_to_zip(zip_fp, prefix='trainer/')
except ModelNotCompiled:
pass
def load(self, path_or_fp):
with zipfile.ZipFile(path_or_fp, mode='r') as zip_fp:
self._load_from_zip(zip_fp)
def _load_from_zip(self, zip_fp):
with zip_fp.open('model.pth', 'r') as fp:
self.model.load_state_dict(torch.load(fp))
with zip_fp.open('config.json', 'r') as config_fp:
config = json.loads(config_fp.read().decode())
if config['compiled'] is True:
self._trainer = SupervisedTrainer(model=self.model,
criterion=None,
optimizer=None)
self._trainer._load_from_zip(zip_fp, prefix='trainer/')
self.init_from_config(config)
class UnamedClassificationPredictionItem(PredictionItem):
""" Model Prediction with classes names
"""
def __init__(self, preds):
super(UnamedClassificationPredictionItem, self).__init__(preds)
if self._preds.ndim == 0:
self._preds = self._preds.unsqueeze(-1)
def as_dict(self):
return dict(enumerate(self._preds.tolist()))
def max(self):
return self._preds.max().item()
def argmax(self):
return self._preds.argmax().item()
def topk(self, k):
values, indices = self._preds.topk(k)
return list(zip(indices.tolist(), values.tolist()))
def as_tuple(self):
return tuple(self._preds.tolist())
def __repr__(self):
return repr(self.as_tuple())
class NamedClassificationPredictionItem(PredictionItem):
""" Model Prediction with classes names
"""
def __init__(self, preds, names=None):
super(NamedClassificationPredictionItem, self).__init__(preds)
self.names = names
if self._preds.ndim == 0:
self._preds = self._preds.unsqueeze(-1)
def max(self):
return self._preds.max().item()
def argmax(self):
return self.names[self._preds.argmax().item()]
def topk(self, k):
values, indices = self._preds.topk(k)
names = map(self.names.__getitem__, indices.tolist())
return list(zip(names, values.tolist()))
def as_dict(self):
return dict(zip(self.names, self._preds.tolist()))
def as_tuple(self):
        return tuple(self._preds.tolist())
def __repr__(self):
return repr(self.as_dict())
class ClassificationPredictionsResult(PredictionsResult):
""" List of model classification predictions
"""
def __init__(self, preds, names=None):
self.names = names
if self.names is None:
pred_class = UnamedClassificationPredictionItem
else:
pred_class = partial(NamedClassificationPredictionItem, names=self.names)
super(ClassificationPredictionsResult, self).__init__(preds, pred_class=pred_class)
def as_dict(self):
return [pred.as_dict() for pred in self._preds]
def as_tuple(self):
return [pred.as_tuple() for pred in self._preds]
def max(self):
return [pred.max() for pred in self._preds]
def argmax(self):
return [pred.argmax() for pred in self._preds]
def topk(self, k):
return [pred.topk(k) for pred in self._preds]
def as_df(self):
import pandas as pd
return pd.DataFrame.from_records(self.as_dict())
class BinaryClassificationModel(Model):
def __init__(self, model, use_logits=True, threshold=0.5, labels=None):
""" Constructor
Arguments:
model (nn.Module): Model to be wrapped
use_logits (bool): Set this as `True` if your model does **not**
contain sigmoid as activation in the final layer (preferable)
or 'False' otherwise
threshold (float): Threshold used for metrics and predictions to determine if a prediction is true
"""
super(BinaryClassificationModel, self).__init__(model)
self.use_logits = use_logits
self.threshold = threshold
self.labels = labels
@property
def config(self):
config = super(BinaryClassificationModel, self).config
config['labels'] = self.labels
return config
def init_from_config(self, config):
super(BinaryClassificationModel, self).init_from_config(config)
self.labels = config['labels']
def compile(self, optimizer, loss=None, metrics=None, hparams={}, callbacks=[], val_metrics=None):
""" Compile this model with a optimizer a loss and set of given metrics
Arguments:
optimizer (str or instance of torch.optim.Optimizer): Optimizer to train the model
loss (str or instance of torch.nn.Module, optional): Loss (criterion) to be minimized.
                By default 'binary_cross_entropy_wl' (the loss applies the sigmoid internally)
                if use_logits else 'binary_cross_entropy' (the model output is already a probability)
metrics (list or dict of `torchero.meters.BaseMeter`): A list of metrics
or dictionary of metrics names and meters to record for training set.
                By default binary accuracy, recall, precision and F1 score (with or without logits according to `use_logits`)
            hparams (list or dict, optional): A list of hyperparameters
                or dictionary of hyperparameter names and values to record
val_metrics (list or dict of `torchero.meters.BaseMeter`, optional): Same as metrics argument
                but only used for the validation set. If None it uses the same metrics as the `metrics` argument.
callbacks (list of `torchero.callbacks.Callback`): List of callbacks to use in trainings
"""
if loss is None:
loss = 'binary_cross_entropy_wl' if self.use_logits else 'binary_cross_entropy'
if metrics is None:
metrics = ([meters.BinaryWithLogitsAccuracy(threshold=self.threshold),
meters.Recall(threshold=self.threshold, with_logits=True),
meters.Precision(threshold=self.threshold, with_logits=True),
meters.F1Score(threshold=self.threshold, with_logits=True)]
if self.use_logits else
[meters.BinaryAccuracy(threshold=self.threshold),
meters.Recall(threshold=self.threshold, with_logits=False),
meters.Precision(threshold=self.threshold, with_logits=False),
meters.F1Score(threshold=self.threshold, with_logits=False)])
return super(BinaryClassificationModel, self).compile(optimizer=optimizer,
loss=loss,
metrics=metrics,
hparams=hparams,
callbacks=callbacks,
val_metrics=val_metrics)
def pred_class(self, preds):
return ClassificationPredictionsResult(preds, names=self.labels)
def classification_report(self,
ds,
batch_size=None,
collate_fn=None,
sampler=None):
clf_report = meters.binary_scores.BinaryClassificationReport(threshold=self.threshold,
with_logits=self.use_logits,
names=self.labels)
metrics = self.evaluate(ds,
metrics={'clf': clf_report},
batch_size=batch_size,
collate_fn=collate_fn,
sampler=sampler)
return metrics['clf']
def _predict_batch(self, *X, output_probas=True):
preds = super(BinaryClassificationModel, self)._predict_batch(*X)
if self.use_logits:
preds = torch.sigmoid(preds)
if not output_probas:
preds = preds > self.threshold
return preds
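# End-to-end sketch (illustrative only; whether torchero resolves string
# names such as 'adam' for the optimizer is an assumption here):
#
#   net = nn.Sequential(nn.Linear(10, 1))
#   model = BinaryClassificationModel(net, use_logits=True)
#   model.compile(optimizer='adam')        # default loss/metrics from above
#   model.fit(train_ds, val_ds, epochs=5, batch_size=32)
#   probas = model.predict(test_ds)        # sigmoid probabilities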
class ClassificationModel(Model):
""" Model Class for Classification (for categorical targets) tasks
"""
def __init__(self, model, use_softmax=True, classes=None):
""" Constructor
Arguments:
model (nn.Module): Model to be wrapped
use_softmax (bool): Set this as `True` if your model does **not**
contain softmax as activation in the final layer (preferable)
or 'False' otherwise
"""
super(ClassificationModel, self).__init__(model)
self.use_softmax = use_softmax
self.classes = classes
@property
def config(self):
config = super(ClassificationModel, self).config
config['classes'] = self.classes
return config
def init_from_config(self, config):
super(ClassificationModel, self).init_from_config(config)
self.classes = config['classes']
def compile(self, optimizer, loss=None, metrics=None, hparams={}, callbacks=[], val_metrics=None):
""" Compile this model with a optimizer a loss and set of given metrics
Arguments:
optimizer (str or instance of torch.optim.Optimizer): Optimizer to train the model
loss (str or instance of torch.nn.Module, optional): Loss (criterion) to be minimized.
                By default 'cross_entropy' if use_softmax else 'nll'
metrics (list or dict of `torchero.meters.BaseMeter`): A list of metrics
or dictionary of metrics names and meters to record for training set.
By default ['accuracy', 'balanced_accuracy']
            hparams (list or dict, optional): A list of hyperparameters
                or dictionary of hyperparameter names and values to record
val_metrics (list or dict of `torchero.meters.BaseMeter`, optional): Same as metrics argument
                but only used for the validation set. If None it uses the same metrics as the `metrics` argument.
callbacks (list of `torchero.callbacks.Callback`): List of callbacks to use in trainings
"""
if loss is None:
loss = 'cross_entropy' if self.use_softmax else 'nll'
if metrics is None:
metrics = [meters.CategoricalAccuracy(), meters.BalancedAccuracy()]
return super(ClassificationModel, self).compile(optimizer=optimizer,
loss=loss,
metrics=metrics,
hparams=hparams,
callbacks=callbacks,
val_metrics=val_metrics)
def pred_class(self, preds):
return ClassificationPredictionsResult(preds, names=self.classes)
def _predict_batch(self, *X):
preds = super(ClassificationModel, self)._predict_batch(*X)
if self.use_softmax:
preds = torch.softmax(preds, dim=-1)
return preds
class RegressionModel(Model):
""" Model Class for regression tasks
"""
def compile(self, optimizer, loss='mse', metrics=None, hparams={}, callbacks=[], val_metrics=None):
""" Compile this model with a optimizer a loss and set of given metrics
Arguments:
optimizer (str or instance of torch.optim.Optimizer): Optimizer to train the model
loss (str or instance of torch.nn.Module, optional): Loss (criterion) to be minimized. Default: 'mse'
metrics (list or dict of `torchero.meters.BaseMeter`): A list of metrics
or dictionary of metrics names and meters to record for training set.
By default RMSE
            hparams (list or dict, optional): A list of hyperparameters
                or dictionary of hyperparameter names and values to record
val_metrics (list or dict of `torchero.meters.BaseMeter`, optional): Same as metrics argument
                but only used for the validation set. If None it uses the same metrics as the `metrics` argument.
callbacks (list of `torchero.callbacks.Callback`): List of callbacks to use in trainings
"""
if metrics is None:
metrics = [meters.RMSE()]
return super(RegressionModel, self).compile(optimizer=optimizer,
loss=loss,
metrics=metrics,
hparams=hparams,
callbacks=callbacks,
val_metrics=val_metrics)
def load_model_from_file(path_or_fp, net=None):
return Model.load_from_file(path_or_fp, net)
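# Save/load round trip (illustrative): save() writes weights, config and,
# if compiled, trainer state into a zip archive; load_model_from_file()
# rebuilds the wrapper, re-creating the network from config when the net
# class exposes from_config(), or reusing a passed-in instance otherwise.
#
#   model.save('model.zip')
#   restored = load_model_from_file('model.zip', net=my_net)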
|
[
"json.dumps",
"torch.no_grad",
"torch.utils.data.DataLoader",
"torchero.meters.Precision",
"torchero.meters.RMSE",
"torch.load",
"torchero.meters.BalancedAccuracy",
"torch.softmax",
"torchero.meters.Recall",
"torchero.SupervisedTrainer",
"torchero.meters.F1Score",
"torchero.meters.CategoricalAccuracy",
"torchero.meters.binary_scores.BinaryClassificationReport",
"functools.partial",
"importlib.import_module",
"torchero.meters.BinaryWithLogitsAccuracy",
"zipfile.ZipFile",
"torch.stack",
"torchero.meters.BinaryAccuracy",
"torch.sigmoid"
] |
[((1330, 1380), 'torch.stack', 'torch.stack', (['[pred.tensor for pred in self._preds]'], {}), '([pred.tensor for pred in self._preds])\n', (1341, 1380), False, 'import torch\n'), ((2845, 2890), 'importlib.import_module', 'importlib.import_module', (["model_type['module']"], {}), "(model_type['module'])\n", (2868, 2890), False, 'import importlib\n'), ((5318, 5484), 'torchero.SupervisedTrainer', 'SupervisedTrainer', ([], {'model': 'self.model', 'criterion': 'loss', 'optimizer': 'optimizer', 'callbacks': 'callbacks', 'acc_meters': 'metrics', 'val_acc_meters': 'val_metrics', 'hparams': 'hparams'}), '(model=self.model, criterion=loss, optimizer=optimizer,\n    callbacks=callbacks, acc_meters=metrics, val_acc_meters=val_metrics,\n    hparams=hparams)\n', (5335, 5484), False, 'from torchero import SupervisedTrainer\n'), ((7911, 7929), 'torch.stack', 'torch.stack', (['preds'], {}), '(preds)\n', (7922, 7929), False, 'import torch\n'), ((11826, 11853), 'torch.utils.data.DataLoader', 'DataLoader', (['*args'], {}), '(*args, **kwargs)\n', (11836, 11853), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((27684, 27809), 'torchero.meters.binary_scores.BinaryClassificationReport', 'meters.binary_scores.BinaryClassificationReport', ([], {'threshold': 'self.threshold', 'with_logits': 'self.use_logits', 'names': 'self.labels'}), '(threshold=self.threshold,\n    with_logits=self.use_logits, names=self.labels)\n', (27731, 27809), False, 'from torchero import meters\n'), ((2609, 2646), 'zipfile.ZipFile', 'zipfile.ZipFile', (['path_or_fp'], {'mode': '"""r"""'}), "(path_or_fp, mode='r')\n", (2624, 2646), False, 'import zipfile\n'), ((3180, 3223), 'importlib.import_module', 'importlib.import_module', (["net_type['module']"], {}), "(net_type['module'])\n", (3203, 3223), False, 'import importlib\n'), ((6219, 6234), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6232, 6234), False, 'import torch\n'), ((19889, 19926), 'zipfile.ZipFile', 'zipfile.ZipFile', (['path_or_fp'], {'mode': '"""w"""'}), "(path_or_fp, mode='w')\n", (19904, 19926), False, 'import zipfile\n'), ((20406, 20443), 'zipfile.ZipFile', 'zipfile.ZipFile', (['path_or_fp'], {'mode': '"""r"""'}), "(path_or_fp, mode='r')\n", (20421, 20443), False, 'import zipfile\n'), ((20824, 20891), 'torchero.SupervisedTrainer', 'SupervisedTrainer', ([], {'model': 'self.model', 'criterion': 'None', 'optimizer': 'None'}), '(model=self.model, criterion=None, optimizer=None)\n', (20841, 20891), False, 'from torchero import SupervisedTrainer\n'), ((23028, 23088), 'functools.partial', 'partial', (['NamedClassificationPredictionItem'], {'names': 'self.names'}), '(NamedClassificationPredictionItem, names=self.names)\n', (23035, 23088), False, 'from functools import partial\n'), ((28407, 28427), 'torch.sigmoid', 'torch.sigmoid', (['preds'], {}), '(preds)\n', (28420, 28427), False, 'import torch\n'), ((31532, 31560), 'torch.softmax', 'torch.softmax', (['preds'], {'dim': '(-1)'}), '(preds, dim=-1)\n', (31545, 31560), False, 'import torch\n'), ((20623, 20637), 'torch.load', 'torch.load', (['fp'], {}), '(fp)\n', (20633, 20637), False, 'import torch\n'), ((30768, 30796), 'torchero.meters.CategoricalAccuracy', 'meters.CategoricalAccuracy', ([], {}), '()\n', (30794, 30796), False, 'from torchero import meters\n'), ((30798, 30823), 'torchero.meters.BalancedAccuracy', 'meters.BalancedAccuracy', ([], {}), '()\n', (30821, 30823), False, 'from torchero import meters\n'), ((32825, 32838), 'torchero.meters.RMSE', 'meters.RMSE', ([], {}), '()\n', (32836, 32838), False, 'from torchero import meters\n'), ((26173, 26230), 'torchero.meters.BinaryWithLogitsAccuracy', 'meters.BinaryWithLogitsAccuracy', ([], {'threshold': 'self.threshold'}), '(threshold=self.threshold)\n', (26204, 26230), False, 'from torchero import meters\n'), ((26256, 26313), 'torchero.meters.Recall', 'meters.Recall', ([], {'threshold': 'self.threshold', 'with_logits': '(True)'}), '(threshold=self.threshold, with_logits=True)\n', (26269, 26313), False, 'from torchero import meters\n'), ((26339, 26399), 'torchero.meters.Precision', 'meters.Precision', ([], {'threshold': 'self.threshold', 'with_logits': '(True)'}), '(threshold=self.threshold, with_logits=True)\n', (26355, 26399), False, 'from torchero import meters\n'), ((26425, 26483), 'torchero.meters.F1Score', 'meters.F1Score', ([], {'threshold': 'self.threshold', 'with_logits': '(True)'}), '(threshold=self.threshold, with_logits=True)\n', (26439, 26483), False, 'from torchero import meters\n'), ((26556, 26603), 'torchero.meters.BinaryAccuracy', 'meters.BinaryAccuracy', ([], {'threshold': 'self.threshold'}), '(threshold=self.threshold)\n', (26577, 26603), False, 'from torchero import meters\n'), ((26629, 26687), 'torchero.meters.Recall', 'meters.Recall', ([], {'threshold': 'self.threshold', 'with_logits': '(False)'}), '(threshold=self.threshold, with_logits=False)\n', (26642, 26687), False, 'from torchero import meters\n'), ((26713, 26774), 'torchero.meters.Precision', 'meters.Precision', ([], {'threshold': 'self.threshold', 'with_logits': '(False)'}), '(threshold=self.threshold, with_logits=False)\n', (26729, 26774), False, 'from torchero import meters\n'), ((26800, 26859), 'torchero.meters.F1Score', 'meters.F1Score', ([], {'threshold': 'self.threshold', 'with_logits': '(False)'}), '(threshold=self.threshold, with_logits=False)\n', (26814, 26859), False, 'from torchero import meters\n'), ((20188, 20221), 'json.dumps', 'json.dumps', (['self.config'], {'indent': '(4)'}), '(self.config, indent=4)\n', (20198, 20221), False, 'import json\n')]
|
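Each row in this dump carries three cells: the raw source file, the list of fully qualified APIs it uses, and an extract_api cell of tuples pairing character offsets with the API name and the reconstructed call text. A minimal sketch of consuming those offsets, assuming they are 0-based, end-exclusive indices into the code cell (the convention is inferred from the rows below):

# Hypothetical consumer of one extract_api tuple; the offset convention
# (0-based start, exclusive end, indexing the raw code string) is assumed.
def slice_call(code, start, end):
    return code[start:end]

source = "loss = torch.stack(preds)"
assert slice_call(source, 7, 25) == "torch.stack(preds)"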
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import torch
from itertools import permutations
def loss_calc(est, ref, loss_type):
    """
    loss selector: time-domain "sisdr", or spectral "mse" / "log_mse"
    """
    # time domain (wav input)
    if loss_type == "sisdr":
        loss = batch_SDR_torch(est, ref)
    elif loss_type == "mse":
        loss = batch_mse_torch(est, ref)
    elif loss_type == "log_mse":
        loss = batch_log_mse_torch(est, ref)
    else:
        raise ValueError("unknown loss_type: %s" % loss_type)
    return loss
def calc_sdr_torch(estimation, origin, mask=None):
"""
    batch-wise SDR calculation for one audio file on pytorch Variables.
estimation: (batch, nsample)
origin: (batch, nsample)
mask: optional, (batch, nsample), binary
"""
if mask is not None:
origin = origin * mask
estimation = estimation * mask
origin_power = torch.pow(origin, 2).sum(1, keepdim=True) + 1e-8 # (batch, 1)
scale = torch.sum(origin*estimation, 1, keepdim=True) / origin_power # (batch, 1)
est_true = scale * origin # (batch, nsample)
est_res = estimation - est_true # (batch, nsample)
true_power = torch.pow(est_true, 2).sum(1)
res_power = torch.pow(est_res, 2).sum(1)
    return 10*torch.log10(true_power) - 10*torch.log10(res_power)  # (batch,)
def batch_SDR_torch(estimation, origin, mask=None):
"""
    batch-wise SDR calculation for multiple audio files.
estimation: (batch, nsource, nsample)
origin: (batch, nsource, nsample)
mask: optional, (batch, nsample), binary
"""
batch_size_est, nsource_est, nsample_est = estimation.size()
batch_size_ori, nsource_ori, nsample_ori = origin.size()
assert batch_size_est == batch_size_ori, "Estimation and original sources should have same shape."
assert nsource_est == nsource_ori, "Estimation and original sources should have same shape."
assert nsample_est == nsample_ori, "Estimation and original sources should have same shape."
assert nsource_est < nsample_est, "Axis 1 should be the number of sources, and axis 2 should be the signal."
batch_size = batch_size_est
nsource = nsource_est
nsample = nsample_est
# zero mean signals
estimation = estimation - torch.mean(estimation, 2, keepdim=True).expand_as(estimation)
origin = origin - torch.mean(origin, 2, keepdim=True).expand_as(estimation)
# possible permutations
perm = list(set(permutations(np.arange(nsource))))
# pair-wise SDR
SDR = torch.zeros((batch_size, nsource, nsource)).type(estimation.type())
for i in range(nsource):
for j in range(nsource):
SDR[:,i,j] = calc_sdr_torch(estimation[:,i], origin[:,j], mask)
# choose the best permutation
SDR_max = []
SDR_perm = []
for permute in perm:
sdr = []
for idx in range(len(permute)):
sdr.append(SDR[:,idx,permute[idx]].view(batch_size,-1))
sdr = torch.sum(torch.cat(sdr, 1), 1)
SDR_perm.append(sdr.view(batch_size, 1))
SDR_perm = torch.cat(SDR_perm, 1)
SDR_max, _ = torch.max(SDR_perm, dim=1)
return - SDR_max / nsource
# def calc_mse_torch(estimation, origin):
# return torch.mean(torch.pow(estimation-origin,2),1).mean(1)
def batch_mse_torch(estimation, origin):
"""
    batch-wise mse calculation for multiple audio files.
estimation: (batch, nsource, frames, freq_bins)
origin: (batch, nsource, frames, freq_bins)
nsource = 2
"""
mse1 = torch.sqrt(torch.pow(estimation - origin, 2).mean([3])).mean([1,2])
mse2 = torch.sqrt(torch.pow(estimation - origin.flip([1]), 2).mean([3])).mean([1,2])
return torch.stack((mse1, mse2),1).min(1)[0]
def batch_log_mse_torch(estimation, origin):
"""
    batch-wise mse calculation for multiple audio files.
estimation: (batch, nsource, frames, freq_bins)
origin: (batch, nsource, frames, freq_bins)
nsource = 2
"""
# eps = 1e-20
# mse1 = torch.log10(torch.sqrt(torch.pow(estimation - origin, 2).mean([3])).mean([1,2])+eps)
# mse2 = torch.log10(torch.sqrt(torch.pow(estimation - origin.flip([1]), 2).mean([3])).mean([1,2])+eps)
mse1 = torch.log10(torch.pow(estimation - origin, 2).mean([3])).mean([1,2])
mse2 = torch.log10(torch.pow(estimation - origin.flip([1]), 2).mean([3])).mean([1,2])
return torch.stack((mse1, mse2),1).min(1)[0]
if __name__ == "__main__":
est = torch.rand(10, 2, 32, 1000)
ref = torch.rand(10, 2, 32, 1000)
out = loss_calc(est, ref, "mse")
print(out.shape)
print(out)
|
[
"torch.mean",
"torch.stack",
"torch.cat",
"torch.log10",
"torch.pow",
"torch.max",
"numpy.arange",
"torch.rand",
"torch.zeros",
"torch.sum"
] |
[((3031, 3053), 'torch.cat', 'torch.cat', (['SDR_perm', '(1)'], {}), '(SDR_perm, 1)\n', (3040, 3053), False, 'import torch\n'), ((3071, 3097), 'torch.max', 'torch.max', (['SDR_perm'], {'dim': '(1)'}), '(SDR_perm, dim=1)\n', (3080, 3097), False, 'import torch\n'), ((4405, 4432), 'torch.rand', 'torch.rand', (['(10)', '(2)', '(32)', '(1000)'], {}), '(10, 2, 32, 1000)\n', (4415, 4432), False, 'import torch\n'), ((4443, 4470), 'torch.rand', 'torch.rand', (['(10)', '(2)', '(32)', '(1000)'], {}), '(10, 2, 32, 1000)\n', (4453, 4470), False, 'import torch\n'), ((908, 955), 'torch.sum', 'torch.sum', (['(origin * estimation)', '(1)'], {'keepdim': '(True)'}), '(origin * estimation, 1, keepdim=True)\n', (917, 955), False, 'import torch\n'), ((1116, 1138), 'torch.pow', 'torch.pow', (['est_true', '(2)'], {}), '(est_true, 2)\n', (1125, 1138), False, 'import torch\n'), ((1162, 1183), 'torch.pow', 'torch.pow', (['est_res', '(2)'], {}), '(est_res, 2)\n', (1171, 1183), False, 'import torch\n'), ((1210, 1233), 'torch.log10', 'torch.log10', (['true_power'], {}), '(true_power)\n', (1221, 1233), False, 'import torch\n'), ((1239, 1261), 'torch.log10', 'torch.log10', (['res_power'], {}), '(res_power)\n', (1250, 1261), False, 'import torch\n'), ((2491, 2534), 'torch.zeros', 'torch.zeros', (['(batch_size, nsource, nsource)'], {}), '((batch_size, nsource, nsource))\n', (2502, 2534), False, 'import torch\n'), ((2945, 2962), 'torch.cat', 'torch.cat', (['sdr', '(1)'], {}), '(sdr, 1)\n', (2954, 2962), False, 'import torch\n'), ((828, 848), 'torch.pow', 'torch.pow', (['origin', '(2)'], {}), '(origin, 2)\n', (837, 848), False, 'import torch\n'), ((2226, 2265), 'torch.mean', 'torch.mean', (['estimation', '(2)'], {'keepdim': '(True)'}), '(estimation, 2, keepdim=True)\n', (2236, 2265), False, 'import torch\n'), ((2310, 2345), 'torch.mean', 'torch.mean', (['origin', '(2)'], {'keepdim': '(True)'}), '(origin, 2, keepdim=True)\n', (2320, 2345), False, 'import torch\n'), ((2434, 2452), 'numpy.arange', 'np.arange', (['nsource'], {}), '(nsource)\n', (2443, 2452), True, 'import numpy as np\n'), ((3652, 3680), 'torch.stack', 'torch.stack', (['(mse1, mse2)', '(1)'], {}), '((mse1, mse2), 1)\n', (3663, 3680), False, 'import torch\n'), ((4329, 4357), 'torch.stack', 'torch.stack', (['(mse1, mse2)', '(1)'], {}), '((mse1, mse2), 1)\n', (4340, 4357), False, 'import torch\n'), ((3495, 3528), 'torch.pow', 'torch.pow', (['(estimation - origin)', '(2)'], {}), '(estimation - origin, 2)\n', (3504, 3528), False, 'import torch\n'), ((4171, 4204), 'torch.pow', 'torch.pow', (['(estimation - origin)', '(2)'], {}), '(estimation - origin, 2)\n', (4180, 4204), False, 'import torch\n')]
|
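A quick sanity check for the SI-SDR projection used in calc_sdr_torch above: scale the reference by its correlation with the estimate, then compare target power against residual power. This stand-alone sketch mirrors that math on synthetic signals (shapes and noise level are arbitrary):

# Self-contained re-derivation of the calc_sdr_torch projection step;
# a near-perfect estimate should score roughly 40 dB for 1% added noise.
import torch

ref = torch.randn(4, 16000)
est = ref + 0.01 * torch.randn(4, 16000)
scale = torch.sum(ref * est, 1, keepdim=True) / (torch.pow(ref, 2).sum(1, keepdim=True) + 1e-8)
target = scale * ref
residual = est - target
sdr = 10 * torch.log10(target.pow(2).sum(1)) - 10 * torch.log10(residual.pow(2).sum(1))
print(sdr)  # ~40 dB per batch element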
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "bigfatnoob"
from store import base_store, mongo_driver
from utils import logger, lib
import properties
import re
LOGGER = logger.get_logger(os.path.basename(__file__).split(".")[0])
class InputStore(base_store.InputStore):
def __init__(self, dataset, **kwargs):
base_store.InputStore.__init__(self, dataset, **kwargs)
def load_inputs(self, args_key):
arguments = mongo_driver.get_collection(self.dataset, "fuzzed_args").find_one({"key": args_key})["args"]
assert len(arguments) == properties.FUZZ_ARGUMENT_SIZE
if self.is_array(arguments):
key_args = arguments
else:
key_args = [[] for _ in range(len(arguments[0]))]
for i in range(len(arguments[0])):
for arg in arguments:
          key_args[i].append(arg[i])  # transpose: i-th argument across fuzzed calls
return key_args
class FunctionStore(base_store.FunctionStore):
def __init__(self, dataset, **kwargs):
self.is_test = None
base_store.FunctionStore.__init__(self, dataset, **kwargs)
def load_function(self, function_name):
collection_name = "test_functions_executed" if self.is_test else "functions_executed"
collection = mongo_driver.get_collection(self.dataset, collection_name)
return collection.find_one({"name": function_name})
def load_functions(self):
collection_name = "test_functions_executed" if self.is_test else "functions_executed"
collection = mongo_driver.get_collection(self.dataset, collection_name)
return collection.find()
def load_metadata(self, funct):
return mongo_driver.get_collection(self.dataset, "functions_metadata").find_one({"name": funct["name"]})
def update_function_arg_type(self, function_name, function_arg_types):
collection = mongo_driver.get_collection(self.dataset, "py_functions_arg_types")
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "name")
collection.insert({
"name": function_name,
"types": function_arg_types
})
def load_function_arg_type(self, function_name):
try:
return mongo_driver.get_collection(self.dataset, "py_functions_arg_types").find_one({"name": function_name})
except Exception as e:
LOGGER.critical("Failed to load args for function: '%s'. Returning None."
"\nMessage: %s" % (function_name, e.message))
return None
def save_py_function(self, function_json):
collection_name = "test_py_functions_executed" if self.is_test else "py_functions_executed"
collection = mongo_driver.get_collection(self.dataset, collection_name)
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "name")
try:
collection.insert(function_json)
except Exception:
del function_json['outputs']
self.save_failed_py_function(function_json)
def load_py_function(self, function_name):
collection_name = "test_py_functions_executed" if self.is_test else "py_functions_executed"
collection = mongo_driver.get_collection(self.dataset, collection_name)
return collection.find_one({"name": function_name})
def exists_py_function(self, function_name):
return self.load_py_function(function_name) is not None
def save_failed_py_function(self, function_json):
collection_name = "test_py_functions_failed" if self.is_test else "py_functions_failed"
collection = mongo_driver.get_collection(self.dataset, collection_name)
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "name")
collection.insert(function_json)
def is_invalid_py_function(self, function_name):
collection_name = "test_py_functions_failed" if self.is_test else "py_functions_failed"
collection = mongo_driver.get_collection(self.dataset, collection_name)
return collection.find_one({"name": function_name}) is not None
def load_py_functions(self):
collection_name = "test_py_functions_executed" if self.is_test else "py_functions_executed"
collection = mongo_driver.get_collection(self.dataset, collection_name)
return collection.find()
def save_py_metadata(self, func_json):
collection = mongo_driver.get_collection(self.dataset, "py_functions_metadata")
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "name")
if mongo_driver.contains_document(collection, "name", func_json["name"]):
mongo_driver.delete_document(collection, "name", func_json["name"])
collection.insert(func_json)
def load_py_metadata(self, function_name):
try:
collection = mongo_driver.get_collection(self.dataset, "py_functions_metadata")
return collection.find_one({"name": function_name})
except Exception:
LOGGER.exception("Failed to metadata for function: '%s'. Returning None" % function_name)
return None
def get_executed_functions(self, language):
collection = mongo_driver.get_collection(self.dataset, "language_executed_functions")
document = collection.find_one({"language": language})
if document is None:
return None
return document['names']
class PyFileMetaStore(base_store.PyFileMetaStore):
def __init__(self, dataset, **kwargs):
base_store.PyFileMetaStore.__init__(self, dataset, **kwargs)
def load_meta(self, file_name):
sep_positions = [m.start() for m in re.finditer(os.sep, file_name)]
if sep_positions and len(sep_positions) > 3:
fp_regex = file_name[sep_positions[2]:]
else:
fp_regex = file_name
collection = mongo_driver.get_collection(self.dataset, "py_file_meta")
return collection.find_one({"file_path": {"$regex": fp_regex}})
def save_meta(self, bson_dict):
collection = mongo_driver.get_collection(self.dataset, "py_file_meta")
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "file_path")
collection.insert(bson_dict)
class ArgumentStore(base_store.ArgumentStore):
def __init__(self, dataset, **kwargs):
self.is_test = None
base_store.ArgumentStore.__init__(self, dataset, **kwargs)
def load_args(self, args_key):
collection_name = "test_fuzzed_args" if self.is_test else "fuzzed_args"
collection = mongo_driver.get_collection(self.dataset, collection_name)
try:
return collection.find_one({"key": args_key})
except Exception as e:
LOGGER.exception("Failed to load args with key: '%s'. Returning None" % args_key)
return None
class ExecutionStore(base_store.ExecutionStore):
def __init__(self, dataset, **kwargs):
base_store.ExecutionStore.__init__(self, dataset, **kwargs)
def save_language_executed_function_names(self, language, names):
collection = mongo_driver.get_collection(self.dataset, "language_executed_functions")
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "language")
if mongo_driver.contains_document(collection, "language", language):
mongo_driver.delete_document(collection, "language", language)
collection.insert({
"language": language,
"names": names
})
def save_cloned_function_names(self, name, clones):
collection = mongo_driver.get_collection(self.dataset, "cloned_functions")
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "_function_name_")
if mongo_driver.contains_document(collection, "_function_name_", name):
mongo_driver.delete_document(collection, "_function_name_", name)
clones["_function_name_"] = name
collection.insert(clones)
def load_cloned_function_names(self, name):
collection = mongo_driver.get_collection(self.dataset, "cloned_functions")
return mongo_driver.get_document(collection, "_function_name_", name)
class ClusterStore(base_store.ClusterStore):
def __init__(self, dataset, **kwargs):
base_store.ClusterStore.__init__(self, dataset, **kwargs)
def save_clusters(self, clusters, suffix):
collection_name = "clusters_%s" % suffix
collection = mongo_driver.get_collection(self.dataset, collection_name)
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "cluster_id")
for cluster_id, functions in clusters.items():
LOGGER.info("Saving cluster: '%d', with %d functions" % (cluster_id, len(functions)))
cluster = {
"cluster_id": cluster_id,
"functions": [lib.to_json(f) for f in functions]
}
collection.insert(cluster)
|
[
"os.path.abspath",
"store.base_store.ClusterStore.__init__",
"store.base_store.PyFileMetaStore.__init__",
"store.mongo_driver.contains_document",
"store.base_store.InputStore.__init__",
"store.base_store.ExecutionStore.__init__",
"re.finditer",
"store.mongo_driver.get_collection",
"store.base_store.FunctionStore.__init__",
"store.mongo_driver.is_collection_exists",
"store.base_store.ArgumentStore.__init__",
"utils.lib.to_json",
"store.mongo_driver.delete_document",
"store.mongo_driver.get_document",
"store.mongo_driver.create_unique_index_for_collection"
] |
[((38, 58), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (53, 58), False, 'import os\n'), ((379, 434), 'store.base_store.InputStore.__init__', 'base_store.InputStore.__init__', (['self', 'dataset'], {}), '(self, dataset, **kwargs)\n', (409, 434), False, 'from store import base_store, mongo_driver\n'), ((1008, 1066), 'store.base_store.FunctionStore.__init__', 'base_store.FunctionStore.__init__', (['self', 'dataset'], {}), '(self, dataset, **kwargs)\n', (1041, 1066), False, 'from store import base_store, mongo_driver\n'), ((1217, 1275), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', 'collection_name'], {}), '(self.dataset, collection_name)\n', (1244, 1275), False, 'from store import base_store, mongo_driver\n'), ((1468, 1526), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', 'collection_name'], {}), '(self.dataset, collection_name)\n', (1495, 1526), False, 'from store import base_store, mongo_driver\n'), ((1791, 1858), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', '"""py_functions_arg_types"""'], {}), "(self.dataset, 'py_functions_arg_types')\n", (1818, 1858), False, 'from store import base_store, mongo_driver\n'), ((2613, 2671), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', 'collection_name'], {}), '(self.dataset, collection_name)\n', (2640, 2671), False, 'from store import base_store, mongo_driver\n'), ((3118, 3176), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', 'collection_name'], {}), '(self.dataset, collection_name)\n', (3145, 3176), False, 'from store import base_store, mongo_driver\n'), ((3503, 3561), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', 'collection_name'], {}), '(self.dataset, collection_name)\n', (3530, 3561), False, 'from store import base_store, mongo_driver\n'), ((3892, 3950), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', 'collection_name'], {}), '(self.dataset, collection_name)\n', (3919, 3950), False, 'from store import base_store, mongo_driver\n'), ((4164, 4222), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', 'collection_name'], {}), '(self.dataset, collection_name)\n', (4191, 4222), False, 'from store import base_store, mongo_driver\n'), ((4311, 4377), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', '"""py_functions_metadata"""'], {}), "(self.dataset, 'py_functions_metadata')\n", (4338, 4377), False, 'from store import base_store, mongo_driver\n'), ((4517, 4586), 'store.mongo_driver.contains_document', 'mongo_driver.contains_document', (['collection', '"""name"""', "func_json['name']"], {}), "(collection, 'name', func_json['name'])\n", (4547, 4586), False, 'from store import base_store, mongo_driver\n'), ((5094, 5166), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', '"""language_executed_functions"""'], {}), "(self.dataset, 'language_executed_functions')\n", (5121, 5166), False, 'from store import base_store, mongo_driver\n'), ((5397, 5457), 'store.base_store.PyFileMetaStore.__init__', 'base_store.PyFileMetaStore.__init__', (['self', 'dataset'], {}), '(self, dataset, **kwargs)\n', (5432, 5457), False, 'from store import base_store, mongo_driver\n'), ((5715, 5772), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', '"""py_file_meta"""'], {}), "(self.dataset, 'py_file_meta')\n", (5742, 5772), False, 'from store import base_store, mongo_driver\n'), ((5893, 5950), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', '"""py_file_meta"""'], {}), "(self.dataset, 'py_file_meta')\n", (5920, 5950), False, 'from store import base_store, mongo_driver\n'), ((6239, 6297), 'store.base_store.ArgumentStore.__init__', 'base_store.ArgumentStore.__init__', (['self', 'dataset'], {}), '(self, dataset, **kwargs)\n', (6272, 6297), False, 'from store import base_store, mongo_driver\n'), ((6425, 6483), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', 'collection_name'], {}), '(self.dataset, collection_name)\n', (6452, 6483), False, 'from store import base_store, mongo_driver\n'), ((6774, 6833), 'store.base_store.ExecutionStore.__init__', 'base_store.ExecutionStore.__init__', (['self', 'dataset'], {}), '(self, dataset, **kwargs)\n', (6808, 6833), False, 'from store import base_store, mongo_driver\n'), ((6920, 6992), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', '"""language_executed_functions"""'], {}), "(self.dataset, 'language_executed_functions')\n", (6947, 6992), False, 'from store import base_store, mongo_driver\n'), ((7136, 7200), 'store.mongo_driver.contains_document', 'mongo_driver.contains_document', (['collection', '"""language"""', 'language'], {}), "(collection, 'language', language)\n", (7166, 7200), False, 'from store import base_store, mongo_driver\n'), ((7423, 7484), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', '"""cloned_functions"""'], {}), "(self.dataset, 'cloned_functions')\n", (7450, 7484), False, 'from store import base_store, mongo_driver\n'), ((7635, 7702), 'store.mongo_driver.contains_document', 'mongo_driver.contains_document', (['collection', '"""_function_name_"""', 'name'], {}), "(collection, '_function_name_', name)\n", (7665, 7702), False, 'from store import base_store, mongo_driver\n'), ((7907, 7968), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', '"""cloned_functions"""'], {}), "(self.dataset, 'cloned_functions')\n", (7934, 7968), False, 'from store import base_store, mongo_driver\n'), ((7980, 8042), 'store.mongo_driver.get_document', 'mongo_driver.get_document', (['collection', '"""_function_name_"""', 'name'], {}), "(collection, '_function_name_', name)\n", (8005, 8042), False, 'from store import base_store, mongo_driver\n'), ((8135, 8192), 'store.base_store.ClusterStore.__init__', 'base_store.ClusterStore.__init__', (['self', 'dataset'], {}), '(self, dataset, **kwargs)\n', (8167, 8192), False, 'from store import base_store, mongo_driver\n'), ((8302, 8360), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', 'collection_name'], {}), '(self.dataset, collection_name)\n', (8329, 8360), False, 'from store import base_store, mongo_driver\n'), ((1870, 1915), 'store.mongo_driver.is_collection_exists', 'mongo_driver.is_collection_exists', (['collection'], {}), '(collection)\n', (1903, 1915), False, 'from store import base_store, mongo_driver\n'), ((1923, 1990), 'store.mongo_driver.create_unique_index_for_collection', 'mongo_driver.create_unique_index_for_collection', (['collection', '"""name"""'], {}), "(collection, 'name')\n", (1970, 1990), False, 'from store import base_store, mongo_driver\n'), ((2683, 2728), 'store.mongo_driver.is_collection_exists', 'mongo_driver.is_collection_exists', (['collection'], {}), '(collection)\n', (2716, 2728), False, 'from store import base_store, mongo_driver\n'), ((2736, 2803), 'store.mongo_driver.create_unique_index_for_collection', 'mongo_driver.create_unique_index_for_collection', (['collection', '"""name"""'], {}), "(collection, 'name')\n", (2783, 2803), False, 'from store import base_store, mongo_driver\n'), ((3573, 3618), 'store.mongo_driver.is_collection_exists', 'mongo_driver.is_collection_exists', (['collection'], {}), '(collection)\n', (3606, 3618), False, 'from store import base_store, mongo_driver\n'), ((3626, 3693), 'store.mongo_driver.create_unique_index_for_collection', 'mongo_driver.create_unique_index_for_collection', (['collection', '"""name"""'], {}), "(collection, 'name')\n", (3673, 3693), False, 'from store import base_store, mongo_driver\n'), ((4389, 4434), 'store.mongo_driver.is_collection_exists', 'mongo_driver.is_collection_exists', (['collection'], {}), '(collection)\n', (4422, 4434), False, 'from store import base_store, mongo_driver\n'), ((4442, 4509), 'store.mongo_driver.create_unique_index_for_collection', 'mongo_driver.create_unique_index_for_collection', (['collection', '"""name"""'], {}), "(collection, 'name')\n", (4489, 4509), False, 'from store import base_store, mongo_driver\n'), ((4594, 4661), 'store.mongo_driver.delete_document', 'mongo_driver.delete_document', (['collection', '"""name"""', "func_json['name']"], {}), "(collection, 'name', func_json['name'])\n", (4622, 4661), False, 'from store import base_store, mongo_driver\n'), ((4769, 4835), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', '"""py_functions_metadata"""'], {}), "(self.dataset, 'py_functions_metadata')\n", (4796, 4835), False, 'from store import base_store, mongo_driver\n'), ((5962, 6007), 'store.mongo_driver.is_collection_exists', 'mongo_driver.is_collection_exists', (['collection'], {}), '(collection)\n', (5995, 6007), False, 'from store import base_store, mongo_driver\n'), ((6015, 6087), 'store.mongo_driver.create_unique_index_for_collection', 'mongo_driver.create_unique_index_for_collection', (['collection', '"""file_path"""'], {}), "(collection, 'file_path')\n", (6062, 6087), False, 'from store import base_store, mongo_driver\n'), ((7004, 7049), 'store.mongo_driver.is_collection_exists', 'mongo_driver.is_collection_exists', (['collection'], {}), '(collection)\n', (7037, 7049), False, 'from store import base_store, mongo_driver\n'), ((7057, 7128), 'store.mongo_driver.create_unique_index_for_collection', 'mongo_driver.create_unique_index_for_collection', (['collection', '"""language"""'], {}), "(collection, 'language')\n", (7104, 7128), False, 'from store import base_store, mongo_driver\n'), ((7208, 7270), 'store.mongo_driver.delete_document', 'mongo_driver.delete_document', (['collection', '"""language"""', 'language'], {}), "(collection, 'language', language)\n", (7236, 7270), False, 'from store import base_store, mongo_driver\n'), ((7496, 7541), 'store.mongo_driver.is_collection_exists', 'mongo_driver.is_collection_exists', (['collection'], {}), '(collection)\n', (7529, 7541), False, 'from store import base_store, mongo_driver\n'), ((7549, 7627), 'store.mongo_driver.create_unique_index_for_collection', 'mongo_driver.create_unique_index_for_collection', (['collection', '"""_function_name_"""'], {}), "(collection, '_function_name_')\n", (7596, 7627), False, 'from store import base_store, mongo_driver\n'), ((7710, 7775), 'store.mongo_driver.delete_document', 'mongo_driver.delete_document', (['collection', '"""_function_name_"""', 'name'], {}), "(collection, '_function_name_', name)\n", (7738, 7775), False, 'from store import base_store, mongo_driver\n'), ((8372, 8417), 'store.mongo_driver.is_collection_exists', 'mongo_driver.is_collection_exists', (['collection'], {}), '(collection)\n', (8405, 8417), False, 'from store import base_store, mongo_driver\n'), ((8425, 8498), 'store.mongo_driver.create_unique_index_for_collection', 'mongo_driver.create_unique_index_for_collection', (['collection', '"""cluster_id"""'], {}), "(collection, 'cluster_id')\n", (8472, 8498), False, 'from store import base_store, mongo_driver\n'), ((1602, 1665), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', '"""functions_metadata"""'], {}), "(self.dataset, 'functions_metadata')\n", (1629, 1665), False, 'from store import base_store, mongo_driver\n'), ((5534, 5564), 're.finditer', 're.finditer', (['os.sep', 'file_name'], {}), '(os.sep, file_name)\n', (5545, 5564), False, 'import re\n'), ((487, 543), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', '"""fuzzed_args"""'], {}), "(self.dataset, 'fuzzed_args')\n", (514, 543), False, 'from store import base_store, mongo_driver\n'), ((2159, 2226), 'store.mongo_driver.get_collection', 'mongo_driver.get_collection', (['self.dataset', '"""py_functions_arg_types"""'], {}), "(self.dataset, 'py_functions_arg_types')\n", (2186, 2226), False, 'from store import base_store, mongo_driver\n'), ((8717, 8731), 'utils.lib.to_json', 'lib.to_json', (['f'], {}), '(f)\n', (8728, 8731), False, 'from utils import logger, lib\n')]
|
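Nearly every write path in the store above follows one idiom: create a unique index the first time a collection is touched, then insert and let the index reject duplicates. A minimal sketch of the same pattern against pymongo directly (the "fuzzer" database, "functions" collection and "name" field are illustrative, not taken from the mongo_driver wrapper):

# Ensure-unique-index-then-insert, as wrapped by mongo_driver above;
# database/collection/field names here are placeholders.
from pymongo import ASCENDING, MongoClient
from pymongo.errors import DuplicateKeyError

coll = MongoClient()["fuzzer"]["functions"]
coll.create_index([("name", ASCENDING)], unique=True)  # idempotent
try:
    coll.insert_one({"name": "foo", "types": ["int"]})
except DuplicateKeyError:
    pass  # duplicate name; same effect as the try/except around insert above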
from __future__ import absolute_import, division
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_allclose
from pytest import raises
from fatiando.seismic import conv
def test_impulse_response():
"""
    conv.convolutional_model returns the source wavelet as the result when the
    model is a centred spike and the model has the same length as the source
    wavelet
"""
w = conv.rickerwave(30., 2.e-3)
rc_test = np.zeros((w.shape[0], 20))
rc_test[w.shape[0]//2, :] = 1.
spike = conv.convolutional_model(rc_test, 30., conv.rickerwave, dt=2.e-3)
for j in range(0, rc_test.shape[1]):
assert_array_almost_equal(spike[:, j], w, 9)
def test_rc_shorter_than_wavelet():
"""
    When the reflectivity series is shorter than the wavelet, the spike
    response is observed as in the opposite case. The difference is that the
    Ricker wavelet (or other symmetric wavelet) is truncated in the result.
"""
w = conv.rickerwave(30., 2.e-3)
rc_test = np.zeros((21, 20))
rc_test[rc_test.shape[0]//2, :] = 1
spike = conv.convolutional_model(rc_test, 30., conv.rickerwave, dt=2.e-3)
for j in range(0, rc_test.shape[1]):
wmin = (w.shape[0] - rc_test.shape[0])//2
wmax = -(w.shape[0] - rc_test.shape[0])//2
assert_array_almost_equal(spike[:, j], w[wmin:wmax], 9)
def test_reflectivity_wrong_dimensions():
"""
Velocity and density are provided as matrix or vector to reflectivity
calculation, so they must have the same dimension.
"""
vel = np.ones((10, 10))
dens = np.ones((11, 11))
raises(AssertionError, conv.reflectivity, vel, dens)
vel = np.ones((10))
dens = np.ones((11))
raises(AssertionError, conv.reflectivity, vel, dens)
def test_depth_2_time_wrong_dimensions():
"""
    Velocity and property are provided as matrix to depth to time conversion,
so they must have the same dimension.
"""
vel = np.ones((10, 10))
dens = np.ones((11, 11))
dt = 2.e-3
dz = 1.
raises(AssertionError, conv.depth_2_time, vel, dens, dt, dz)
def test_ricker():
"""
conv.rickerwave inputs must satisfy the condition for sampling and
    stability, otherwise an error is raised.
"""
f = 50.
dt = 2.e-3
raises(AssertionError, conv.rickerwave, f, dt)
|
[
"numpy.zeros",
"numpy.ones",
"fatiando.seismic.conv.rickerwave",
"pytest.raises",
"fatiando.seismic.conv.convolutional_model",
"numpy.testing.assert_array_almost_equal"
] |
[((428, 456), 'fatiando.seismic.conv.rickerwave', 'conv.rickerwave', (['(30.0)', '(0.002)'], {}), '(30.0, 0.002)\n', (443, 456), False, 'from fatiando.seismic import conv\n'), ((470, 496), 'numpy.zeros', 'np.zeros', (['(w.shape[0], 20)'], {}), '((w.shape[0], 20))\n', (478, 496), True, 'import numpy as np\n'), ((544, 610), 'fatiando.seismic.conv.convolutional_model', 'conv.convolutional_model', (['rc_test', '(30.0)', 'conv.rickerwave'], {'dt': '(0.002)'}), '(rc_test, 30.0, conv.rickerwave, dt=0.002)\n', (568, 610), False, 'from fatiando.seismic import conv\n'), ((998, 1026), 'fatiando.seismic.conv.rickerwave', 'conv.rickerwave', (['(30.0)', '(0.002)'], {}), '(30.0, 0.002)\n', (1013, 1026), False, 'from fatiando.seismic import conv\n'), ((1040, 1058), 'numpy.zeros', 'np.zeros', (['(21, 20)'], {}), '((21, 20))\n', (1048, 1058), True, 'import numpy as np\n'), ((1111, 1177), 'fatiando.seismic.conv.convolutional_model', 'conv.convolutional_model', (['rc_test', '(30.0)', 'conv.rickerwave'], {'dt': '(0.002)'}), '(rc_test, 30.0, conv.rickerwave, dt=0.002)\n', (1135, 1177), False, 'from fatiando.seismic import conv\n'), ((1582, 1599), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (1589, 1599), True, 'import numpy as np\n'), ((1611, 1628), 'numpy.ones', 'np.ones', (['(11, 11)'], {}), '((11, 11))\n', (1618, 1628), True, 'import numpy as np\n'), ((1633, 1685), 'pytest.raises', 'raises', (['AssertionError', 'conv.reflectivity', 'vel', 'dens'], {}), '(AssertionError, conv.reflectivity, vel, dens)\n', (1639, 1685), False, 'from pytest import raises\n'), ((1696, 1707), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (1703, 1707), True, 'import numpy as np\n'), ((1721, 1732), 'numpy.ones', 'np.ones', (['(11)'], {}), '(11)\n', (1728, 1732), True, 'import numpy as np\n'), ((1739, 1791), 'pytest.raises', 'raises', (['AssertionError', 'conv.reflectivity', 'vel', 'dens'], {}), '(AssertionError, conv.reflectivity, vel, dens)\n', (1745, 1791), False, 'from pytest import raises\n'), ((1983, 2000), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (1990, 2000), True, 'import numpy as np\n'), ((2012, 2029), 'numpy.ones', 'np.ones', (['(11, 11)'], {}), '((11, 11))\n', (2019, 2029), True, 'import numpy as np\n'), ((2061, 2121), 'pytest.raises', 'raises', (['AssertionError', 'conv.depth_2_time', 'vel', 'dens', 'dt', 'dz'], {}), '(AssertionError, conv.depth_2_time, vel, dens, dt, dz)\n', (2067, 2121), False, 'from pytest import raises\n'), ((2311, 2357), 'pytest.raises', 'raises', (['AssertionError', 'conv.rickerwave', 'f', 'dt'], {}), '(AssertionError, conv.rickerwave, f, dt)\n', (2317, 2357), False, 'from pytest import raises\n'), ((659, 703), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['spike[:, j]', 'w', '(9)'], {}), '(spike[:, j], w, 9)\n', (684, 703), False, 'from numpy.testing import assert_array_almost_equal, assert_allclose\n'), ((1327, 1382), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['spike[:, j]', 'w[wmin:wmax]', '(9)'], {}), '(spike[:, j], w[wmin:wmax], 9)\n', (1352, 1382), False, 'from numpy.testing import assert_array_almost_equal, assert_allclose\n')]
|
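The first two tests above lean on a standard identity: convolving a wavelet with a unit spike reproduces the wavelet, centred at the spike. A numpy-only sketch of that identity, independent of fatiando (the wavelet choice and lengths are arbitrary):

# A unit spike is the identity element of convolution, so the trace
# around the spike equals the wavelet itself.
import numpy as np

w = np.hanning(31)                  # stand-in wavelet
rc = np.zeros(101)
rc[50] = 1.0                         # centred spike reflectivity
trace = np.convolve(rc, w, mode="same")
assert np.allclose(trace[50 - 15:50 + 16], w)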
#!/usr/bin/env python
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# @Author: <NAME>
# @Lab of Machine Learning and Data Mining, TianJin University
# @Email: <EMAIL>
# @Date: 2018-10-26 15:32:34
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from __future__ import print_function
from __future__ import absolute_import
import argparse
import os
import subprocess
class NvidiaSuper:
NVIDIA_COMMAND = 'nvidia-smi'
def __init__(self):
self.source = None
self.gpu_process = []
self._get_source()
self._get_process_pool()
def _get_source(self):
try:
res = subprocess.Popen(
self.NVIDIA_COMMAND,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
                # uncomment below on Python 3 (stdout yields bytes otherwise)
# encoding='utf-8'
)
self.source = res.stdout.readlines()
        except Exception:
raise EnvironmentError('No GPU driver.')
def _get_process_line(self):
for idx, line in enumerate(self.source):
if 'Processes' in line:
return idx
def _get_process_pool(self):
idx_line = self._get_process_line() + 3
for line in self.source[idx_line:]:
if line.startswith('+-'):
break
if 'No running processes found' in line:
return []
info_lst = line.strip().split()
idx_gpu = info_lst[1]
pid = info_lst[2]
s = self.ps_info(pid)
s.append('\n')
info = []
info.append(idx_gpu)
# user
info.append(s[0])
# pid
info.append(s[1])
# stat
info.append(s[7])
# start
info.append(s[8])
# time
info.append(s[9])
command = ' '.join(s[10:])
info.append(command)
self.gpu_process.append('\t'.join(info))
return self.gpu_process
@staticmethod
def ps_info(pid):
res = subprocess.Popen(
'ps -u -p ' + pid,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
            # uncomment below on Python 3 (stdout yields bytes otherwise)
# encoding='utf-8'
)
return res.stdout.readlines()[1].split()
def print_to(self):
print(''.join(self.source))
title = ['GPU', 'USER', 'PID', 'STAT', 'START', 'TIME', 'COMMAND']
print('\t'.join(title))
print(''.join(self.gpu_process))
if __name__ == '__main__':
    monitor = NvidiaSuper()
    monitor.print_to()
|
[
"subprocess.Popen"
] |
[((2192, 2307), 'subprocess.Popen', 'subprocess.Popen', (["('ps -u -p ' + pid)"], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'close_fds': '(True)'}), "('ps -u -p ' + pid, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, close_fds=True)\n", (2208, 2307), False, 'import subprocess\n'), ((683, 800), 'subprocess.Popen', 'subprocess.Popen', (['self.NVIDIA_COMMAND'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'close_fds': '(True)'}), '(self.NVIDIA_COMMAND, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, close_fds=True)\n', (699, 800), False, 'import subprocess\n')]
|
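The commented-out encoding argument above points at the real pitfall in this script: on Python 3, Popen pipes yield bytes, so checks like 'Processes' in line raise TypeError. A minimal sketch of the text-mode fix (same nvidia-smi call; text= requires Python 3.7, older interpreters can pass universal_newlines=True instead):

# Request decoded text from Popen so line parsing compares str to str.
import subprocess

res = subprocess.Popen(
    'nvidia-smi',
    shell=True,
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    text=True,  # or universal_newlines=True / encoding='utf-8'
)
for line in res.stdout:
    if 'Processes' in line:
        print(line.rstrip())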
#!/usr/bin/env python3
import sys
import numpy as np
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, BatchNormalization
from keras.layers.convolutional import Conv3D, Deconv3D
from keras.layers.core import Activation
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.utils import plot_model
class Discriminator(object):
def __init__(self, side=16):
self.INPUT_SHAPE = (side,side,side,3)
self.OPTIMIZER = Adam(lr=0.000001, beta_1=0.5)
self.Discriminator = self.model()
self.Discriminator.compile(loss='binary_crossentropy', optimizer=self.OPTIMIZER, metrics=['accuracy'] )
# self.save_model()
self.summary()
def block(self,first_layer,filter_size=512,kernel_size=(3,3,3)):
x = Conv3D(filters=filter_size, kernel_size=kernel_size, kernel_initializer='glorot_normal',
bias_initializer='zeros', padding='same')(first_layer)
x = BatchNormalization()(x)
x = LeakyReLU(0.2)(x)
return x
def model(self):
input_layer = Input(shape=self.INPUT_SHAPE)
x = self.block(input_layer,filter_size=8)
x = self.block(x,filter_size=16,)
x = self.block(x,filter_size=32)
x = self.block(x,filter_size=64)
x = Conv3D(filters=1, kernel_size=(3,3,3),
strides=(1,1,1), kernel_initializer='glorot_normal',
bias_initializer='zeros', padding='valid')(x)
x = BatchNormalization()(x)
x = Flatten()(x)
output_layer = Dense(1, activation='sigmoid')(x)
model = Model(inputs=input_layer, outputs=output_layer)
return model
def summary(self):
return self.Discriminator.summary()
def save_model(self):
        plot_model(self.Discriminator, to_file='/data/Discriminator_Model.png')
|
[
"keras.optimizers.Adam",
"keras.layers.Flatten",
"keras.layers.convolutional.Conv3D",
"keras.models.Model",
"keras.utils.plot_model",
"keras.layers.Dense",
"keras.layers.advanced_activations.LeakyReLU",
"keras.layers.Input",
"keras.layers.BatchNormalization"
] |
[((538, 564), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(1e-06)', 'beta_1': '(0.5)'}), '(lr=1e-06, beta_1=0.5)\n', (542, 564), False, 'from keras.optimizers import Adam\n'), ((1150, 1179), 'keras.layers.Input', 'Input', ([], {'shape': 'self.INPUT_SHAPE'}), '(shape=self.INPUT_SHAPE)\n', (1155, 1179), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, BatchNormalization\n'), ((1681, 1728), 'keras.models.Model', 'Model', ([], {'inputs': 'input_layer', 'outputs': 'output_layer'}), '(inputs=input_layer, outputs=output_layer)\n', (1686, 1728), False, 'from keras.models import Sequential, Model\n'), ((1854, 1931), 'keras.utils.plot_model', 'plot_model', (['self.Discriminator.model'], {'to_file': '"""/data/Discriminator_Model.png"""'}), "(self.Discriminator.model, to_file='/data/Discriminator_Model.png')\n", (1864, 1931), False, 'from keras.utils import plot_model\n'), ((858, 993), 'keras.layers.convolutional.Conv3D', 'Conv3D', ([], {'filters': 'filter_size', 'kernel_size': 'kernel_size', 'kernel_initializer': '"""glorot_normal"""', 'bias_initializer': '"""zeros"""', 'padding': '"""same"""'}), "(filters=filter_size, kernel_size=kernel_size, kernel_initializer=\n 'glorot_normal', bias_initializer='zeros', padding='same')\n", (864, 993), False, 'from keras.layers.convolutional import Conv3D, Deconv3D\n'), ((1034, 1054), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1052, 1054), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, BatchNormalization\n'), ((1070, 1084), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1079, 1084), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((1368, 1515), 'keras.layers.convolutional.Conv3D', 'Conv3D', ([], {'filters': '(1)', 'kernel_size': '(3, 3, 3)', 'strides': '(1, 1, 1)', 'kernel_initializer': '"""glorot_normal"""', 'bias_initializer': '"""zeros"""', 'padding': '"""valid"""'}), "(filters=1, kernel_size=(3, 3, 3), strides=(1, 1, 1),\n kernel_initializer='glorot_normal', bias_initializer='zeros', padding=\n 'valid')\n", (1374, 1515), False, 'from keras.layers.convolutional import Conv3D, Deconv3D\n'), ((1558, 1578), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1576, 1578), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, BatchNormalization\n'), ((1594, 1603), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1601, 1603), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, BatchNormalization\n'), ((1630, 1660), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1635, 1660), False, 'from keras.layers import Input, Dense, Reshape, Flatten, Dropout, BatchNormalization\n')]
|
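A minimal driver for the class above: build the discriminator for 16^3 voxel patches and score a random batch. The batch size is arbitrary, and the sketch assumes the module above is importable under a Keras 2.x backend:

# Score random voxel grids with the discriminator; the same-padded Conv3D
# blocks keep the 16x16x16 extent, the final valid conv reduces it, and
# Dense(1, sigmoid) yields one real/fake score per sample.
import numpy as np

d = Discriminator(side=16)
voxels = np.random.rand(2, 16, 16, 16, 3).astype('float32')
scores = d.Discriminator.predict(voxels)
print(scores.shape)  # (2, 1)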
import numpy as np
def L2Loss(y_predicted, y_ground_truth, reduction="None"):
"""returns l2 loss between two arrays
:param y_predicted: array of predicted values
:type y_predicted: ndarray
:param y_ground_truth: array of ground truth values
:type y_ground_truth: ndarray
:param reduction: reduction mode, defaults to "mean"
:type reduction: str, optional
:return: l2-loss
:rtype: scalar if reduction is sum or mean, else ndarray
"""
# Calculate the difference array
difference = y_predicted - y_ground_truth
# Raise every difference value to the power of 2
squared_difference = np.multiply(difference, difference)
# L2 distance is the reduced form of the squared difference array
if reduction == "sum":
# Reduction can be done by summing up all the values in the difference array (this is known as "L2-Loss")
l2_distance = np.sum(squared_difference)
return l2_distance
elif reduction == "mean":
# Reduction can also be done by taking the mean (this is known as "Mean Squared Error")
mean_squared_error = np.mean(squared_difference)
return mean_squared_error
elif reduction == "None":
return squared_difference
else:
        raise ValueError('reduction should be "sum" / "mean" / "None"')
def main():
print("Initializing predicted and ground truth arrays:\n")
print('(NOTE: Enter the values in a space-separated format. Ex: "5.36 1.02 2.03")')
y_predicted = [
float(item) for item in input("Enter the predicted values: ").split()
]
y_ground_truth = [
float(item)
for item in input("Enter the corresponding ground truth values: ").split()
]
assert len(y_predicted) == len(
y_ground_truth
), "Number of predicted values {} and ground truth {} values should match".format(
len(y_predicted), len(y_ground_truth)
)
y_predicted = np.array(y_predicted)
y_ground_truth = np.array(y_ground_truth)
reduction = str(input('Enter the reduction mode: "sum" / "mean" / "None": '))
loss = L2Loss(y_predicted, y_ground_truth, reduction=reduction)
print("L2-Loss with {}-reduction: {}".format(reduction, loss))
if __name__ == "__main__":
main()
|
[
"numpy.array",
"numpy.mean",
"numpy.multiply",
"numpy.sum"
] |
[((637, 672), 'numpy.multiply', 'np.multiply', (['difference', 'difference'], {}), '(difference, difference)\n', (648, 672), True, 'import numpy as np\n'), ((1941, 1962), 'numpy.array', 'np.array', (['y_predicted'], {}), '(y_predicted)\n', (1949, 1962), True, 'import numpy as np\n'), ((1984, 2008), 'numpy.array', 'np.array', (['y_ground_truth'], {}), '(y_ground_truth)\n', (1992, 2008), True, 'import numpy as np\n'), ((906, 932), 'numpy.sum', 'np.sum', (['squared_difference'], {}), '(squared_difference)\n', (912, 932), True, 'import numpy as np\n'), ((1115, 1142), 'numpy.mean', 'np.mean', (['squared_difference'], {}), '(squared_difference)\n', (1122, 1142), True, 'import numpy as np\n')]
|
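The three reduction modes above are related by simple identities: "sum" totals the squared differences, "mean" divides that total by the element count, and "None" returns the raw array. A small check of those relations (input values are arbitrary; assumes L2Loss from the module above is in scope):

# Verify the relation between L2Loss reduction modes on a tiny example.
import numpy as np

pred = np.array([1.0, 2.0, 4.0])
truth = np.array([1.0, 1.0, 1.0])
raw = L2Loss(pred, truth, reduction="None")                  # [0., 1., 9.]
assert L2Loss(pred, truth, reduction="sum") == raw.sum()    # 10.0
assert L2Loss(pred, truth, reduction="mean") == raw.mean()  # 10/3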
#!/usr/bin/env python3
# Copyright © 2018 Broadcom. All Rights Reserved. The term “Broadcom” refers to
# Broadcom Inc. and/or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`gigabitethernet_speed_set` - PyFOS util to set GE port speed.
*******************************************************************************
The :mod:`gigabitethernet_speed_set` Util is used to set speed of GE port.
This module is a stand-alone script that can be used to set the
switch GE port speed on an extension platform.
gigabitethernet_speed_set.py: Usage
* Infrastructure options:
* -i,--ipaddr=IPADDR: IP address of FOS switch.
* -L,--login=LOGIN: Login name.
* -P,--password=PASSWORD: Password.
* -f,--vfid=VFID: VFID to which the request is directed.
* -s,--secured=MODE: HTTPS mode "self" or "CA"[Optional].
* -v,--verbose: Verbose mode[Optional].
* Util scripts options:
* -n,--name=NAME: Set name.
* --speed=VALUE: Set speed.
* Outputs:
* Python dictionary content with RESTCONF response data.
.. function:: gigabitethernet_speed_set.set_port_speed(session,\
name, speed)
*Modify extension gigabitethernet speed*
Example usage of the method::
ret = gigabitethernet_speed_set.set_port_speed(session,
name, speed)
print (ret)
Details::
gigabitethernet = gigabitethernet()
gigabitethernet.set_name(name)
gigabitethernet.set_speed(speed)
result = gigabitethernet.patch(session)
* Inputs:
:param session: Session returned by login.
:param name: Gigabitethernet port name expressed as slot/port.
:param speed: Speed for the GE port to be set.
* Outputs:
:rtype: Dictionary of return status matching rest response.
*Use cases*
Modify extension gigabitethernet port speed to 1G or 10G.
"""
import pyfos.pyfos_auth as pyfos_auth
import pyfos.pyfos_util as pyfos_util
from pyfos.pyfos_brocade_gigabitethernet import gigabitethernet
import sys
import pyfos.utils.brcd_util as brcd_util
isHttps = "0"
def _set_port_speed(session, rest_obj):
result = rest_obj.patch(session)
return (result)
def set_port_speed(session, name, speed):
geObject = gigabitethernet()
geObject.set_name(name)
geObject.set_speed(speed)
result = _set_port_speed(session, geObject)
return (result)
def validate(geObject):
if geObject.peek_name() is None or \
geObject.peek_speed() is None:
return 1
return 0
def main(argv):
# myinputs = "-h -i 10.17.3.70 --name 4/17 --speed 10000000000"
# myinputs = "-h -i 10.17.3.70 --speed 1000000000 -n 4/17"
# myinputs = "--name 4/17 --speed 1000000000"
# myinputs = "-i 10.17.3.70 --name 4/17"
# argv = myinputs.split()
filters = ['name', 'speed']
inputs = brcd_util.parse(argv, gigabitethernet, filters,
validate)
session = brcd_util.getsession(inputs)
result = _set_port_speed(inputs['session'], inputs['utilobject'])
pyfos_util.response_print(result)
pyfos_auth.logout(session)
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"pyfos.utils.brcd_util.getsession",
"pyfos.utils.brcd_util.parse",
"pyfos.pyfos_brocade_gigabitethernet.gigabitethernet",
"pyfos.pyfos_auth.logout",
"pyfos.pyfos_util.response_print"
] |
[((2824, 2841), 'pyfos.pyfos_brocade_gigabitethernet.gigabitethernet', 'gigabitethernet', ([], {}), '()\n', (2839, 2841), False, 'from pyfos.pyfos_brocade_gigabitethernet import gigabitethernet\n'), ((3427, 3484), 'pyfos.utils.brcd_util.parse', 'brcd_util.parse', (['argv', 'gigabitethernet', 'filters', 'validate'], {}), '(argv, gigabitethernet, filters, validate)\n', (3442, 3484), True, 'import pyfos.utils.brcd_util as brcd_util\n'), ((3528, 3556), 'pyfos.utils.brcd_util.getsession', 'brcd_util.getsession', (['inputs'], {}), '(inputs)\n', (3548, 3556), True, 'import pyfos.utils.brcd_util as brcd_util\n'), ((3632, 3665), 'pyfos.pyfos_util.response_print', 'pyfos_util.response_print', (['result'], {}), '(result)\n', (3657, 3665), True, 'import pyfos.pyfos_util as pyfos_util\n'), ((3670, 3696), 'pyfos.pyfos_auth.logout', 'pyfos_auth.logout', (['session'], {}), '(session)\n', (3687, 3696), True, 'import pyfos.pyfos_auth as pyfos_auth\n')]
|
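The commented-out myinputs lines above show the expected CLI arguments; the programmatic path goes through pyfos_auth.login and set_port_speed. A sketch of that path, with address, credentials, slot/port and HTTPS mode as placeholders (the login signature is assumed from the documented infrastructure options, and 10000000000 bps matches the 10G example in the comments):

# Hypothetical session setup followed by the documented speed change.
import pyfos.pyfos_auth as pyfos_auth

session = pyfos_auth.login("admin", "password", "10.17.3.70", "self")
result = set_port_speed(session, "4/17", 10000000000)
print(result)
pyfos_auth.logout(session)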
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from opendr.engine.datasets import Dataset
from opendr.engine.data import Image
from opendr.perception.object_detection_2d.datasets.transforms import BoundingBoxListToNumpyArray
from opendr.engine.constants import OPENDR_SERVER_URL
from pycocotools.coco import COCO
import os
from urllib.request import urlretrieve
import ssl
import time
from zipfile import ZipFile
import tarfile
import pickle
import numpy as np
import math
from tqdm import tqdm
import gc
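# Note on the class below: Dataset_NMS assembles NMS training records by
# pairing per-image detector boxes (a pretrained SSD, or the precomputed
# default detections shipped with each benchmark) with ground-truth boxes
# for the PETS / COCO / TEST_MODULE splits, downloading images and
# annotations on first use and caching the assembled records to a pickle.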
class Dataset_NMS(Dataset):
def __init__(self, path=None, dataset_name=None, split=None, use_ssd=True, device='cuda'):
super().__init__()
available_dataset = ['COCO', 'PETS', 'TEST_MODULE']
self.dataset_sets = {'train': None,
'val': None,
'test': None}
if dataset_name not in available_dataset:
except_str = 'Unsupported dataset: ' + dataset_name + '. Currently available are:'
for j in range(len(available_dataset)):
except_str = except_str + ' \'' + available_dataset[j] + '\''
if j < len(available_dataset) - 1:
except_str = except_str + ','
except_str = except_str + '.'
raise ValueError(except_str)
ssl._create_default_https_context = ssl._create_unverified_context
self.dataset_name = dataset_name
self.split = split
# self.__prepare_dataset()
self.path = os.path.join(path, dataset_name)
self.src_data = []
if self.dataset_name == "PETS":
self.detector = 'JPD'
self.detector_type = 'default'
if use_ssd:
self.detector = 'SSD'
self.detector_type = 'custom'
self.dataset_sets['train'] = 'train'
self.dataset_sets['val'] = 'val'
self.dataset_sets['test'] = 'test'
if self.dataset_sets[self.split] is None:
raise ValueError(self.split + ' split is not available...')
if not os.path.exists(os.path.join(self.path, 'images/S1/L1')):
self.download(
'http://ftp.cs.rdg.ac.uk/pub/PETS2009/Crowd_PETS09_dataset/a_data/Crowd_PETS09/S1_L1.tar.bz2',
download_path=os.path.join(self.path, 'images'), file_format="tar.bz2", create_dir=True)
if not os.path.exists(os.path.join(self.path, 'images/S1/L2')):
self.download(
'http://ftp.cs.rdg.ac.uk/pub/PETS2009/Crowd_PETS09_dataset/a_data/Crowd_PETS09/S1_L2.tar.bz2',
download_path=os.path.join(self.path, 'images'), file_format="tar.bz2", create_dir=True)
if not os.path.exists(os.path.join(self.path, 'images/S2/L1')):
self.download(
'http://ftp.cs.rdg.ac.uk/pub/PETS2009/Crowd_PETS09_dataset/a_data/Crowd_PETS09/S2_L1.tar.bz2',
download_path=os.path.join(self.path, 'images'), file_format="tar.bz2", create_dir=True)
if not os.path.exists(os.path.join(self.path, 'images/S2/L2')):
self.download(
'http://ftp.cs.rdg.ac.uk/pub/PETS2009/Crowd_PETS09_dataset/a_data/Crowd_PETS09/S2_L2.tar.bz2',
download_path=os.path.join(self.path, 'images'), file_format="tar.bz2", create_dir=True)
if not os.path.exists(os.path.join(self.path, 'images/S2/L3')):
self.download(
'http://ftp.cs.rdg.ac.uk/pub/PETS2009/Crowd_PETS09_dataset/a_data/Crowd_PETS09/S2_L3.tar.bz2',
download_path=os.path.join(self.path, 'images'), file_format="tar.bz2", create_dir=True)
if not os.path.exists(os.path.join(self.path, 'images/S3/Multiple_Flow')):
self.download(
'http://ftp.cs.rdg.ac.uk/pub/PETS2009/Crowd_PETS09_dataset/a_data/Crowd_PETS09/S3_MF.tar.bz2',
download_path=os.path.join(self.path, 'images'), file_format="tar.bz2", create_dir=True)
if not os.path.exists(
os.path.join(self.path, 'annotations', 'pets_' + self.dataset_sets[self.split] + '.json')):
self.download('http://datasets.d2.mpi-inf.mpg.de/hosang17cvpr/PETS_annotations_json.zip',
download_path=os.path.join(self.path, 'annotations'), file_format="zip",
create_dir=True)
pkl_filename = os.path.join(self.path,
'data_' + self.detector + '_' + self.dataset_sets[self.split] + '_pets.pkl')
if not os.path.exists(pkl_filename):
ssd = None
if use_ssd:
from opendr.perception.object_detection_2d.ssd.ssd_learner import SingleShotDetectorLearner
ssd = SingleShotDetectorLearner(device=device)
ssd.download(".", mode="pretrained")
ssd.load("./ssd_default_person", verbose=True)
if not os.path.exists(
os.path.join(self.path, 'detections',
'PETS-' + self.dataset_sets[self.split] + '_siyudpm_dets.idl')):
self.download('http://datasets.d2.mpi-inf.mpg.de/hosang17cvpr/PETS_detections.zip',
download_path=os.path.join(self.path, 'detections'), file_format="zip",
create_dir=True)
if not os.path.exists(
os.path.join(self.path, 'annotations', 'PETS-' + self.dataset_sets[self.split] + '.idl')):
self.download('http://datasets.d2.mpi-inf.mpg.de/hosang17cvpr/PETS_annotations.zip',
download_path=os.path.join(self.path, 'annotations'), file_format="zip",
create_dir=True)
with open(os.path.join(self.path, 'annotations',
'PETS-' + self.dataset_sets[self.split] + '.idl')) as fp_gt:
fp_dt = None
if self.detector_type == 'default':
fp_dt = open(os.path.join(self.path, 'detections',
'PETS-' + self.dataset_sets[self.split] + '_siyudpm_dets.idl'))
print('Preparing PETS ' + self.dataset_sets[self.split] + ' set...')
current_id = 0
number_samples = 1696
if self.split == 'val':
current_id = 1696
number_samples = 240
elif self.split == 'test':
current_id = 1936
number_samples = 436
pbarDesc = "Overall progress"
pbar = tqdm(desc=pbarDesc, total=number_samples)
if self.detector_type == 'default':
line_dt = fp_dt.readline()
line_gt = fp_gt.readline()
while line_gt:
remove_strings = ['PETS09-', '\"', ':', '(', ')', ',', '', ';']
data_gt = line_gt.replace(':', ' ')
for j in range(len(remove_strings)):
data_gt = data_gt.replace(remove_strings[j], '')
data_gt = data_gt.split()
filename_gt = data_gt[0][0:2] + '/' + data_gt[0][2:]
if filename_gt[0:6] == 'S2/L1/':
filename_gt = filename_gt.replace('img/00', 'Time_12-34/View_001/frame_')
num = int(filename_gt[-8:-4]) - 1
filename_gt = filename_gt[:-8] + str(num).zfill(4) + '.jpg'
if filename_gt[0:6] == 'S2/L2/':
filename_gt = filename_gt.replace('img/00', 'Time_14-55/View_001/frame_')
num = int(filename_gt[-8:-4]) - 1
filename_gt = filename_gt[:-8] + str(num).zfill(4) + '.jpg'
if filename_gt[0:2] == 'S3':
filename_gt = filename_gt.replace('_MF', 'Multiple_Flow')
if self.detector_type == 'default':
data_dt = line_dt.replace(':', ' ')
for j in range(len(remove_strings)):
data_dt = data_dt.replace(remove_strings[j], '')
data_dt = data_dt.split()
filename_dt = data_dt[0][0:2] + '/' + data_dt[0][2:]
if filename_dt[0:6] == 'S2/L1/':
filename_dt = filename_dt.replace('img/00', 'Time_12-34/View_001/frame_')
num = int(filename_dt[-8:-4]) - 1
filename_dt = filename_dt[:-8] + str(num).zfill(4) + '.jpg'
if filename_dt[0:6] == 'S2/L2/':
filename_dt = filename_dt.replace('img/00', 'Time_14-55/View_001/frame_')
num = int(filename_dt[-8:-4]) - 1
filename_dt = filename_dt[:-8] + str(num).zfill(4) + '.jpg'
if filename_dt[0:2] == 'S3':
filename_dt = filename_dt.replace('_MF', 'Multiple_Flow')
if filename_gt != filename_dt:
raise ValueError('Errors in files...')
img = Image.open(os.path.join(self.path, 'images/', filename_gt))
dt_boxes = []
if self.detector_type == 'default':
for i in range(1, (len(data_dt)), 5):
dt_box = np.array((float(data_dt[i]), float(data_dt[i + 1]), float(data_dt[i + 2]),
float(data_dt[i + 3]), 1 / (1 + math.exp(- float(data_dt[i + 4])))))
dt_boxes.append(dt_box)
else:
bboxes_list = ssd.infer(img, threshold=0.0, custom_nms=None, nms_thresh=0.975,
nms_topk=6000, post_nms=6000)
bboxes_list = BoundingBoxListToNumpyArray()(bboxes_list)
bboxes_list = bboxes_list[bboxes_list[:, 4] > 0.015]
bboxes_list = bboxes_list[np.argsort(bboxes_list[:, 4]), :][::-1]
bboxes_list = bboxes_list[:5000, :]
for b in range(len(bboxes_list)):
dt_boxes.append(np.array([bboxes_list[b, 0], bboxes_list[b, 1], bboxes_list[b, 2],
bboxes_list[b, 3], bboxes_list[b, 4][0]]))
gt_boxes = []
for i in range(1, (len(data_gt)), 5):
gt_box = np.array((float(data_gt[i]), float(data_gt[i + 1]), float(data_gt[i + 2]),
float(data_gt[i + 3])))
gt_boxes.append(gt_box)
self.src_data.append({
'id': current_id,
'filename': os.path.join('images', filename_gt),
'resolution': img.opencv().shape[0:2][::-1],
'gt_boxes': [np.asarray([]), np.asarray(gt_boxes)],
'dt_boxes': [np.asarray([]), np.asarray(dt_boxes)]
})
current_id = current_id + 1
pbar.update(1)
if self.detector_type == 'default':
line_dt = fp_dt.readline()
line_gt = fp_gt.readline()
pbar.close()
if self.detector_type == 'default':
fp_dt.close()
elif self.detector == 'SSD':
del ssd
gc.collect()
with open(pkl_filename, 'wb') as handle:
pickle.dump(self.src_data, handle, protocol=pickle.DEFAULT_PROTOCOL)
else:
with open(pkl_filename, 'rb') as fp_pkl:
self.src_data = pickle.load(fp_pkl)
self.classes = ['background', 'human']
self.class_ids = [-1, 1]
self.annotation_file = 'pets_' + self.dataset_sets[self.split] + '.json'
elif self.dataset_name == "COCO":
self.dataset_sets['train'] = 'train'
self.dataset_sets['val'] = 'minival'
self.dataset_sets['test'] = 'valminusminival'
if self.dataset_sets[self.split] is None:
raise ValueError(self.split + ' split is not available...')
elif self.dataset_sets[self.split] == 'train':
imgs_split = 'train2014'
else:
imgs_split = 'val2014'
self.detector = 'FRCN'
self.detector_type = 'default'
ssd = None
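            # Optionally use a pretrained SSD person detector instead of the cached FRCN detections.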
if use_ssd:
self.detector = 'SSD'
self.detector_type = 'custom'
from opendr.perception.object_detection_2d.ssd.ssd_learner import SingleShotDetectorLearner
ssd = SingleShotDetectorLearner(device=device)
ssd.download(".", mode="pretrained")
ssd.load("./ssd_default_person", verbose=True)
if not os.path.exists(os.path.join(self.path, imgs_split)):
self.download('http://images.cocodataset.org/zips/' + imgs_split + '.zip',
download_path=os.path.join(self.path), file_format="zip",
create_dir=True)
pkl_filename = os.path.join(self.path, 'data_' + self.detector + '_' +
self.dataset_sets[self.split] + '_coco.pkl')
if not os.path.exists(pkl_filename):
if not os.path.exists(os.path.join(self.path, 'annotations', 'instances_' +
self.dataset_sets[self.split] +
'2014.json')):
if self.dataset_sets[self.split] == 'train':
ann_url = 'http://images.cocodataset.org/annotations/annotations_trainval2014.zip'
self.download(ann_url, download_path=os.path.join(self.path), file_format="zip",
create_dir=True)
else:
if self.dataset_sets[self.split] == 'minival':
ann_url = 'https://dl.dropboxusercontent.com/s/o43o90bna78omob/' \
'instances_minival2014.json.zip?dl=0'
else:
ann_url = 'https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/' \
'instances_valminusminival2014.json.zip?dl=0'
self.download(ann_url, download_path=os.path.join(self.path, 'annotations'), file_format="zip",
create_dir=True)
if not os.path.exists(os.path.join(self.path, 'detections', 'coco_2014_' +
self.dataset_sets[self.split] +
'_FRCN_train.pkl')):
self.download('http://datasets.d2.mpi-inf.mpg.de/hosang17cvpr/coco_2014_FRCN.tar.gz',
download_path=os.path.join(self.path, 'detections'), file_format='tar.gz',
create_dir=True)
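                # Load the pre-computed Faster R-CNN detections (from the 'hosang17cvpr' data release).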
with open(os.path.join(self.path, 'detections',
'coco_2014_' + self.dataset_sets[self.split] + '_FRCN_train.pkl'), 'rb') as f:
dets_default = pickle.load(f, encoding='latin1')
annots = COCO(annotation_file=os.path.join(self.path, 'annotations', 'instances_' +
self.dataset_sets[self.split] + '2014.json'))
pbarDesc = "Overall progress"
pbar = tqdm(desc=pbarDesc, total=len(dets_default[1]))
for i in range(len(dets_default[1])):
dt_boxes = []
img_info = annots.loadImgs([dets_default[1][i]])[0]
img = Image.open(os.path.join(self.path, imgs_split, img_info["file_name"]))
if self.detector_type == 'default':
dt_boxes = dets_default[0][1][i]
elif self.detector == 'SSD':
bboxes_list = ssd.infer(img, threshold=0.0, custom_nms=None, nms_thresh=0.975,
nms_topk=6000, post_nms=6000)
bboxes_list = BoundingBoxListToNumpyArray()(bboxes_list)
if bboxes_list.shape[0] > 0:
bboxes_list = bboxes_list[bboxes_list[:, 4] > 0.015]
if bboxes_list.shape[0] > 0:
bboxes_list = bboxes_list[np.argsort(bboxes_list[:, 4]), :][::-1]
bboxes_list = bboxes_list[:5000, :]
for b in range(len(bboxes_list)):
dt_boxes.append(np.array([bboxes_list[b, 0], bboxes_list[b, 1], bboxes_list[b, 2],
bboxes_list[b, 3], bboxes_list[b, 4][0]]))
dt_boxes = np.asarray(dt_boxes)
annots_in_frame = annots.loadAnns(
annots.getAnnIds(imgIds=[dets_default[1][i]], catIds=[1], iscrowd=False))
gt_boxes = []
for j in range(len(annots_in_frame)):
gt_boxes.append(annots_in_frame[j]['bbox'])
gt_boxes = np.asarray(np.asarray(gt_boxes))
if gt_boxes.shape[0] > 0:
gt_boxes[:, 2] = gt_boxes[:, 0] + gt_boxes[:, 2]
gt_boxes[:, 3] = gt_boxes[:, 1] + gt_boxes[:, 3]
self.src_data.append({
'id': dets_default[1][i],
'filename': os.path.join(imgs_split, img_info["file_name"]),
'resolution': [img_info['width'], img_info['height']],
'gt_boxes': [np.asarray([]), gt_boxes],
'dt_boxes': [np.asarray([]), dt_boxes]
})
pbar.update(1)
pbar.close()
if self.detector == 'SSD':
del ssd
gc.collect()
with open(pkl_filename, 'wb') as handle:
pickle.dump(self.src_data, handle, protocol=pickle.DEFAULT_PROTOCOL)
else:
with open(pkl_filename, 'rb') as fp_pkl:
self.src_data = pickle.load(fp_pkl)
self.classes = ['background', 'person']
self.class_ids = [-1, 1]
self.annotation_file = 'instances_' + self.dataset_sets[self.split] + '2014.json'
elif self.dataset_name == "TEST_MODULE":
self.dataset_sets['train'] = 'test'
self.dataset_sets['val'] = 'test'
self.dataset_sets['test'] = 'test'
if self.dataset_sets[self.split] is None:
raise ValueError(self.split + ' split is not available...')
pkl_filename = os.path.join(self.path, 'test_module.pkl')
if not os.path.exists(pkl_filename):
data_url = OPENDR_SERVER_URL + '/perception/object_detection_2d/nms/datasets/test_module.zip'
self.download(data_url, download_path=os.path.join(self.path).replace("TEST_MODULE", ""), file_format="zip",
create_dir=True)
with open(pkl_filename, 'rb') as fp_pkl:
self.src_data = pickle.load(fp_pkl)
self.classes = ['background', 'person']
self.class_ids = [-1, 1]
self.annotation_file = 'test_module_anns.json'
@staticmethod
def download(
url, download_path, dataset_sub_path=".", file_format="zip", create_dir=False):
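        """Download a dataset archive into download_path, extracting zip/tar files in place."""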
if create_dir:
os.makedirs(download_path, exist_ok=True)
print("Downloading dataset from", url, "to", download_path)
start_time = 0
last_print = 0
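        # Progress callback for urlretrieve: prints download size and speed at most once per second.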
def reporthook(count, block_size, total_size):
nonlocal start_time
nonlocal last_print
if count == 0:
start_time = time.time()
last_print = start_time
return
duration = time.time() - start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
if time.time() - last_print >= 1:
last_print = time.time()
print(
"\r%d MB, %d KB/s, %d seconds passed" %
(progress_size / (1024 * 1024), speed, duration),
end=''
)
if file_format == "zip":
zip_path = os.path.join(download_path, "dataset.zip")
urlretrieve(url, zip_path, reporthook=reporthook)
print()
print("Extracting data from zip file")
with ZipFile(zip_path, 'r') as zip_ref:
zip_ref.extractall(download_path)
os.remove(zip_path)
elif file_format == "tar.bz2" or file_format == "tar.gz":
tar_path = os.path.join(download_path, "dataset." + file_format)
urlretrieve(url, tar_path, reporthook=reporthook)
print()
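            # Strip the leading 'Crowd_PETS09/' prefix so members extract directly into download_path.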
def members(tf):
l = len("Crowd_PETS09/")
for member in tf.getmembers():
if member.path.startswith("Crowd_PETS09/"):
member.path = member.path[l:]
yield member
with tarfile.open(tar_path, "r:" + file_format.split('.')[1]) as tar:
if file_format == "tar.bz2":
tar.extractall(path=download_path, members=members(tar))
else:
tar.extractall(path=download_path)
tar.close()
os.remove(tar_path)
else:
raise ValueError("Unsupported file_format: " + file_format)
|
[
"os.remove",
"tqdm.tqdm",
"zipfile.ZipFile",
"os.makedirs",
"opendr.perception.object_detection_2d.ssd.ssd_learner.SingleShotDetectorLearner",
"pickle.dump",
"numpy.asarray",
"os.path.exists",
"time.time",
"opendr.perception.object_detection_2d.datasets.transforms.BoundingBoxListToNumpyArray",
"urllib.request.urlretrieve",
"gc.collect",
"pickle.load",
"numpy.array",
"numpy.argsort",
"os.path.join"
] |
[((2050, 2082), 'os.path.join', 'os.path.join', (['path', 'dataset_name'], {}), '(path, dataset_name)\n', (2062, 2082), False, 'import os\n'), ((5035, 5140), 'os.path.join', 'os.path.join', (['self.path', "('data_' + self.detector + '_' + self.dataset_sets[self.split] + '_pets.pkl')"], {}), "(self.path, 'data_' + self.detector + '_' + self.dataset_sets[\n self.split] + '_pets.pkl')\n", (5047, 5140), False, 'import os\n'), ((21238, 21279), 'os.makedirs', 'os.makedirs', (['download_path'], {'exist_ok': '(True)'}), '(download_path, exist_ok=True)\n', (21249, 21279), False, 'import os\n'), ((22149, 22191), 'os.path.join', 'os.path.join', (['download_path', '"""dataset.zip"""'], {}), "(download_path, 'dataset.zip')\n", (22161, 22191), False, 'import os\n'), ((22204, 22253), 'urllib.request.urlretrieve', 'urlretrieve', (['url', 'zip_path'], {'reporthook': 'reporthook'}), '(url, zip_path, reporthook=reporthook)\n', (22215, 22253), False, 'from urllib.request import urlretrieve\n'), ((22439, 22458), 'os.remove', 'os.remove', (['zip_path'], {}), '(zip_path)\n', (22448, 22458), False, 'import os\n'), ((5195, 5223), 'os.path.exists', 'os.path.exists', (['pkl_filename'], {}), '(pkl_filename)\n', (5209, 5223), False, 'import os\n'), ((14549, 14654), 'os.path.join', 'os.path.join', (['self.path', "('data_' + self.detector + '_' + self.dataset_sets[self.split] + '_coco.pkl')"], {}), "(self.path, 'data_' + self.detector + '_' + self.dataset_sets[\n self.split] + '_coco.pkl')\n", (14561, 14654), False, 'import os\n'), ((21572, 21583), 'time.time', 'time.time', ([], {}), '()\n', (21581, 21583), False, 'import time\n'), ((21671, 21682), 'time.time', 'time.time', ([], {}), '()\n', (21680, 21682), False, 'import time\n'), ((21882, 21893), 'time.time', 'time.time', ([], {}), '()\n', (21891, 21893), False, 'import time\n'), ((22342, 22364), 'zipfile.ZipFile', 'ZipFile', (['zip_path', '"""r"""'], {}), "(zip_path, 'r')\n", (22349, 22364), False, 'from zipfile import ZipFile\n'), ((22548, 22601), 'os.path.join', 'os.path.join', (['download_path', "('dataset.' + file_format)"], {}), "(download_path, 'dataset.' 
+ file_format)\n", (22560, 22601), False, 'import os\n'), ((22614, 22663), 'urllib.request.urlretrieve', 'urlretrieve', (['url', 'tar_path'], {'reporthook': 'reporthook'}), '(url, tar_path, reporthook=reporthook)\n', (22625, 22663), False, 'from urllib.request import urlretrieve\n'), ((23275, 23294), 'os.remove', 'os.remove', (['tar_path'], {}), '(tar_path)\n', (23284, 23294), False, 'import os\n'), ((2642, 2681), 'os.path.join', 'os.path.join', (['self.path', '"""images/S1/L1"""'], {}), "(self.path, 'images/S1/L1')\n", (2654, 2681), False, 'import os\n'), ((2973, 3012), 'os.path.join', 'os.path.join', (['self.path', '"""images/S1/L2"""'], {}), "(self.path, 'images/S1/L2')\n", (2985, 3012), False, 'import os\n'), ((3304, 3343), 'os.path.join', 'os.path.join', (['self.path', '"""images/S2/L1"""'], {}), "(self.path, 'images/S2/L1')\n", (3316, 3343), False, 'import os\n'), ((3635, 3674), 'os.path.join', 'os.path.join', (['self.path', '"""images/S2/L2"""'], {}), "(self.path, 'images/S2/L2')\n", (3647, 3674), False, 'import os\n'), ((3966, 4005), 'os.path.join', 'os.path.join', (['self.path', '"""images/S2/L3"""'], {}), "(self.path, 'images/S2/L3')\n", (3978, 4005), False, 'import os\n'), ((4297, 4347), 'os.path.join', 'os.path.join', (['self.path', '"""images/S3/Multiple_Flow"""'], {}), "(self.path, 'images/S3/Multiple_Flow')\n", (4309, 4347), False, 'import os\n'), ((4660, 4754), 'os.path.join', 'os.path.join', (['self.path', '"""annotations"""', "('pets_' + self.dataset_sets[self.split] + '.json')"], {}), "(self.path, 'annotations', 'pets_' + self.dataset_sets[self.\n split] + '.json')\n", (4672, 4754), False, 'import os\n'), ((5418, 5458), 'opendr.perception.object_detection_2d.ssd.ssd_learner.SingleShotDetectorLearner', 'SingleShotDetectorLearner', ([], {'device': 'device'}), '(device=device)\n', (5443, 5458), False, 'from opendr.perception.object_detection_2d.ssd.ssd_learner import SingleShotDetectorLearner\n'), ((7415, 7456), 'tqdm.tqdm', 'tqdm', ([], {'desc': 'pbarDesc', 'total': 'number_samples'}), '(desc=pbarDesc, total=number_samples)\n', (7419, 7456), False, 'from tqdm import tqdm\n'), ((13049, 13068), 'pickle.load', 'pickle.load', (['fp_pkl'], {}), '(fp_pkl)\n', (13060, 13068), False, 'import pickle\n'), ((14067, 14107), 'opendr.perception.object_detection_2d.ssd.ssd_learner.SingleShotDetectorLearner', 'SingleShotDetectorLearner', ([], {'device': 'device'}), '(device=device)\n', (14092, 14107), False, 'from opendr.perception.object_detection_2d.ssd.ssd_learner import SingleShotDetectorLearner\n'), ((14709, 14737), 'os.path.exists', 'os.path.exists', (['pkl_filename'], {}), '(pkl_filename)\n', (14723, 14737), False, 'import os\n'), ((20446, 20488), 'os.path.join', 'os.path.join', (['self.path', '"""test_module.pkl"""'], {}), "(self.path, 'test_module.pkl')\n", (20458, 20488), False, 'import os\n'), ((21822, 21833), 'time.time', 'time.time', ([], {}), '()\n', (21831, 21833), False, 'import time\n'), ((2864, 2897), 'os.path.join', 'os.path.join', (['self.path', '"""images"""'], {}), "(self.path, 'images')\n", (2876, 2897), False, 'import os\n'), ((3195, 3228), 'os.path.join', 'os.path.join', (['self.path', '"""images"""'], {}), "(self.path, 'images')\n", (3207, 3228), False, 'import os\n'), ((3526, 3559), 'os.path.join', 'os.path.join', (['self.path', '"""images"""'], {}), "(self.path, 'images')\n", (3538, 3559), False, 'import os\n'), ((3857, 3890), 'os.path.join', 'os.path.join', (['self.path', '"""images"""'], {}), "(self.path, 'images')\n", (3869, 3890), False, 'import os\n'), 
((4188, 4221), 'os.path.join', 'os.path.join', (['self.path', '"""images"""'], {}), "(self.path, 'images')\n", (4200, 4221), False, 'import os\n'), ((4530, 4563), 'os.path.join', 'os.path.join', (['self.path', '"""images"""'], {}), "(self.path, 'images')\n", (4542, 4563), False, 'import os\n'), ((4902, 4940), 'os.path.join', 'os.path.join', (['self.path', '"""annotations"""'], {}), "(self.path, 'annotations')\n", (4914, 4940), False, 'import os\n'), ((5646, 5751), 'os.path.join', 'os.path.join', (['self.path', '"""detections"""', "('PETS-' + self.dataset_sets[self.split] + '_siyudpm_dets.idl')"], {}), "(self.path, 'detections', 'PETS-' + self.dataset_sets[self.\n split] + '_siyudpm_dets.idl')\n", (5658, 5751), False, 'import os\n'), ((6110, 6203), 'os.path.join', 'os.path.join', (['self.path', '"""annotations"""', "('PETS-' + self.dataset_sets[self.split] + '.idl')"], {}), "(self.path, 'annotations', 'PETS-' + self.dataset_sets[self.\n split] + '.idl')\n", (6122, 6203), False, 'import os\n'), ((6490, 6583), 'os.path.join', 'os.path.join', (['self.path', '"""annotations"""', "('PETS-' + self.dataset_sets[self.split] + '.idl')"], {}), "(self.path, 'annotations', 'PETS-' + self.dataset_sets[self.\n split] + '.idl')\n", (6502, 6583), False, 'import os\n'), ((12869, 12937), 'pickle.dump', 'pickle.dump', (['self.src_data', 'handle'], {'protocol': 'pickle.DEFAULT_PROTOCOL'}), '(self.src_data, handle, protocol=pickle.DEFAULT_PROTOCOL)\n', (12880, 12937), False, 'import pickle\n'), ((14258, 14293), 'os.path.join', 'os.path.join', (['self.path', 'imgs_split'], {}), '(self.path, imgs_split)\n', (14270, 14293), False, 'import os\n'), ((16795, 16828), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (16806, 16828), False, 'import pickle\n'), ((18474, 18494), 'numpy.asarray', 'np.asarray', (['dt_boxes'], {}), '(dt_boxes)\n', (18484, 18494), True, 'import numpy as np\n'), ((19626, 19638), 'gc.collect', 'gc.collect', ([], {}), '()\n', (19636, 19638), False, 'import gc\n'), ((19716, 19784), 'pickle.dump', 'pickle.dump', (['self.src_data', 'handle'], {'protocol': 'pickle.DEFAULT_PROTOCOL'}), '(self.src_data, handle, protocol=pickle.DEFAULT_PROTOCOL)\n', (19727, 19784), False, 'import pickle\n'), ((19896, 19915), 'pickle.load', 'pickle.load', (['fp_pkl'], {}), '(fp_pkl)\n', (19907, 19915), False, 'import pickle\n'), ((20508, 20536), 'os.path.exists', 'os.path.exists', (['pkl_filename'], {}), '(pkl_filename)\n', (20522, 20536), False, 'import os\n'), ((20905, 20924), 'pickle.load', 'pickle.load', (['fp_pkl'], {}), '(fp_pkl)\n', (20916, 20924), False, 'import pickle\n'), ((5938, 5975), 'os.path.join', 'os.path.join', (['self.path', '"""detections"""'], {}), "(self.path, 'detections')\n", (5950, 5975), False, 'import os\n'), ((6354, 6392), 'os.path.join', 'os.path.join', (['self.path', '"""annotations"""'], {}), "(self.path, 'annotations')\n", (6366, 6392), False, 'import os\n'), ((6755, 6860), 'os.path.join', 'os.path.join', (['self.path', '"""detections"""', "('PETS-' + self.dataset_sets[self.split] + '_siyudpm_dets.idl')"], {}), "(self.path, 'detections', 'PETS-' + self.dataset_sets[self.\n split] + '_siyudpm_dets.idl')\n", (6767, 6860), False, 'import os\n'), ((10191, 10238), 'os.path.join', 'os.path.join', (['self.path', '"""images/"""', 'filename_gt'], {}), "(self.path, 'images/', filename_gt)\n", (10203, 10238), False, 'import os\n'), ((12771, 12783), 'gc.collect', 'gc.collect', ([], {}), '()\n', (12781, 12783), False, 'import gc\n'), ((14431, 14454), 
'os.path.join', 'os.path.join', (['self.path'], {}), '(self.path)\n', (14443, 14454), False, 'import os\n'), ((14777, 14880), 'os.path.join', 'os.path.join', (['self.path', '"""annotations"""', "('instances_' + self.dataset_sets[self.split] + '2014.json')"], {}), "(self.path, 'annotations', 'instances_' + self.dataset_sets[\n self.split] + '2014.json')\n", (14789, 14880), False, 'import os\n'), ((16054, 16162), 'os.path.join', 'os.path.join', (['self.path', '"""detections"""', "('coco_2014_' + self.dataset_sets[self.split] + '_FRCN_train.pkl')"], {}), "(self.path, 'detections', 'coco_2014_' + self.dataset_sets[self\n .split] + '_FRCN_train.pkl')\n", (16066, 16162), False, 'import os\n'), ((16604, 16712), 'os.path.join', 'os.path.join', (['self.path', '"""detections"""', "('coco_2014_' + self.dataset_sets[self.split] + '_FRCN_train.pkl')"], {}), "(self.path, 'detections', 'coco_2014_' + self.dataset_sets[self\n .split] + '_FRCN_train.pkl')\n", (16616, 16712), False, 'import os\n'), ((16875, 16978), 'os.path.join', 'os.path.join', (['self.path', '"""annotations"""', "('instances_' + self.dataset_sets[self.split] + '2014.json')"], {}), "(self.path, 'annotations', 'instances_' + self.dataset_sets[\n self.split] + '2014.json')\n", (16887, 16978), False, 'import os\n'), ((17348, 17406), 'os.path.join', 'os.path.join', (['self.path', 'imgs_split', "img_info['file_name']"], {}), "(self.path, imgs_split, img_info['file_name'])\n", (17360, 17406), False, 'import os\n'), ((18850, 18870), 'numpy.asarray', 'np.asarray', (['gt_boxes'], {}), '(gt_boxes)\n', (18860, 18870), True, 'import numpy as np\n'), ((10958, 10987), 'opendr.perception.object_detection_2d.datasets.transforms.BoundingBoxListToNumpyArray', 'BoundingBoxListToNumpyArray', ([], {}), '()\n', (10985, 10987), False, 'from opendr.perception.object_detection_2d.datasets.transforms import BoundingBoxListToNumpyArray\n'), ((11986, 12021), 'os.path.join', 'os.path.join', (['"""images"""', 'filename_gt'], {}), "('images', filename_gt)\n", (11998, 12021), False, 'import os\n'), ((16466, 16503), 'os.path.join', 'os.path.join', (['self.path', '"""detections"""'], {}), "(self.path, 'detections')\n", (16478, 16503), False, 'import os\n'), ((19193, 19240), 'os.path.join', 'os.path.join', (['imgs_split', "img_info['file_name']"], {}), "(imgs_split, img_info['file_name'])\n", (19205, 19240), False, 'import os\n'), ((11350, 11462), 'numpy.array', 'np.array', (['[bboxes_list[b, 0], bboxes_list[b, 1], bboxes_list[b, 2], bboxes_list[b, 3],\n bboxes_list[b, 4][0]]'], {}), '([bboxes_list[b, 0], bboxes_list[b, 1], bboxes_list[b, 2],\n bboxes_list[b, 3], bboxes_list[b, 4][0]])\n', (11358, 11462), True, 'import numpy as np\n'), ((12137, 12151), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (12147, 12151), True, 'import numpy as np\n'), ((12153, 12173), 'numpy.asarray', 'np.asarray', (['gt_boxes'], {}), '(gt_boxes)\n', (12163, 12173), True, 'import numpy as np\n'), ((12217, 12231), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (12227, 12231), True, 'import numpy as np\n'), ((12233, 12253), 'numpy.asarray', 'np.asarray', (['dt_boxes'], {}), '(dt_boxes)\n', (12243, 12253), True, 'import numpy as np\n'), ((15265, 15288), 'os.path.join', 'os.path.join', (['self.path'], {}), '(self.path)\n', (15277, 15288), False, 'import os\n'), ((15902, 15940), 'os.path.join', 'os.path.join', (['self.path', '"""annotations"""'], {}), "(self.path, 'annotations')\n", (15914, 15940), False, 'import os\n'), ((17789, 17818), 
'opendr.perception.object_detection_2d.datasets.transforms.BoundingBoxListToNumpyArray', 'BoundingBoxListToNumpyArray', ([], {}), '()\n', (17816, 17818), False, 'from opendr.perception.object_detection_2d.datasets.transforms import BoundingBoxListToNumpyArray\n'), ((19358, 19372), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (19368, 19372), True, 'import numpy as np\n'), ((19422, 19436), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (19432, 19436), True, 'import numpy as np\n'), ((11136, 11165), 'numpy.argsort', 'np.argsort', (['bboxes_list[:, 4]'], {}), '(bboxes_list[:, 4])\n', (11146, 11165), True, 'import numpy as np\n'), ((18279, 18391), 'numpy.array', 'np.array', (['[bboxes_list[b, 0], bboxes_list[b, 1], bboxes_list[b, 2], bboxes_list[b, 3],\n bboxes_list[b, 4][0]]'], {}), '([bboxes_list[b, 0], bboxes_list[b, 1], bboxes_list[b, 2],\n bboxes_list[b, 3], bboxes_list[b, 4][0]])\n', (18287, 18391), True, 'import numpy as np\n'), ((20702, 20725), 'os.path.join', 'os.path.join', (['self.path'], {}), '(self.path)\n', (20714, 20725), False, 'import os\n'), ((18073, 18102), 'numpy.argsort', 'np.argsort', (['bboxes_list[:, 4]'], {}), '(bboxes_list[:, 4])\n', (18083, 18102), True, 'import numpy as np\n')]
|
import json
from types import MappingProxyType
from typing import Any, Dict, Mapping, Type, TypeVar, Union
from typing_extensions import Protocol
from mashumaro.serializer.base import DataClassDictMixin
DEFAULT_DICT_PARAMS = {
"use_bytes": False,
"use_enum": False,
"use_datetime": False,
}
EncodedData = Union[str, bytes, bytearray]
T = TypeVar("T", bound="DataClassJSONMixin")
class Encoder(Protocol):  # pragma: no cover
def __call__(self, obj, **kwargs) -> EncodedData:
...
class Decoder(Protocol):  # pragma: no cover
def __call__(self, s: EncodedData, **kwargs) -> Dict[Any, Any]:
...
class DataClassJSONMixin(DataClassDictMixin):
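    """Adds JSON encoding/decoding on top of the dict-based mixin."""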
def to_json(
self: T,
encoder: Encoder = json.dumps,
dict_params: Mapping = MappingProxyType({}),
**encoder_kwargs,
) -> EncodedData:
return encoder(
self.to_dict(**dict(DEFAULT_DICT_PARAMS, **dict_params)),
**encoder_kwargs,
)
@classmethod
def from_json(
cls: Type[T],
data: EncodedData,
decoder: Decoder = json.loads,
dict_params: Mapping = MappingProxyType({}),
**decoder_kwargs,
) -> T:
return cls.from_dict(
decoder(data, **decoder_kwargs),
**dict(DEFAULT_DICT_PARAMS, **dict_params),
)
|
[
"typing.TypeVar",
"types.MappingProxyType"
] |
[((353, 393), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': '"""DataClassJSONMixin"""'}), "('T', bound='DataClassJSONMixin')\n", (360, 393), False, 'from typing import Any, Dict, Mapping, Type, TypeVar, Union\n'), ((784, 804), 'types.MappingProxyType', 'MappingProxyType', (['{}'], {}), '({})\n', (800, 804), False, 'from types import MappingProxyType\n'), ((1145, 1165), 'types.MappingProxyType', 'MappingProxyType', (['{}'], {}), '({})\n', (1161, 1165), False, 'from types import MappingProxyType\n')]
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-V", "--version", help="show program version", action="store_true")
args = parser.parse_args()
if args.version:
print("Version 0.1")
|
[
"argparse.ArgumentParser"
] |
[((26, 51), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (49, 51), False, 'import argparse\n')]
|
import glob
import json
import pandas as pd
from crypto_balancer.dummy_exchange import DummyExchange
LIMITS = {'BNB/BTC': {'amount': {'max': 90000000.0, 'min': 0.01},
'cost': {'max': None, 'min': 0.001},
'price': {'max': None, 'min': None}},
'BNB/ETH': {'amount': {'max': 90000000.0, 'min': 0.01},
'cost': {'max': None, 'min': 0.01},
'price': {'max': None, 'min': None}},
'BNB/USD': {'amount': {'max': 10000000.0, 'min': 0.01},
'cost': {'max': None, 'min': 10.0},
'price': {'max': None, 'min': None}},
'BTC/USD': {'amount': {'max': 10000000.0, 'min': 1e-06},
'cost': {'max': None, 'min': 10.0},
'price': {'max': None, 'min': None}},
'ETH/BTC': {'amount': {'max': 100000.0, 'min': 0.001},
'cost': {'max': None, 'min': 0.001},
'price': {'max': None, 'min': None}},
'ETH/USD': {'amount': {'max': 10000000.0, 'min': 1e-05},
'cost': {'max': None, 'min': 10.0},
'price': {'max': None, 'min': None}},
'XRP/BNB': {'amount': {'max': 90000000.0, 'min': 0.1},
'cost': {'max': None, 'min': 1.0},
'price': {'max': None, 'min': None}},
'XRP/BTC': {'amount': {'max': 90000000.0, 'min': 1.0},
'cost': {'max': None, 'min': 0.001},
'price': {'max': None, 'min': None}},
'XRP/ETH': {'amount': {'max': 90000000.0, 'min': 1.0},
'cost': {'max': None, 'min': 0.01},
'price': {'max': None, 'min': None}},
'XRP/USD': {'amount': {'max': 90000000.0, 'min': 0.1},
'cost': {'max': None, 'min': 1.0},
'price': {'max': None, 'min': None}},
'XLM/USD': {'amount': {'max': 90000000.0, 'min': 0.1},
'cost': {'max': None, 'min': 1.0},
'price': {'max': None, 'min': None}},
'XLM/XRP': {'amount': {'max': 90000000.0, 'min': 0.1},
'cost': {'max': None, 'min': 1.0},
'price': {'max': None, 'min': None}}}
class BacktestExchange(DummyExchange):
def __init__(self, filenames, balances, fee=0.001):
self.name = 'BacktestExchange'
self._currencies = balances.keys()
        self.pairs = []  # symbols loaded below; not initialised elsewhere in this snippet
        final_df = pd.DataFrame()
for path in glob.glob(filenames):
filename = path.split('/')[-1]
pair = filename.split('.')[0]
            pair = pair.replace('-', '/')
            self.pairs.append(pair)
            with open(path, 'r') as fh:
                data = json.load(fh)  # ['Data']
df = pd.DataFrame(data)
df.set_index(pd.to_datetime(df['time'], unit='s'), inplace=True)
df = df[~df.index.duplicated()]
final_df[pair] = df['close']
# print('loaded', pair)
final_df.fillna(method='ffill', inplace=True)
# self._iter = final_df['2018-01-08':].iterrows()
# self._iter = final_df['2017-09-11':].iterrows()
# self._iter = final_df[:'2018-12-30'].iterrows()
# self._iter = final_df['2018-09-11':'2018-12-30'].iterrows()
self._iter = final_df.iterrows()
self._rates = {}
self._balances = balances
self._fee = fee
self.tick()
def tick(self):
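        """Advance the backtest by one timestamp, updating the current close prices."""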
self._rates = dict(next(self._iter)[1])
@property
def limits(self):
return LIMITS
|
[
"pandas.DataFrame",
"pandas.to_datetime",
"glob.glob"
] |
[((2520, 2534), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2532, 2534), True, 'import pandas as pd\n'), ((2555, 2575), 'glob.glob', 'glob.glob', (['filenames'], {}), '(filenames)\n', (2564, 2575), False, 'import glob\n'), ((2811, 2829), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (2823, 2829), True, 'import pandas as pd\n'), ((2855, 2891), 'pandas.to_datetime', 'pd.to_datetime', (["df['time']"], {'unit': '"""s"""'}), "(df['time'], unit='s')\n", (2869, 2891), True, 'import pandas as pd\n')]
|
import subprocess
from distutils.version import StrictVersion
from platform import mac_ver
try:
from munkicon import plist
from munkicon import worker
except ImportError:
from .munkicon import plist
from .munkicon import worker
# Keys: 'user_home_path'
# 'secure_token'
# 'volume_owners'
class UserAccounts(object):
def __init__(self):
self.conditions = self._process()
def _users(self):
"""Users."""
result = set()
_ignore_users = ['daemon',
'nobody',
'root']
_cmd = ['/usr/bin/dscl', '.', '-list', '/Users']
_p = subprocess.Popen(_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_r, _e = _p.communicate()
if _p.returncode == 0:
if isinstance(_r, bytes):
_r = _r.decode('utf-8').strip()
for _u in _r.splitlines():
if not _u.startswith('_'):
if _u not in _ignore_users:
result.add(_u)
return result
def _home_dirs(self):
"""Home Directories"""
result = {'user_home_path': list()}
_users = self._users()
_home_dirs = set()
if _users:
for _u in _users:
_cmd = ['/usr/bin/dscl', '-plist', '.', '-read', '/Users/{}'.format(_u), 'NFSHomeDirectory']
_p = subprocess.Popen(_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_r, _e = _p.communicate()
if _p.returncode == 0:
if isinstance(_r, bytes):
_r = _r.decode('utf-8').strip()
if _r:
_h = plist.readPlistFromString(_r)['dsAttrTypeStandard:NFSHomeDirectory']
if _h:
try:
_r = '{},{}'.format(_u, _h[0].strip())
except IndexError:
_r = '{},{}'.format(_u, _h.strip())
_home_dirs.add(_r)
result['user_home_path'] = list(_home_dirs)
return result
def _secure_tokens(self):
"""Determine SecureToken status for user."""
result = {'secure_token': list()}
_users = self._users()
if _users and StrictVersion(mac_ver()[0]) >= StrictVersion('10.14'):
for _u in _users:
_status = 'DISABLED'
_cmd = ['/usr/sbin/sysadminctl', '-secureTokenStatus', _u]
_p = subprocess.Popen(_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_r, _e = _p.communicate()
if _p.returncode == 0:
# Output is on stderr, not stdout
if isinstance(_e, bytes):
_e = _e.decode('utf-8').strip()
if 'ENABLED' in _e:
_status = 'ENABLED'
result['secure_token'].append('{},{}'.format(_u, _status))
else:
pass
return result
def _user_guids(self):
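        """Map each local user's GeneratedUID to their account name."""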
result = dict()
_users = self._users()
for _user in _users:
_cmd = ['/usr/bin/dscl', '.', '-read', f'/Users/{_user}', 'GeneratedUID']
_p = subprocess.run(_cmd, capture_output=True, encoding='utf-8')
if _p.returncode == 0 and _p.stdout:
_uid = _p.stdout.strip().replace('GeneratedUID: ', '')
result[_uid] = _user
return result
def _volume_owners(self):
"""Determine volume owners on APFS disks"""
result = {'volume_owners': list()}
_users_with_uid = self._user_guids()
_vol_own_users = set()
_cmd = ['/usr/sbin/diskutil', 'apfs', 'listUsers', '/', '-plist']
_p = subprocess.run(_cmd, capture_output=True)
if _p.returncode == 0 and _p.stdout:
try:
_users = plist.readPlistFromString(_p.stdout.strip())['Users']
for _user in _users:
_apfs_crypto_type = _user.get('APFSCryptoUserType')
_apfs_crypto_uuid = _user.get('APFSCryptoUserUUID')
_is_volume_owner = _user.get('VolumeOwner')
_hr_user_name = _users_with_uid.get(_apfs_crypto_uuid)
if _is_volume_owner and _apfs_crypto_type != 'PersonalRecovery':
_vol_own_users.add(_hr_user_name)
except Exception:
pass
result['volume_owners'] = sorted(list(_vol_own_users))
return result
def _process(self):
"""Process all conditions and generate the condition dictionary."""
result = dict()
result.update(self._home_dirs())
result.update(self._secure_tokens())
result.update(self._volume_owners())
return result
def runner(dest):
users = UserAccounts()
mc = worker.MunkiConWorker(conditions_file=dest, log_src=__file__)
mc.write(conditions=users.conditions)
|
[
"subprocess.run",
"subprocess.Popen",
"distutils.version.StrictVersion",
"munkicon.worker.MunkiConWorker",
"munkicon.plist.readPlistFromString",
"platform.mac_ver"
] |
[((4991, 5052), 'munkicon.worker.MunkiConWorker', 'worker.MunkiConWorker', ([], {'conditions_file': 'dest', 'log_src': '__file__'}), '(conditions_file=dest, log_src=__file__)\n', (5012, 5052), False, 'from munkicon import worker\n'), ((658, 728), 'subprocess.Popen', 'subprocess.Popen', (['_cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (674, 728), False, 'import subprocess\n'), ((3868, 3909), 'subprocess.run', 'subprocess.run', (['_cmd'], {'capture_output': '(True)'}), '(_cmd, capture_output=True)\n', (3882, 3909), False, 'import subprocess\n'), ((3337, 3396), 'subprocess.run', 'subprocess.run', (['_cmd'], {'capture_output': '(True)', 'encoding': '"""utf-8"""'}), "(_cmd, capture_output=True, encoding='utf-8')\n", (3351, 3396), False, 'import subprocess\n'), ((1417, 1487), 'subprocess.Popen', 'subprocess.Popen', (['_cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (1433, 1487), False, 'import subprocess\n'), ((2385, 2407), 'distutils.version.StrictVersion', 'StrictVersion', (['"""10.14"""'], {}), "('10.14')\n", (2398, 2407), False, 'from distutils.version import StrictVersion\n'), ((2573, 2643), 'subprocess.Popen', 'subprocess.Popen', (['_cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (2589, 2643), False, 'import subprocess\n'), ((2368, 2377), 'platform.mac_ver', 'mac_ver', ([], {}), '()\n', (2375, 2377), False, 'from platform import mac_ver\n'), ((1729, 1758), 'munkicon.plist.readPlistFromString', 'plist.readPlistFromString', (['_r'], {}), '(_r)\n', (1754, 1758), False, 'from munkicon import plist\n')]
|
#!/usr/bin/env python3
import os
from itertools import chain
from collections import Counter
import argparse
import gatenlphiltlab
relators = [
"because",
"cuz",
"since",
"after",
"when",
"whenever",
"once",
"therefore",
"so",
"if",
"soon",
"result",
"results",
"resulted",
"resulting",
"cause",
"causes",
"caused",
"causing",
"starts",
"start",
"starts",
"started",
"starting",
"make",
"makes",
"made",
"making",
"precipitate",
"precipitates",
"precipitated",
"precipitating",
"lead",
"leads",
"led",
"produce",
"produces",
"produced",
"producing",
"provoke",
"provokes",
"provoked",
"provoking",
"breeds",
"breeds",
"bred",
"breeding",
"induce",
"induces",
"induced",
"inducing",
"create",
"creates",
"created",
"creating",
"effect",
"effects",
"effected",
"effecting",
]
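# Candidate causal connectives; note that the matching loop below currently checks only "because".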
parser = argparse.ArgumentParser(
description="Annotates causal connectives within GATE annotation files"
)
parser.add_argument(
"-i",
"--annotation-file",
dest="annotation_files",
nargs="+",
required="true",
help="GATE annotation files"
)
args = parser.parse_args()
for annotation_file_path in args.annotation_files:
annotation_file = gatenlphiltlab.AnnotationFile(annotation_file_path)
EAU_heuristics_set = annotation_file.create_annotation_set("EAU_heuristics")
tokens = [
annotation
for annotation in annotation_file.annotations
if annotation.type.lower() == "token"
]
for token in tokens:
# if token.text.lower() in relators:
if token.text.lower() == "because":
EAU_heuristics_set.create_annotation(
annotation_type="possible_causal_connective",
start=token.start_node,
end=token.end_node,
)
annotation_file.save_changes()
|
[
"argparse.ArgumentParser",
"gatenlphiltlab.AnnotationFile"
] |
[((1023, 1124), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Annotates causal connectives within GATE annotation files"""'}), "(description=\n 'Annotates causal connectives within GATE annotation files')\n", (1046, 1124), False, 'import argparse\n'), ((1383, 1434), 'gatenlphiltlab.AnnotationFile', 'gatenlphiltlab.AnnotationFile', (['annotation_file_path'], {}), '(annotation_file_path)\n', (1412, 1434), False, 'import gatenlphiltlab\n')]
|
import sys
import os
sys.path.append(snakemake.config['paths']['mcc_path'])
import scripts.mccutils as mccutils
def main():
download_success = mccutils.download(snakemake.params.url, snakemake.output[0], md5=snakemake.params.md5, max_attempts=3)
if not download_success:
print("popoolationTE2 download failed... exiting...")
print("try running --install with --clean for clean installation")
sys.exit(1)
if __name__ == "__main__":
main()
|
[
"sys.path.append",
"scripts.mccutils.download",
"sys.exit"
] |
[((21, 75), 'sys.path.append', 'sys.path.append', (["snakemake.config['paths']['mcc_path']"], {}), "(snakemake.config['paths']['mcc_path'])\n", (36, 75), False, 'import sys\n'), ((148, 255), 'scripts.mccutils.download', 'mccutils.download', (['snakemake.params.url', 'snakemake.output[0]'], {'md5': 'snakemake.params.md5', 'max_attempts': '(3)'}), '(snakemake.params.url, snakemake.output[0], md5=snakemake.\n params.md5, max_attempts=3)\n', (165, 255), True, 'import scripts.mccutils as mccutils\n'), ((425, 436), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (433, 436), False, 'import sys\n')]
|
import flask
import random
import sys
import os
import glob
import re
from pathlib import Path
import pickle
import numpy as np
# Import fast.ai Library
from fastai import *
from fastai.vision import *
# Flask utils
from flask import Flask, redirect, url_for, request, render_template,jsonify
from werkzeug.utils import secure_filename
app = flask.Flask(__name__)
UPLOAD_FOLDER = './UPLOAD_FOLDER/'
path=Path("path")
classes = ['stress', 'non-stress']
learn=load_learner(path,'a.pkl')
with open('classifier_pickle','rb') as f:
cls=pickle.load(f)
label_dictionary = {0: 'Healthy Plant', 1: 'Stress but recoverable', 2: 'Cannot Recover'}
def model_predict(img_path):
"""model_predict will return the preprocessed image
"""
img = open_image(img_path)
pred_class,pred_idx,outputs = learn.predict(img)
return pred_class
@app.route('/upload', methods = ['GET', 'POST'])
def handle_request():
print("hello");
imagefile = flask.request.files['image']
print("hello", flask.request);
filename = UPLOAD_FOLDER + str(random.randint(0, 5000)) + '.png'
#filename = werkzeug.utils.secure_filename(imagefile.filename)
#filename= "photo.jpg";
print("\nReceived image File name : " + imagefile.filename)
imagefile.save(filename)
preds=model_predict(filename)
print(type(preds))
return str(preds)
@app.route('/calculate', methods = ['GET', 'POST'])
def handle_response():
print("Hello");
# getting the data from a separate json file.
json = request.get_json()
# the keys that should be included in the json file.
transaction_keys = ['tdry' , 'twet', 'tcanopy', 'timeDay']
# return a error message if a key is not included in the file.
#stringValues= flask.request.values.get['dry', 'wet', 'canopy', 'time']
#print("Hello", flask.request);
a=json[transaction_keys[0]]
print(a)
b=json[transaction_keys[1]]
print(b)
c=json[transaction_keys[2]]
print(c)
d=json[transaction_keys[3]]
print(d)
pred=np.array([[a,b,c,d]])
pr=cls.predict(pred)
print(pr)
return jsonify(label_dictionary[int(pr)])
#ans=label_dictionary[int(pr)]
#print(ans)
#return ans
app.run(host="127.0.0.1",port=5000, debug=True)
|
[
"random.randint",
"flask.Flask",
"pathlib.Path",
"pickle.load",
"numpy.array",
"flask.request.get_json"
] |
[((346, 367), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (357, 367), False, 'import flask\n'), ((409, 421), 'pathlib.Path', 'Path', (['"""path"""'], {}), "('path')\n", (413, 421), False, 'from pathlib import Path\n'), ((544, 558), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (555, 558), False, 'import pickle\n'), ((1508, 1526), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1524, 1526), False, 'from flask import Flask, redirect, url_for, request, render_template, jsonify\n'), ((1990, 2014), 'numpy.array', 'np.array', (['[[a, b, c, d]]'], {}), '([[a, b, c, d]])\n', (1998, 2014), True, 'import numpy as np\n'), ((1054, 1077), 'random.randint', 'random.randint', (['(0)', '(5000)'], {}), '(0, 5000)\n', (1068, 1077), False, 'import random\n')]
|
from __future__ import absolute_import, print_function, unicode_literals
from gripql.graph import Graph
from gripql.util import BaseConnection, raise_for_status
class Connection(BaseConnection):
def __init__(self, url, user=None, password=None, token=None, credential_file=None):
super(Connection, self).__init__(url, user, password, token, credential_file)
self.url = self.base_url + "/v1/graph"
def listGraphs(self):
"""
List graphs.
"""
response = self.session.get(
self.url
)
raise_for_status(response)
return response.json()['graphs']
def addGraph(self, name):
"""
Create a new graph.
"""
response = self.session.post(
self.url + "/" + name,
{}
)
raise_for_status(response)
return response.json()
def deleteGraph(self, name):
"""
Delete graph.
"""
response = self.session.delete(
self.url + "/" + name
)
raise_for_status(response)
return response.json()
def getSchema(self, name):
"""
Get a graph schema.
"""
response = self.session.get(
self.url + "/" + name + "/schema"
)
raise_for_status(response)
return response.json()
def graph(self, name):
"""
Get a graph handle.
"""
return Graph(self.base_url, name, self.user, self.password, self.token, self.credential_file)
|
[
"gripql.graph.Graph",
"gripql.util.raise_for_status"
] |
[((568, 594), 'gripql.util.raise_for_status', 'raise_for_status', (['response'], {}), '(response)\n', (584, 594), False, 'from gripql.util import BaseConnection, raise_for_status\n'), ((825, 851), 'gripql.util.raise_for_status', 'raise_for_status', (['response'], {}), '(response)\n', (841, 851), False, 'from gripql.util import BaseConnection, raise_for_status\n'), ((1055, 1081), 'gripql.util.raise_for_status', 'raise_for_status', (['response'], {}), '(response)\n', (1071, 1081), False, 'from gripql.util import BaseConnection, raise_for_status\n'), ((1298, 1324), 'gripql.util.raise_for_status', 'raise_for_status', (['response'], {}), '(response)\n', (1314, 1324), False, 'from gripql.util import BaseConnection, raise_for_status\n'), ((1451, 1542), 'gripql.graph.Graph', 'Graph', (['self.base_url', 'name', 'self.user', 'self.password', 'self.token', 'self.credential_file'], {}), '(self.base_url, name, self.user, self.password, self.token, self.\n credential_file)\n', (1456, 1542), False, 'from gripql.graph import Graph\n')]
|
#!/usr/bin/python3
"""This module defines a class to manage file storage for hbnb clone"""
import json
class FileStorage:
"""This class manages storage of hbnb models in JSON format"""
__file_path = 'file.json'
__objects = {}
def all(self, cls=None):
"""Returns a dictionary of models currently in storage"""
if cls is not None:
if type(cls) == str:
cls = eval(cls)
new_dict = {}
for key, value in self.__objects.items():
# if self.__class__.__name__ == cls:
if type(value) == cls:
new_dict[key] = value
return new_dict
else:
return self.__objects
def new(self, obj):
"""Adds new object to storage dictionary"""
if obj:
key = "{}.{}".format(type(obj).__name__, obj.id)
self.__objects[key] = obj
def save(self):
"""Saves storage dictionary to file"""
with open(FileStorage.__file_path, 'w') as f:
temp = {}
temp.update(FileStorage.__objects)
for key, val in temp.items():
temp[key] = val.to_dict()
json.dump(temp, f)
def reload(self):
"""Loads storage dictionary from file"""
from models.base_model import BaseModel
from models.user import User
from models.place import Place
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.review import Review
classes = {
'BaseModel': BaseModel, 'User': User, 'Place': Place,
'State': State, 'City': City, 'Amenity': Amenity,
'Review': Review
}
try:
temp = {}
with open(FileStorage.__file_path, 'r') as f:
temp = json.load(f)
for key, val in temp.items():
self.all()[key] = classes[val['__class__']](**val)
except FileNotFoundError:
pass
def delete(self, obj=None):
"""Delete objects"""
if obj:
key = "{}.{}".format(type(obj).__name__, obj.id)
if self.__objects[key]:
del FileStorage.__objects[key]
self.save()
def close(self):
"""Method for deserializing the JSON file to objects"""
self.reload()
|
[
"json.dump",
"json.load"
] |
[((1198, 1216), 'json.dump', 'json.dump', (['temp', 'f'], {}), '(temp, f)\n', (1207, 1216), False, 'import json\n'), ((1911, 1923), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1920, 1923), False, 'import json\n')]
|
import time
from collections import defaultdict
from datetime import timedelta
import cvxpy as cp
import empiricalutilities as eu
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
from transfer_entropy import TransferEntropy
plt.style.use('fivethirtyeight')
# %%
eqs = 'SPY DIA XLK XLV XLF IYZ XLY XLP XLI XLE XLU XME IYR XLB XPH IWM PHO ' \
'SOXX WOOD FDN GNR IBB ILF ITA IYT KIE PBW ' \
'AFK EZA ECH EWW EWC EWZ EEM EIDO EPOL EPP EWA EWD EWG EWH EWJ EWI EWK ' \
'EWL EWM EWP EWQ EWS EWT EWU EWY GXC HAO EZU RSX TUR'.split()
fi = 'AGG SHY IEI IEF TLT TIP LQD HYG MBB'.split()
cmdtys = 'GLD SLV DBA DBC USO UNG'.split()
fx = 'FXA FXB FXC FXE FXF FXY'.split()
assets = eqs + fi + cmdtys + fx
def cum_rets(rets):
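    """Compound a series of simple returns into a cumulative growth path starting at 1."""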
cum_rets = []
cum_rets.append(1)
for i, ret in enumerate(rets):
cum_rets.append(cum_rets[i]*(1+ret))
return cum_rets
# %%
ete_mats = {}
mod = TransferEntropy(assets=assets)
period = 'Q'
months = mod.prices.index.to_period(period).unique().to_timestamp()
iters = len(months)-24
with tqdm(total=iters) as pbar:
for start, end in zip(months[:-1], months[1:]):
end -= timedelta(1)
mod.set_timeperiod(start, end)
mod.compute_effective_transfer_entropy(sims=30, bins=6,
std_threshold=1)
ete = mod.ete.copy()
ete_mats[start] = ete
pbar.update(1)
ete_df = pd.concat(ete_mats)
ete_df.to_csv(f'../ete_{period}.csv')
# %%
q = 4
res = defaultdict(dict)
mod = TransferEntropy(assets=assets)
iters = len(months)-1
for start, end in zip(months[:-1], months[1:]):
ete = ete_mats[start]
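    # Collapse the pairwise ETE matrix into per-asset outgoing and incoming totals.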
ete_out = ete.sum(axis=0)
ete_in = ete.sum(axis=1)
end -= timedelta(1)
mod.set_timeperiod(start, end)
returns = mod.prices.iloc[-1]/mod.prices.iloc[0]-1
vols = mod.data.std()
names = 'eteout etein etenetout etetotal'.split()
for name, ETE in zip(names, [ete_out, ete_in,
ete_out-ete_in, ete_in+ete_out]):
df = pd.DataFrame({'returns': returns, 'vol': vols, name: ETE})
df['q'] = pd.qcut(ETE, q=q, labels=False)
res[name][start] = df.groupby('q').agg('mean').reset_index().copy()
# %%
q_rets = {}
for name in names:
resdf = res[name]
resdf = pd.concat(resdf)
resdf.index = resdf.index.droplevel(1)
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
cmap = sns.color_palette('Blues_r', n_colors=4)
for c, qtile in zip(cmap, range(q)):
q_rets[qtile] = resdf[resdf['q']==qtile]['returns'].values
ax.plot(months, cum_rets(q_rets[qtile]), c=c,
lw=2, alpha=1, label=f'Quartile {qtile+1}')
fig.autofmt_xdate()
plt.ylabel('Cumulative Return')
plt.xlabel('Time')
plt.legend()
plt.tight_layout()
plt.savefig(f'../plots/{name}_quartile_returns.png', dpi=300)
eu.latex_figure(f'../data/plots/{name}_quartile_returns.png')
# %%
for name in names:
table = defaultdict(dict)
resdf = res[name]
resdf = pd.concat(resdf)
resdf.index = resdf.index.droplevel(1)
table
for qtile in range(q):
table[qtile]['r'] = resdf[resdf['q']==qtile]['returns'].mean()*12
table[qtile]['v'] = resdf[resdf['q']==qtile]['returns'].std()*np.sqrt(12)
table[qtile][name] = resdf[resdf['q']==qtile][name].mean()
table = pd.DataFrame.from_dict(table, orient='index')
table['sr'] = table['r']/table['v']
table = table.reset_index()
table = table[['index', 'r', 'v', 'sr', name]]
cols = 'Quartile Return Volatility Sharpe'.split()
cols += [name]
table.columns = cols
table['Quartile'] += 1
table[['Return', 'Volatility']] *= 100
eu.latex_print(table, prec=2, hide_index=True)
# %%
def get_CAPM_weights(er, cov, gamma):
n = cov.shape[0]
w = cp.Variable((n, 1))
gamma = cp.Parameter(nonneg=True, value=gamma)
ret = w.T @ er
risk = cp.quad_form(w, cov)
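    # Long-only, fully invested, 10% per-asset cap, minimum 2% expected return.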
constraints = [
cp.sum(w) == 1,
w <= 0.1,
w >= 0,
ret >= 0.02,
]
obj = cp.Maximize(ret - gamma*risk)
prob = cp.Problem(obj, constraints)
prob.solve()
return w.value
def get_MV_weights(er, cov):
n = cov.shape[0]
w = cp.Variable((n, 1))
ret = w.T @ er
risk = cp.quad_form(w, cov)
constraints = [
cp.sum(w) == 1,
w <= 0.1,
w >= 0,
ret >= 0.02,
]
obj = cp.Minimize(risk)
prob = cp.Problem(obj, constraints)
prob.solve()
return w.value
def get_weights(er, start, ete):
n = len(ete)
w = cp.Variable((n, 1))
ret = w.T @ er
    obj = cp.Minimize(w.T @ ete)
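    # Minimise the portfolio's aggregate net-outgoing ETE under the same long-only constraints.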
constraints = [
cp.sum(w) == 1,
w <= 0.1,
w >= 0,
ret >= 0.02,
]
prob = cp.Problem(obj, constraints)
prob.solve()
return w.value
# %%
ete_mats = pd.read_csv('../ete_Q.csv', index_col=[0, 1], parse_dates=True,
infer_datetime_format=True)
ete_mats = ete_mats[assets].copy()
mod = TransferEntropy(assets=assets)
mo_df = mod.prices.resample('Q').last()
mo_ret_df = (mo_df/mo_df.shift(1).values-1).dropna()
EXP_RETS = mo_ret_df.mean().values.reshape(-1, 1)
e_perf = []
e_perf_ete = []
mv_perf = []
mv_perf_ete = []
capm = defaultdict(list)
capm_ete = defaultdict(list)
gammas = [0.1, 1, 10]
with tqdm(total=iters) as pbar:
for start, end in zip(months[:-1], months[1:]):
end -= timedelta(1)
mod.set_timeperiod(start, end)
# get month's returns, cov, and ete matrices
cov = np.cov(mod.data.values, rowvar=False)
ete_mat = ete_mats.loc[start]
ete_mat = ete_mat.T[assets].T.values.copy()
ete_out = ete_mat.sum(axis=0).reshape(-1, 1)
ete_in = ete_mat.sum(axis=1).reshape(-1, 1)
net_out = ete_out - ete_in
r = (mod.prices.iloc[-1]/mod.prices.iloc[0]-1).values
# get strategy weights
we = get_weights(EXP_RETS, start, net_out)
wmv = get_MV_weights(EXP_RETS, cov)
e_perf.append(np.squeeze(we.T @ r))
e_perf_ete.append(np.squeeze(we.T @ net_out))
mv_perf.append(np.squeeze(wmv.T @ r))
mv_perf_ete.append(np.squeeze(wmv.T @ net_out))
for gamma in gammas:
w_capm = get_CAPM_weights(EXP_RETS, cov, gamma)
capm[gamma].append(np.squeeze(w_capm.T @ r))
capm_ete[gamma].append(np.squeeze(w_capm.T @ net_out))
pbar.update(1)
# %%
alpha=0.75
lw=2
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
cmap2 = sns.color_palette('Reds_r', n_colors=len(gammas)*2)
ax.plot(months, cum_rets(e_perf), alpha=alpha,
label='ETE', lw=lw, c='steelblue')
ax.plot(months, cum_rets(mv_perf), alpha=alpha,
label='MV', lw=lw, c='forestgreen')
for i, gamma in enumerate(reversed(gammas[1:])):
ax.plot(months, cum_rets(capm[gamma]), alpha=alpha,
label=f'CAPM $\\gamma={gamma}$', lw=lw, c=cmap2[i])
fig.autofmt_xdate()
plt.ylabel('Cumulative Return')
plt.xlabel('Time')
plt.legend()
plt.tight_layout()
eu.save_fig(f'../plots/portfolio_comparison', dpi=300)
plt.show()
eu.latex_figure(f'../plots/portfolio_comparison')
# %%
tbl = pd.DataFrame({
'ETE': e_perf,
'MV': mv_perf,
'CAPM 1': capm[1],
'CAPM 10': capm[10],
}, index=months[1:])
tbl = (tbl.mean()*4).to_frame().join((tbl.std()*np.sqrt(4)).to_frame(),
rsuffix='vol')
tbl.columns = 'Return Volatility'.split()
tbl['Sharpe'] = tbl['Return']/tbl['Volatility']
tbl['Return'] *= 100
tbl['Volatility'] *= 100
tbl2 = pd.DataFrame({
'ETE': e_perf_ete,
'MV': mv_perf_ete,
'CAPM 1': capm_ete[1],
'CAPM 10': capm_ete[10],
}, index=months[1:])
tbl = tbl.join(tbl2.mean().to_frame())
tbl.columns = 'Return Volatility Sharpe ETE'.split()
eu.latex_print(tbl, prec=2)
|
[
"pandas.read_csv",
"collections.defaultdict",
"cvxpy.sum",
"matplotlib.pyplot.style.use",
"empiricalutilities.save_fig",
"matplotlib.pyplot.tight_layout",
"cvxpy.Maximize",
"cvxpy.quad_form",
"pandas.DataFrame",
"datetime.timedelta",
"cvxpy.Problem",
"empiricalutilities.latex_figure",
"pandas.qcut",
"numpy.cov",
"matplotlib.pyplot.subplots",
"pandas.concat",
"tqdm.tqdm",
"matplotlib.pyplot.show",
"pandas.DataFrame.from_dict",
"matplotlib.pyplot.legend",
"cvxpy.Variable",
"numpy.squeeze",
"matplotlib.pyplot.ylabel",
"transfer_entropy.TransferEntropy",
"cvxpy.Minimize",
"cvxpy.Parameter",
"empiricalutilities.latex_print",
"seaborn.color_palette",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((293, 325), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (306, 325), True, 'import matplotlib.pyplot as plt\n'), ((959, 989), 'transfer_entropy.TransferEntropy', 'TransferEntropy', ([], {'assets': 'assets'}), '(assets=assets)\n', (974, 989), False, 'from transfer_entropy import TransferEntropy\n'), ((1466, 1485), 'pandas.concat', 'pd.concat', (['ete_mats'], {}), '(ete_mats)\n', (1475, 1485), True, 'import pandas as pd\n'), ((1541, 1558), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1552, 1558), False, 'from collections import defaultdict\n'), ((1566, 1596), 'transfer_entropy.TransferEntropy', 'TransferEntropy', ([], {'assets': 'assets'}), '(assets=assets)\n', (1581, 1596), False, 'from transfer_entropy import TransferEntropy\n'), ((4887, 4982), 'pandas.read_csv', 'pd.read_csv', (['"""../ete_Q.csv"""'], {'index_col': '[0, 1]', 'parse_dates': '(True)', 'infer_datetime_format': '(True)'}), "('../ete_Q.csv', index_col=[0, 1], parse_dates=True,\n infer_datetime_format=True)\n", (4898, 4982), True, 'import pandas as pd\n'), ((5025, 5055), 'transfer_entropy.TransferEntropy', 'TransferEntropy', ([], {'assets': 'assets'}), '(assets=assets)\n', (5040, 5055), False, 'from transfer_entropy import TransferEntropy\n'), ((5265, 5282), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5276, 5282), False, 'from collections import defaultdict\n'), ((5294, 5311), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5305, 5311), False, 'from collections import defaultdict\n'), ((6485, 6520), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 6)'}), '(1, 1, figsize=(10, 6))\n', (6497, 6520), True, 'import matplotlib.pyplot as plt\n'), ((6942, 6973), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative Return"""'], {}), "('Cumulative Return')\n", (6952, 6973), True, 'import matplotlib.pyplot as plt\n'), ((6974, 6992), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (6984, 6992), True, 'import matplotlib.pyplot as plt\n'), ((6993, 7005), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7003, 7005), True, 'import matplotlib.pyplot as plt\n'), ((7006, 7024), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7022, 7024), True, 'import matplotlib.pyplot as plt\n'), ((7025, 7079), 'empiricalutilities.save_fig', 'eu.save_fig', (['f"""../plots/portfolio_comparison"""'], {'dpi': '(300)'}), "(f'../plots/portfolio_comparison', dpi=300)\n", (7036, 7079), True, 'import empiricalutilities as eu\n'), ((7080, 7090), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7088, 7090), True, 'import matplotlib.pyplot as plt\n'), ((7091, 7140), 'empiricalutilities.latex_figure', 'eu.latex_figure', (['f"""../plots/portfolio_comparison"""'], {}), "(f'../plots/portfolio_comparison')\n", (7106, 7140), True, 'import empiricalutilities as eu\n'), ((7152, 7258), 'pandas.DataFrame', 'pd.DataFrame', (["{'ETE': e_perf, 'MV': mv_perf, 'CAPM 1': capm[1], 'CAPM 10': capm[10]}"], {'index': 'months[1:]'}), "({'ETE': e_perf, 'MV': mv_perf, 'CAPM 1': capm[1], 'CAPM 10':\n capm[10]}, index=months[1:])\n", (7164, 7258), True, 'import pandas as pd\n'), ((7539, 7661), 'pandas.DataFrame', 'pd.DataFrame', (["{'ETE': e_perf_ete, 'MV': mv_perf_ete, 'CAPM 1': capm_ete[1], 'CAPM 10':\n capm_ete[10]}"], {'index': 'months[1:]'}), "({'ETE': e_perf_ete, 'MV': mv_perf_ete, 'CAPM 1': capm_ete[1],\n 'CAPM 10': capm_ete[10]}, 
index=months[1:])\n", (7551, 7661), True, 'import pandas as pd\n'), ((7793, 7820), 'empiricalutilities.latex_print', 'eu.latex_print', (['tbl'], {'prec': '(2)'}), '(tbl, prec=2)\n', (7807, 7820), True, 'import empiricalutilities as eu\n'), ((1100, 1117), 'tqdm.tqdm', 'tqdm', ([], {'total': 'iters'}), '(total=iters)\n', (1104, 1117), False, 'from tqdm import tqdm\n'), ((1766, 1778), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (1775, 1778), False, 'from datetime import timedelta\n'), ((2340, 2356), 'pandas.concat', 'pd.concat', (['resdf'], {}), '(resdf)\n', (2349, 2356), True, 'import pandas as pd\n'), ((2415, 2450), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 6)'}), '(1, 1, figsize=(10, 6))\n', (2427, 2450), True, 'import matplotlib.pyplot as plt\n'), ((2462, 2502), 'seaborn.color_palette', 'sns.color_palette', (['"""Blues_r"""'], {'n_colors': '(4)'}), "('Blues_r', n_colors=4)\n", (2479, 2502), True, 'import seaborn as sns\n'), ((2750, 2781), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative Return"""'], {}), "('Cumulative Return')\n", (2760, 2781), True, 'import matplotlib.pyplot as plt\n'), ((2786, 2804), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (2796, 2804), True, 'import matplotlib.pyplot as plt\n'), ((2809, 2821), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2819, 2821), True, 'import matplotlib.pyplot as plt\n'), ((2826, 2844), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2842, 2844), True, 'import matplotlib.pyplot as plt\n'), ((2849, 2910), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""../plots/{name}_quartile_returns.png"""'], {'dpi': '(300)'}), "(f'../plots/{name}_quartile_returns.png', dpi=300)\n", (2860, 2910), True, 'import matplotlib.pyplot as plt\n'), ((2915, 2976), 'empiricalutilities.latex_figure', 'eu.latex_figure', (['f"""../data/plots/{name}_quartile_returns.png"""'], {}), "(f'../data/plots/{name}_quartile_returns.png')\n", (2930, 2976), True, 'import empiricalutilities as eu\n'), ((3015, 3032), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (3026, 3032), False, 'from collections import defaultdict\n'), ((3067, 3083), 'pandas.concat', 'pd.concat', (['resdf'], {}), '(resdf)\n', (3076, 3083), True, 'import pandas as pd\n'), ((3400, 3445), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['table'], {'orient': '"""index"""'}), "(table, orient='index')\n", (3422, 3445), True, 'import pandas as pd\n'), ((3742, 3788), 'empiricalutilities.latex_print', 'eu.latex_print', (['table'], {'prec': '(2)', 'hide_index': '(True)'}), '(table, prec=2, hide_index=True)\n', (3756, 3788), True, 'import empiricalutilities as eu\n'), ((3861, 3880), 'cvxpy.Variable', 'cp.Variable', (['(n, 1)'], {}), '((n, 1))\n', (3872, 3880), True, 'import cvxpy as cp\n'), ((3893, 3931), 'cvxpy.Parameter', 'cp.Parameter', ([], {'nonneg': '(True)', 'value': 'gamma'}), '(nonneg=True, value=gamma)\n', (3905, 3931), True, 'import cvxpy as cp\n'), ((3962, 3982), 'cvxpy.quad_form', 'cp.quad_form', (['w', 'cov'], {}), '(w, cov)\n', (3974, 3982), True, 'import cvxpy as cp\n'), ((4102, 4133), 'cvxpy.Maximize', 'cp.Maximize', (['(ret - gamma * risk)'], {}), '(ret - gamma * risk)\n', (4113, 4133), True, 'import cvxpy as cp\n'), ((4143, 4171), 'cvxpy.Problem', 'cp.Problem', (['obj', 'constraints'], {}), '(obj, constraints)\n', (4153, 4171), True, 'import cvxpy as cp\n'), ((4268, 4287), 'cvxpy.Variable', 'cp.Variable', (['(n, 1)'], {}), '((n, 1))\n', 
(4279, 4287), True, 'import cvxpy as cp\n'), ((4318, 4338), 'cvxpy.quad_form', 'cp.quad_form', (['w', 'cov'], {}), '(w, cov)\n', (4330, 4338), True, 'import cvxpy as cp\n'), ((4458, 4475), 'cvxpy.Minimize', 'cp.Minimize', (['risk'], {}), '(risk)\n', (4469, 4475), True, 'import cvxpy as cp\n'), ((4487, 4515), 'cvxpy.Problem', 'cp.Problem', (['obj', 'constraints'], {}), '(obj, constraints)\n', (4497, 4515), True, 'import cvxpy as cp\n'), ((4611, 4630), 'cvxpy.Variable', 'cp.Variable', (['(n, 1)'], {}), '((n, 1))\n', (4622, 4630), True, 'import cvxpy as cp\n'), ((4660, 4682), 'cvxpy.Minimize', 'cp.Minimize', (['(w.T @ ete)'], {}), '(w.T @ ete)\n', (4671, 4682), True, 'import cvxpy as cp\n'), ((4805, 4833), 'cvxpy.Problem', 'cp.Problem', (['obj', 'constraints'], {}), '(obj, constraints)\n', (4815, 4833), True, 'import cvxpy as cp\n'), ((5340, 5357), 'tqdm.tqdm', 'tqdm', ([], {'total': 'iters'}), '(total=iters)\n', (5344, 5357), False, 'from tqdm import tqdm\n'), ((1194, 1206), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (1203, 1206), False, 'from datetime import timedelta\n'), ((2082, 2140), 'pandas.DataFrame', 'pd.DataFrame', (["{'returns': returns, 'vol': vols, name: ETE}"], {}), "({'returns': returns, 'vol': vols, name: ETE})\n", (2094, 2140), True, 'import pandas as pd\n'), ((2159, 2190), 'pandas.qcut', 'pd.qcut', (['ETE'], {'q': 'q', 'labels': '(False)'}), '(ETE, q=q, labels=False)\n', (2166, 2190), True, 'import pandas as pd\n'), ((5434, 5446), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (5443, 5446), False, 'from datetime import timedelta\n'), ((5554, 5591), 'numpy.cov', 'np.cov', (['mod.data.values'], {'rowvar': '(False)'}), '(mod.data.values, rowvar=False)\n', (5560, 5591), True, 'import numpy as np\n'), ((3308, 3319), 'numpy.sqrt', 'np.sqrt', (['(12)'], {}), '(12)\n', (3315, 3319), True, 'import numpy as np\n'), ((4011, 4020), 'cvxpy.sum', 'cp.sum', (['w'], {}), '(w)\n', (4017, 4020), True, 'import cvxpy as cp\n'), ((4367, 4376), 'cvxpy.sum', 'cp.sum', (['w'], {}), '(w)\n', (4373, 4376), True, 'import cvxpy as cp\n'), ((4713, 4722), 'cvxpy.sum', 'cp.sum', (['w'], {}), '(w)\n', (4719, 4722), True, 'import cvxpy as cp\n'), ((6036, 6056), 'numpy.squeeze', 'np.squeeze', (['(we.T @ r)'], {}), '(we.T @ r)\n', (6046, 6056), True, 'import numpy as np\n'), ((6084, 6110), 'numpy.squeeze', 'np.squeeze', (['(we.T @ net_out)'], {}), '(we.T @ net_out)\n', (6094, 6110), True, 'import numpy as np\n'), ((6136, 6157), 'numpy.squeeze', 'np.squeeze', (['(wmv.T @ r)'], {}), '(wmv.T @ r)\n', (6146, 6157), True, 'import numpy as np\n'), ((6186, 6213), 'numpy.squeeze', 'np.squeeze', (['(wmv.T @ net_out)'], {}), '(wmv.T @ net_out)\n', (6196, 6213), True, 'import numpy as np\n'), ((6336, 6360), 'numpy.squeeze', 'np.squeeze', (['(w_capm.T @ r)'], {}), '(w_capm.T @ r)\n', (6346, 6360), True, 'import numpy as np\n'), ((6397, 6427), 'numpy.squeeze', 'np.squeeze', (['(w_capm.T @ net_out)'], {}), '(w_capm.T @ net_out)\n', (6407, 6427), True, 'import numpy as np\n'), ((7347, 7357), 'numpy.sqrt', 'np.sqrt', (['(4)'], {}), '(4)\n', (7354, 7357), True, 'import numpy as np\n')]
|