code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# <NAME>. aïvázis
# orthologue
# (c) 1998-2022 all rights reserved
#
"""
Exercise setting and getting individual vector elements
"""
def test():
    """
    Exercise setting and getting individual vector elements.

    Builds a 100-element gsl vector, fills it with a ramp pattern, and
    checks element access through both positive and negative indices.

    return: the populated vector, for further inspection by callers
    """
    # package access
    import gsl
    # make a vector
    v = gsl.vector(shape=100)
    # fill with a test pattern
    for i in range(len(v)):
        v[i] = i
    # verify it happened
    assert v[50] == 50
    # access through reflection: negative indices count from the end,
    # so v[-99] is the second element, i.e. v[1]
    # BUG FIX: the original line evaluated the comparison and discarded the
    # result, so the check never ran; it must be asserted to actually test.
    assert v[-99] == v[1]
    # all done
    return v
# main
if __name__ == "__main__":
    test()
# end of file
|
[
"gsl.vector"
] |
[((262, 283), 'gsl.vector', 'gsl.vector', ([], {'shape': '(100)'}), '(shape=100)\n', (272, 283), False, 'import gsl\n')]
|
from functools import wraps
import numpy as np
import torch
from mani_skill_learn.utils.data import split_in_dict_array, concat_list_of_array
def disable_gradients(network):
    """Freeze *network*: mark every parameter as not requiring gradients."""
    for weight in network.parameters():
        weight.requires_grad = False
def worker_init_fn(worker_id):
    """Seed numpy's RNG inside a pytorch multi-process DataLoader worker.

    A base seed is drawn from the pytorch random generator (to stay
    consistent with torch-side seeding) and offset by the worker id so
    every worker ends up with a distinct numpy seed.

    References:
        https://pytorch.org/docs/stable/notes/faq.html#dataloader-workers-random-seed
    """
    seed = torch.IntTensor(1).random_().item() + worker_id
    np.random.seed(seed)
def no_grad(f):
    """Decorator that runs ``f`` with gradient tracking disabled.

    Wraps the call in ``torch.no_grad()``, which saves memory and compute
    on inference-only code paths.

    :param f: the function to wrap
    :return: a wrapper with the same signature and (now) the same metadata
    """
    # BUG FIX: the original called ``wraps(f)`` as a bare statement and threw
    # the result away, so ``wrapper`` never inherited f's __name__/__doc__.
    # ``wraps`` must be applied as a decorator (or its return value applied).
    @wraps(f)
    def wrapper(*args, **kwargs):
        with torch.no_grad():
            return f(*args, **kwargs)
    return wrapper
def run_with_mini_batch(function, data, batch_size):
    """
    Apply a pytorch function over large input in mini-batches.

    Useful when the batch dimension of the data is too large to process
    in a single call.

    :param function: callable applied to each mini-batch
    :param data: the input data, in dict-array structure
    :param batch_size: size of each mini-batch
    :return: the outputs of all mini-batches, concatenated along axis 0
    """
    chunks = split_in_dict_array(data, batch_size, axis=0)
    outputs = [function(chunk) for chunk in chunks]
    return concat_list_of_array(outputs, axis=0)
|
[
"numpy.random.seed",
"torch.IntTensor",
"functools.wraps",
"torch.no_grad",
"mani_skill_learn.utils.data.concat_list_of_array",
"mani_skill_learn.utils.data.split_in_dict_array"
] |
[((658, 695), 'numpy.random.seed', 'np.random.seed', (['(base_seed + worker_id)'], {}), '(base_seed + worker_id)\n', (672, 695), True, 'import numpy as np\n'), ((718, 726), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (723, 726), False, 'from functools import wraps\n'), ((1214, 1259), 'mani_skill_learn.utils.data.split_in_dict_array', 'split_in_dict_array', (['data', 'batch_size'], {'axis': '(0)'}), '(data, batch_size, axis=0)\n', (1233, 1259), False, 'from mani_skill_learn.utils.data import split_in_dict_array, concat_list_of_array\n'), ((1372, 1405), 'mani_skill_learn.utils.data.concat_list_of_array', 'concat_list_of_array', (['ans'], {'axis': '(0)'}), '(ans, axis=0)\n', (1392, 1405), False, 'from mani_skill_learn.utils.data import split_in_dict_array, concat_list_of_array\n'), ((775, 790), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (788, 790), False, 'import torch\n'), ((584, 602), 'torch.IntTensor', 'torch.IntTensor', (['(1)'], {}), '(1)\n', (599, 602), False, 'import torch\n')]
|
"""
test melange.propagators
"""
from jax import random
from jax import vmap
import jax.numpy as jnp
from melange.propagators import *
from melange.tests.utils import checker_function, get_nondefault_potential_initializer
import tqdm
import numpy as np
from jax.config import config; config.update("jax_enable_x64", True)
def test_1D_ULA_propagator(key = random.PRNGKey(0), num_runs=1000):
    """
    Take a batch of 1000 particles distributed according to N(0,2), run ULA
    dynamics with dt=0.01 on a potential whose invariant is N(0,2), and assert
    that the mean and standard deviation are unchanged within a tolerance.
    """
    key, genkey = random.split(key)
    potential, (mu, cov), dG = get_nondefault_potential_initializer(1)
    positions = random.multivariate_normal(key=genkey, mean=mu, cov=cov, shape=[num_runs])
    dt = 1e-2
    # vectorize the single-particle ULA move over particles and keys
    batched_move = vmap(ULA_move, in_axes=(0, None, None, 0, None))
    theta = jnp.array([0.])
    for _ in tqdm.trange(100):
        key, subkey = random.split(key, 2)
        move_keys = random.split(subkey, num_runs)
        positions = batched_move(positions, potential, dt, move_keys, theta)
    # invariant distribution is N(0,2): mean ~ 0, std ~ sqrt(2)
    assert checker_function(positions.mean(), 0.2)
    assert checker_function(positions.std() - jnp.sqrt(2), 0.2)
def test_1D_driven_propagator(key = random.PRNGKey(0), num_runs=1000):
    """
    Take a batch of 1000 particles distributed according to N(0,2), run the
    driven Langevin algorithm with dt=0.01 (and a zero drive) on a potential
    whose invariant is N(0,2), and assert that the mean and standard deviation
    are unchanged within a tolerance.
    """
    key, genkey = random.split(key)
    potential, (mu, cov), dG = get_nondefault_potential_initializer(1)
    positions = random.multivariate_normal(key=genkey, mean=mu, cov=cov, shape=[num_runs])
    dt = 1e-2
    # dummy A and b functions: a zero drive reduces to plain Langevin
    def A(x, a_param): return jnp.zeros((x.shape[0], x.shape[0]))
    def b(x, b_param): return jnp.zeros(x.shape[0])
    batched_move = vmap(driven_Langevin_move, in_axes=(0, None, None, None, None, None, None, None, 0))
    theta = jnp.array([0.])
    for _ in tqdm.trange(100):
        key, subkey = random.split(key, 2)
        move_keys = random.split(subkey, num_runs)
        positions = batched_move(positions,
                                potential,
                                dt,
                                A,
                                b,
                                theta,
                                jnp.array([0.]),
                                jnp.array([0.]),
                                move_keys)
    # invariant distribution is N(0,2): mean ~ 0, std ~ sqrt(2)
    assert checker_function(positions.mean(), 0.2)
    assert checker_function(positions.std() - jnp.sqrt(2), 0.2)
def test_1d_kernel_consistency(key = random.PRNGKey(0)):
    """
    With a 'dummy' (zero) driven forward kernel, assert that the driven
    log proposal ratio matches the Euler-Maruyama (ULA) one in 1D.
    """
    from melange.propagators import generate_Euler_Maruyama_propagators, generate_driven_Langevin_propagators, Euler_Maruyama_log_proposal_ratio, driven_Langevin_log_proposal_ratio
    dt = 0.1
    fwd_params = jnp.array([0.])
    bwd_params = jnp.array([0.])
    # dummy A and b functions: zero drive makes both kernels coincide
    def A(x, a_param): return jnp.zeros((x.shape[0], x.shape[0]))
    def b(x, b_param): return jnp.zeros(x.shape[0])
    potential, (mu, cov), dG = get_nondefault_potential_initializer(1)
    xs = random.multivariate_normal(key=key, mean=jnp.array([1.]), cov=jnp.array([[1.]]), shape=[2])
    EM_propagator, EM_kernel = generate_Euler_Maruyama_propagators()
    driven_propagator, driven_kernel = generate_driven_Langevin_propagators()
    EM_logp_ratio = Euler_Maruyama_log_proposal_ratio(xs[0], xs[1], potential, fwd_params, dt, potential, bwd_params, dt)
    driven_logp_ratio = driven_Langevin_log_proposal_ratio(xs[0],
                                                          xs[1],
                                                          potential,
                                                          potential,
                                                          dt,
                                                          dt,
                                                          A,
                                                          b,
                                                          fwd_params,
                                                          bwd_params,
                                                          A_parameter=fwd_params,
                                                          b_parameter=fwd_params)
    assert np.isclose(EM_logp_ratio, driven_logp_ratio)
def test_forward_ULA_driven_samplers(key = random.PRNGKey(0)):
    """
    Execute `forward_ULA_sampler` and `forward_driven_diffusion_sampler` with
    a time-independent potential whose invariant matches the distribution of
    the 5000 initial samples, and assert the post-propagation statistics obey
    the same mean/variance within a tolerance.
    """
    from melange.propagators import forward_ULA_sampler, forward_driven_diffusion_sampler
    dt = 0.1
    theta = jnp.zeros((100, 1))
    A_params = theta
    b_params = theta
    # dummy A and b functions (zero drive)
    def A(x, a_param): return jnp.zeros((x.shape[0], x.shape[0]))
    def b(x, b_param): return jnp.zeros(x.shape[0])
    potential, (mu, cov), dG = get_nondefault_potential_initializer(1)
    xs = random.multivariate_normal(key=key, mean=mu, cov=cov, shape=[5000])
    start_mean, start_variance = xs.mean(), xs.var()
    ULA_trajs = forward_ULA_sampler(xs, potential, jnp.array([dt] * len(theta)), key, theta)
    driven_trajs = forward_driven_diffusion_sampler(xs, potential, dt, key, A, b, theta, A_params, b_params)
    # final samples should still look like N(0,2)
    mean_tolerance = 0.2
    assert checker_function(ULA_trajs[-1].mean(), mean_tolerance)
    assert checker_function(driven_trajs[-1].mean(), mean_tolerance)
    variance_tolerance = 0.2
    assert checker_function(ULA_trajs[-1].var() - 2., variance_tolerance)
    assert checker_function(driven_trajs[-1].var() - 2., variance_tolerance)
|
[
"jax.config.config.update",
"jax.numpy.array",
"melange.propagators.generate_Euler_Maruyama_propagators",
"jax.vmap",
"tqdm.trange",
"melange.propagators.driven_Langevin_log_proposal_ratio",
"jax.random.PRNGKey",
"jax.random.multivariate_normal",
"melange.propagators.generate_driven_Langevin_propagators",
"melange.tests.utils.get_nondefault_potential_initializer",
"numpy.isclose",
"melange.propagators.forward_driven_diffusion_sampler",
"jax.numpy.zeros",
"melange.propagators.Euler_Maruyama_log_proposal_ratio",
"melange.tests.utils.checker_function",
"jax.numpy.sqrt",
"jax.random.split"
] |
[((284, 321), 'jax.config.config.update', 'config.update', (['"""jax_enable_x64"""', '(True)'], {}), "('jax_enable_x64', True)\n", (297, 321), False, 'from jax.config import config\n'), ((356, 373), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (370, 373), False, 'from jax import random\n'), ((658, 675), 'jax.random.split', 'random.split', (['key'], {}), '(key)\n', (670, 675), False, 'from jax import random\n'), ((707, 746), 'melange.tests.utils.get_nondefault_potential_initializer', 'get_nondefault_potential_initializer', (['(1)'], {}), '(1)\n', (743, 746), False, 'from melange.tests.utils import checker_function, get_nondefault_potential_initializer\n'), ((767, 841), 'jax.random.multivariate_normal', 'random.multivariate_normal', ([], {'key': 'genkey', 'mean': 'mu', 'cov': 'cov', 'shape': '[num_runs]'}), '(key=genkey, mean=mu, cov=cov, shape=[num_runs])\n', (793, 841), False, 'from jax import random\n'), ((881, 929), 'jax.vmap', 'vmap', (['ULA_move'], {'in_axes': '(0, None, None, 0, None)'}), '(ULA_move, in_axes=(0, None, None, 0, None))\n', (885, 929), False, 'from jax import vmap\n'), ((956, 972), 'jax.numpy.array', 'jnp.array', (['[0.0]'], {}), '([0.0])\n', (965, 972), True, 'import jax.numpy as jnp\n'), ((986, 1002), 'tqdm.trange', 'tqdm.trange', (['(100)'], {}), '(100)\n', (997, 1002), False, 'import tqdm\n'), ((1306, 1337), 'melange.tests.utils.checker_function', 'checker_function', (['ula_mean', '(0.2)'], {}), '(ula_mean, 0.2)\n', (1322, 1337), False, 'from melange.tests.utils import checker_function, get_nondefault_potential_initializer\n'), ((1430, 1447), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (1444, 1447), False, 'from jax import random\n'), ((1755, 1772), 'jax.random.split', 'random.split', (['key'], {}), '(key)\n', (1767, 1772), False, 'from jax import random\n'), ((1804, 1843), 'melange.tests.utils.get_nondefault_potential_initializer', 'get_nondefault_potential_initializer', (['(1)'], {}), '(1)\n', (1840, 
1843), False, 'from melange.tests.utils import checker_function, get_nondefault_potential_initializer\n'), ((1867, 1941), 'jax.random.multivariate_normal', 'random.multivariate_normal', ([], {'key': 'genkey', 'mean': 'mu', 'cov': 'cov', 'shape': '[num_runs]'}), '(key=genkey, mean=mu, cov=cov, shape=[num_runs])\n', (1893, 1941), False, 'from jax import random\n'), ((2136, 2224), 'jax.vmap', 'vmap', (['driven_Langevin_move'], {'in_axes': '(0, None, None, None, None, None, None, None, 0)'}), '(driven_Langevin_move, in_axes=(0, None, None, None, None, None, None,\n None, 0))\n', (2140, 2224), False, 'from jax import vmap\n'), ((2239, 2255), 'jax.numpy.array', 'jnp.array', (['[0.0]'], {}), '([0.0])\n', (2248, 2255), True, 'import jax.numpy as jnp\n'), ((2269, 2285), 'tqdm.trange', 'tqdm.trange', (['(100)'], {}), '(100)\n', (2280, 2285), False, 'import tqdm\n'), ((2934, 2968), 'melange.tests.utils.checker_function', 'checker_function', (['driven_mean', '(0.2)'], {}), '(driven_mean, 0.2)\n', (2950, 2968), False, 'from melange.tests.utils import checker_function, get_nondefault_potential_initializer\n'), ((3065, 3082), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (3079, 3082), False, 'from jax import random\n'), ((3470, 3486), 'jax.numpy.array', 'jnp.array', (['[0.0]'], {}), '([0.0])\n', (3479, 3486), True, 'import jax.numpy as jnp\n'), ((3522, 3538), 'jax.numpy.array', 'jnp.array', (['[0.0]'], {}), '([0.0])\n', (3531, 3538), True, 'import jax.numpy as jnp\n'), ((3723, 3762), 'melange.tests.utils.get_nondefault_potential_initializer', 'get_nondefault_potential_initializer', (['(1)'], {}), '(1)\n', (3759, 3762), False, 'from melange.tests.utils import checker_function, get_nondefault_potential_initializer\n'), ((3902, 3939), 'melange.propagators.generate_Euler_Maruyama_propagators', 'generate_Euler_Maruyama_propagators', ([], {}), '()\n', (3937, 3939), False, 'from melange.propagators import generate_Euler_Maruyama_propagators, 
generate_driven_Langevin_propagators, Euler_Maruyama_log_proposal_ratio, driven_Langevin_log_proposal_ratio\n'), ((3979, 4017), 'melange.propagators.generate_driven_Langevin_propagators', 'generate_driven_Langevin_propagators', ([], {}), '()\n', (4015, 4017), False, 'from melange.propagators import generate_Euler_Maruyama_propagators, generate_driven_Langevin_propagators, Euler_Maruyama_log_proposal_ratio, driven_Langevin_log_proposal_ratio\n'), ((4039, 4185), 'melange.propagators.Euler_Maruyama_log_proposal_ratio', 'Euler_Maruyama_log_proposal_ratio', (['xs[0]', 'xs[1]', 'potential', 'forward_potential_parameters', 'dt', 'potential', 'backward_potential_parameters', 'dt'], {}), '(xs[0], xs[1], potential,\n forward_potential_parameters, dt, potential,\n backward_potential_parameters, dt)\n', (4072, 4185), False, 'from melange.propagators import generate_Euler_Maruyama_propagators, generate_driven_Langevin_propagators, Euler_Maruyama_log_proposal_ratio, driven_Langevin_log_proposal_ratio\n'), ((4202, 4444), 'melange.propagators.driven_Langevin_log_proposal_ratio', 'driven_Langevin_log_proposal_ratio', (['xs[0]', 'xs[1]', 'potential', 'potential', 'dt', 'dt', 'A', 'b', 'forward_potential_parameters', 'backward_potential_parameters'], {'A_parameter': 'forward_potential_parameters', 'b_parameter': 'forward_potential_parameters'}), '(xs[0], xs[1], potential, potential, dt,\n dt, A, b, forward_potential_parameters, backward_potential_parameters,\n A_parameter=forward_potential_parameters, b_parameter=\n forward_potential_parameters)\n', (4236, 4444), False, 'from melange.propagators import generate_Euler_Maruyama_propagators, generate_driven_Langevin_propagators, Euler_Maruyama_log_proposal_ratio, driven_Langevin_log_proposal_ratio\n'), ((4865, 4909), 'numpy.isclose', 'np.isclose', (['EM_logp_ratio', 'driven_logp_ratio'], {}), '(EM_logp_ratio, driven_logp_ratio)\n', (4875, 4909), True, 'import numpy as np\n'), ((4954, 4971), 'jax.random.PRNGKey', 'random.PRNGKey', 
(['(0)'], {}), '(0)\n', (4968, 4971), False, 'from jax import random\n'), ((5440, 5459), 'jax.numpy.zeros', 'jnp.zeros', (['(100, 1)'], {}), '((100, 1))\n', (5449, 5459), True, 'import jax.numpy as jnp\n'), ((5712, 5751), 'melange.tests.utils.get_nondefault_potential_initializer', 'get_nondefault_potential_initializer', (['(1)'], {}), '(1)\n', (5748, 5751), False, 'from melange.tests.utils import checker_function, get_nondefault_potential_initializer\n'), ((5759, 5826), 'jax.random.multivariate_normal', 'random.multivariate_normal', ([], {'key': 'key', 'mean': 'mu', 'cov': 'cov', 'shape': '[5000]'}), '(key=key, mean=mu, cov=cov, shape=[5000])\n', (5785, 5826), False, 'from jax import random\n'), ((6099, 6215), 'melange.propagators.forward_driven_diffusion_sampler', 'forward_driven_diffusion_sampler', (['xs', 'potential', 'dt', 'key', 'A', 'b', 'potential_parameters', 'A_parameters', 'b_parameters'], {}), '(xs, potential, dt, key, A, b,\n potential_parameters, A_parameters, b_parameters)\n', (6131, 6215), False, 'from melange.propagators import forward_ULA_sampler, forward_driven_diffusion_sampler\n'), ((1030, 1050), 'jax.random.split', 'random.split', (['key', '(2)'], {}), '(key, 2)\n', (1042, 1050), False, 'from jax import random\n'), ((1070, 1104), 'jax.random.split', 'random.split', (['ula_keygen', 'num_runs'], {}), '(ula_keygen, num_runs)\n', (1082, 1104), False, 'from jax import random\n'), ((2024, 2059), 'jax.numpy.zeros', 'jnp.zeros', (['(x.shape[0], x.shape[0])'], {}), '((x.shape[0], x.shape[0]))\n', (2033, 2059), True, 'import jax.numpy as jnp\n'), ((2090, 2111), 'jax.numpy.zeros', 'jnp.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (2099, 2111), True, 'import jax.numpy as jnp\n'), ((2315, 2335), 'jax.random.split', 'random.split', (['key', '(2)'], {}), '(key, 2)\n', (2327, 2335), False, 'from jax import random\n'), ((2357, 2393), 'jax.random.split', 'random.split', (['drive_keygen', 'num_runs'], {}), '(drive_keygen, num_runs)\n', (2369, 2393), False, 
'from jax import random\n'), ((3603, 3638), 'jax.numpy.zeros', 'jnp.zeros', (['(x.shape[0], x.shape[0])'], {}), '((x.shape[0], x.shape[0]))\n', (3612, 3638), True, 'import jax.numpy as jnp\n'), ((3669, 3690), 'jax.numpy.zeros', 'jnp.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (3678, 3690), True, 'import jax.numpy as jnp\n'), ((5596, 5631), 'jax.numpy.zeros', 'jnp.zeros', (['(x.shape[0], x.shape[0])'], {}), '((x.shape[0], x.shape[0]))\n', (5605, 5631), True, 'import jax.numpy as jnp\n'), ((5660, 5681), 'jax.numpy.zeros', 'jnp.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (5669, 5681), True, 'import jax.numpy as jnp\n'), ((1375, 1386), 'jax.numpy.sqrt', 'jnp.sqrt', (['(2)'], {}), '(2)\n', (1383, 1386), True, 'import jax.numpy as jnp\n'), ((2694, 2710), 'jax.numpy.array', 'jnp.array', (['[0.0]'], {}), '([0.0])\n', (2703, 2710), True, 'import jax.numpy as jnp\n'), ((2745, 2761), 'jax.numpy.array', 'jnp.array', (['[0.0]'], {}), '([0.0])\n', (2754, 2761), True, 'import jax.numpy as jnp\n'), ((3009, 3020), 'jax.numpy.sqrt', 'jnp.sqrt', (['(2)'], {}), '(2)\n', (3017, 3020), True, 'import jax.numpy as jnp\n'), ((3817, 3833), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (3826, 3833), True, 'import jax.numpy as jnp\n'), ((3840, 3858), 'jax.numpy.array', 'jnp.array', (['[[1.0]]'], {}), '([[1.0]])\n', (3849, 3858), True, 'import jax.numpy as jnp\n')]
|
# pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring,line-too-long
from unittest import mock
import pytest
from eze.plugins.tools.node_npmaudit import NpmAuditTool
from eze.utils.io import create_tempfile_path
from tests.plugins.tools.tool_helper import ToolMetaTestBase
class TestNpmAuditTool(ToolMetaTestBase):
    """Tests for the NpmAuditTool plugin: config creation, install detection,
    report parsing (npm v6/v7 snapshots), v7 recommendation/path helpers, and
    the generated cli command."""

    # hooks consumed by ToolMetaTestBase's shared test machinery
    ToolMetaClass = NpmAuditTool
    SNAPSHOT_PREFIX = "node-npmaudit"

    def test_creation__no_config(self):
        """An empty config is filled in entirely with defaults."""
        # Given
        input_config = {}
        expected_config = {
            "REPORT_FILE": create_tempfile_path("tmp-npmaudit-report.json"),
            "SOURCE": None,
            "ONLY_PROD": True,
            #
            "ADDITIONAL_ARGUMENTS": "",
            "IGNORED_FILES": None,
            "EXCLUDE": [],
            "IGNORED_VULNERABILITIES": None,
            "IGNORE_BELOW_SEVERITY": None,
            "DEFAULT_SEVERITY": None,
        }
        # When
        testee = NpmAuditTool(input_config)
        # Then
        assert testee.config == expected_config

    def test_creation__with_config(self):
        """A supplied SOURCE overrides the default; all else stays default."""
        # Given
        input_config = {
            "SOURCE": "src",
        }
        expected_config = {
            "REPORT_FILE": create_tempfile_path("tmp-npmaudit-report.json"),
            "SOURCE": "src",
            "ONLY_PROD": True,
            #
            "ADDITIONAL_ARGUMENTS": "",
            "IGNORED_FILES": None,
            "EXCLUDE": [],
            "IGNORED_VULNERABILITIES": None,
            "IGNORE_BELOW_SEVERITY": None,
            "DEFAULT_SEVERITY": None,
        }
        # When
        testee = NpmAuditTool(input_config)
        # Then
        assert testee.config == expected_config

    @mock.patch("eze.plugins.tools.node_npmaudit.extract_cmd_version", mock.MagicMock(return_value="6.14.11"))
    def test_check_installed__success(self):
        """A sufficiently new npm version is reported back verbatim."""
        # When
        expected_output = "6.14.11"
        output = NpmAuditTool.check_installed()
        # Then
        assert output == expected_output

    @mock.patch("eze.plugins.tools.node_npmaudit.extract_cmd_version", mock.MagicMock(return_value="5.12.11"))
    def test_check_installed__failure_version_low(self):
        """A too-old npm version yields an empty string (not installed-enough)."""
        # When
        expected_output = ""
        output = NpmAuditTool.check_installed()
        # Then
        assert output == expected_output

    @mock.patch("eze.plugins.tools.node_npmaudit.extract_cmd_version", mock.MagicMock(return_value=False))
    def test_check_installed__failure_unavailable(self):
        """No npm available at all yields False."""
        # When
        expected_output = False
        output = NpmAuditTool.check_installed()
        # Then
        assert output == expected_output

    def test_parse_report__npm6_snapshot(self, snapshot):
        """Parse an npm v6 audit fixture and compare against the snapshot."""
        # Test container fixture and snapshot
        self.assert_parse_report_snapshot_test(
            snapshot,
            {},
            "__fixtures__/plugins_tools/raw-node-npmaudit-v6-report.json",
            "plugins_tools/node-npmaudit-result-v6-output.json",
        )

    def test_parse_report__npm7_snapshot(self, snapshot):
        """Parse an npm v7 audit fixture and compare against the snapshot."""
        # Test container fixture and snapshot
        self.assert_parse_report_snapshot_test(
            snapshot,
            {},
            "__fixtures__/plugins_tools/raw-node-npmaudit-v7-report.json",
            "plugins_tools/node-npmaudit-result-v7-output.json",
        )

    # new v7 tests
    def test_create_recommendation_v7__major_fix(self):
        """A semver-major fix mentions the breaking change."""
        # Given
        expected_output = """fix available via `npm audit fix --force`
Will install mocha@8.4.0, which is a breaking change"""
        input_vulnerability = {"fixAvailable": {"name": "mocha", "version": "8.4.0", "isSemVerMajor": True}}
        testee = NpmAuditTool()
        # When
        output = testee.create_recommendation_v7(input_vulnerability)
        # Then
        assert output == expected_output

    def test_create_recommendation_v7__minor_fix(self):
        """A non-major fix omits the breaking-change warning."""
        # Given
        expected_output = """fix available via `npm audit fix --force`
Will install mocha@8.4.0"""
        input_vulnerability = {"fixAvailable": {"name": "mocha", "version": "8.4.0", "isSemVerMajor": False}}
        testee = NpmAuditTool()
        # When
        output = testee.create_recommendation_v7(input_vulnerability)
        # Then
        assert output == expected_output

    def test_create_recommendation_v7__no_details(self):
        """fixAvailable=True without details yields just the generic advice."""
        # Given
        expected_output = """fix available via `npm audit fix --force`"""
        input_vulnerability = {"fixAvailable": True}
        testee = NpmAuditTool()
        # When
        output = testee.create_recommendation_v7(input_vulnerability)
        # Then
        assert output == expected_output

    def test_create_recommendation_v7__no_fix_available(self):
        """fixAvailable=False yields 'no fix available'."""
        # Given
        expected_output = "no fix available"
        input_vulnerability = {"fixAvailable": False}
        testee = NpmAuditTool()
        # When
        output = testee.create_recommendation_v7(input_vulnerability)
        # Then
        assert output == expected_output

    def test_create_path_v7__nested_vul(self):
        """A vulnerability reached via other packages lists the insecure dependency chain."""
        # Given
        expected_output = """helmet>connect(2.11.1 - 3.6.4): has insecure dependency finalhandler>debug"""
        input_vulnerability = {
            "name": "connect",
            "severity": "low",
            "via": ["debug", "finalhandler"],
            "effects": ["helmet"],
            "range": "2.11.1 - 3.6.4",
            "nodes": ["node_modules/connect"],
            "fixAvailable": True,
        }
        testee = NpmAuditTool()
        # When
        output = testee.create_path_v7(input_vulnerability)
        # Then
        assert output == expected_output

    def test_create_path_v7__edge_vul(self):
        """A direct (edge) vulnerability includes the advisory title and range."""
        # Given
        expected_output = (
            """connect>finalhandler>mocha>debug(<= 2.6.8 || >= 3.0.0 <= 3.0.1): Regular Expression Denial of Service"""
        )
        input_vulnerability = {
            "name": "debug",
            "severity": "low",
            "via": [
                {
                    "source": 534,
                    "name": "debug",
                    "dependency": "debug",
                    "title": "Regular Expression Denial of Service",
                    "url": "https://npmjs.com/advisories/534",
                    "severity": "low",
                    "range": "<= 2.6.8 || >= 3.0.0 <= 3.0.1",
                }
            ],
            "effects": ["connect", "finalhandler", "mocha"],
            "range": "<=2.6.8 || 3.0.0 - 3.0.1",
            "nodes": ["node_modules/debug"],
            "fixAvailable": {"name": "mocha", "version": "8.4.0", "isSemVerMajor": True},
        }
        testee = NpmAuditTool()
        # When
        output = testee.create_path_v7(input_vulnerability)
        # Then
        assert output == expected_output

    @mock.patch("eze.utils.cli.async_subprocess_run")
    @mock.patch("eze.utils.cli.is_windows_os", mock.MagicMock(return_value=True))
    @mock.patch("eze.utils.language.node.install_node_dependencies", mock.MagicMock(return_value=True))
    @pytest.mark.asyncio
    async def test_run_scan__cli_command__std(self, mock_async_subprocess_run):
        """Default config (ONLY_PROD=True) adds --only=prod to the npm audit command."""
        # Given
        input_config = {"REPORT_FILE": "foo_report.json"}
        expected_cmd = "npm audit --json --only=prod"
        # Test run calls correct program
        await self.assert_run_scan_command(input_config, expected_cmd, mock_async_subprocess_run)

    @mock.patch("eze.utils.cli.async_subprocess_run")
    @mock.patch("eze.utils.cli.is_windows_os", mock.MagicMock(return_value=True))
    @mock.patch("eze.utils.language.node.install_node_dependencies", mock.MagicMock(return_value=True))
    @pytest.mark.asyncio
    async def test_run_scan__cli_command__non_prod(self, mock_async_subprocess_run):
        """ONLY_PROD=False drops the --only=prod flag."""
        # Given
        input_config = {"REPORT_FILE": "foo_report.json", "ONLY_PROD": False}
        expected_cmd = "npm audit --json"
        # Test run calls correct program
        await self.assert_run_scan_command(input_config, expected_cmd, mock_async_subprocess_run)
|
[
"unittest.mock.MagicMock",
"eze.plugins.tools.node_npmaudit.NpmAuditTool",
"unittest.mock.patch",
"eze.utils.io.create_tempfile_path",
"eze.plugins.tools.node_npmaudit.NpmAuditTool.check_installed"
] |
[((6817, 6865), 'unittest.mock.patch', 'mock.patch', (['"""eze.utils.cli.async_subprocess_run"""'], {}), "('eze.utils.cli.async_subprocess_run')\n", (6827, 6865), False, 'from unittest import mock\n'), ((7432, 7480), 'unittest.mock.patch', 'mock.patch', (['"""eze.utils.cli.async_subprocess_run"""'], {}), "('eze.utils.cli.async_subprocess_run')\n", (7442, 7480), False, 'from unittest import mock\n'), ((960, 986), 'eze.plugins.tools.node_npmaudit.NpmAuditTool', 'NpmAuditTool', (['input_config'], {}), '(input_config)\n', (972, 986), False, 'from eze.plugins.tools.node_npmaudit import NpmAuditTool\n'), ((1622, 1648), 'eze.plugins.tools.node_npmaudit.NpmAuditTool', 'NpmAuditTool', (['input_config'], {}), '(input_config)\n', (1634, 1648), False, 'from eze.plugins.tools.node_npmaudit import NpmAuditTool\n'), ((1937, 1967), 'eze.plugins.tools.node_npmaudit.NpmAuditTool.check_installed', 'NpmAuditTool.check_installed', ([], {}), '()\n', (1965, 1967), False, 'from eze.plugins.tools.node_npmaudit import NpmAuditTool\n'), ((1784, 1822), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '"""6.14.11"""'}), "(return_value='6.14.11')\n", (1798, 1822), False, 'from unittest import mock\n'), ((2254, 2284), 'eze.plugins.tools.node_npmaudit.NpmAuditTool.check_installed', 'NpmAuditTool.check_installed', ([], {}), '()\n', (2282, 2284), False, 'from eze.plugins.tools.node_npmaudit import NpmAuditTool\n'), ((2096, 2134), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '"""5.12.11"""'}), "(return_value='5.12.11')\n", (2110, 2134), False, 'from unittest import mock\n'), ((2570, 2600), 'eze.plugins.tools.node_npmaudit.NpmAuditTool.check_installed', 'NpmAuditTool.check_installed', ([], {}), '()\n', (2598, 2600), False, 'from eze.plugins.tools.node_npmaudit import NpmAuditTool\n'), ((2413, 2447), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '(False)'}), '(return_value=False)\n', (2427, 2447), False, 'from unittest import mock\n'), 
((3685, 3699), 'eze.plugins.tools.node_npmaudit.NpmAuditTool', 'NpmAuditTool', ([], {}), '()\n', (3697, 3699), False, 'from eze.plugins.tools.node_npmaudit import NpmAuditTool\n'), ((4140, 4154), 'eze.plugins.tools.node_npmaudit.NpmAuditTool', 'NpmAuditTool', ([], {}), '()\n', (4152, 4154), False, 'from eze.plugins.tools.node_npmaudit import NpmAuditTool\n'), ((4514, 4528), 'eze.plugins.tools.node_npmaudit.NpmAuditTool', 'NpmAuditTool', ([], {}), '()\n', (4526, 4528), False, 'from eze.plugins.tools.node_npmaudit import NpmAuditTool\n'), ((4866, 4880), 'eze.plugins.tools.node_npmaudit.NpmAuditTool', 'NpmAuditTool', ([], {}), '()\n', (4878, 4880), False, 'from eze.plugins.tools.node_npmaudit import NpmAuditTool\n'), ((5515, 5529), 'eze.plugins.tools.node_npmaudit.NpmAuditTool', 'NpmAuditTool', ([], {}), '()\n', (5527, 5529), False, 'from eze.plugins.tools.node_npmaudit import NpmAuditTool\n'), ((6665, 6679), 'eze.plugins.tools.node_npmaudit.NpmAuditTool', 'NpmAuditTool', ([], {}), '()\n', (6677, 6679), False, 'from eze.plugins.tools.node_npmaudit import NpmAuditTool\n'), ((6913, 6946), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (6927, 6946), False, 'from unittest import mock\n'), ((7017, 7050), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (7031, 7050), False, 'from unittest import mock\n'), ((7528, 7561), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (7542, 7561), False, 'from unittest import mock\n'), ((7632, 7665), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (7646, 7665), False, 'from unittest import mock\n'), ((567, 615), 'eze.utils.io.create_tempfile_path', 'create_tempfile_path', (['"""tmp-npmaudit-report.json"""'], {}), "('tmp-npmaudit-report.json')\n", (587, 615), False, 'from eze.utils.io import create_tempfile_path\n'), 
((1228, 1276), 'eze.utils.io.create_tempfile_path', 'create_tempfile_path', (['"""tmp-npmaudit-report.json"""'], {}), "('tmp-npmaudit-report.json')\n", (1248, 1276), False, 'from eze.utils.io import create_tempfile_path\n')]
|
# Generated by Django 3.2.12 on 2022-03-19 17:25
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.fields
class Migration(migrations.Migration):
    """Auto-generated migration: removes Dog's implicit ``id`` field and
    alters the ``shelter`` foreign key to serve as the key column."""

    dependencies = [
        ('dog_shelters', '0011_auto_20220319_1724'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='dog',
            name='id',
        ),
        migrations.AlterField(
            model_name='dog',
            name='shelter',
            # NOTE(review): ``primary_key`` normally takes a bool; passing the
            # ``BigAutoField`` class (a truthy value) here looks suspect —
            # confirm this migration applies as intended
            # (``primary_key=True`` may have been meant).
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, primary_key=django.db.models.fields.BigAutoField, serialize=False, to='dog_shelters.shelter'),
        ),
    ]
|
[
"django.db.migrations.RemoveField",
"django.db.models.ForeignKey"
] |
[((305, 356), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""dog"""', 'name': '"""id"""'}), "(model_name='dog', name='id')\n", (327, 356), False, 'from django.db import migrations, models\n'), ((500, 666), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'primary_key': 'django.db.models.fields.BigAutoField', 'serialize': '(False)', 'to': '"""dog_shelters.shelter"""'}), "(on_delete=django.db.models.deletion.PROTECT, primary_key=\n django.db.models.fields.BigAutoField, serialize=False, to=\n 'dog_shelters.shelter')\n", (517, 666), False, 'from django.db import migrations, models\n')]
|
"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# ----------------------------------------------------------------------------------
import argparse
import os
import subprocess
import sys
from subprocess import Popen, PIPE
from utils import db_utils as db
# ------------------------------------- GLOBALS ------------------------------------
# this group only allows 10 rfsearch jobs to run concurrently
# this means 10*100 = 1000 jobs running concurrently which is the lsf limit
LSF_GROUP = "/family_srch"
# memory request passed to bsub (-M and rusage); units depend on cluster config
MEMORY = 2000
# cores requested per job (bsub -n)
CPU = 8
MAX_JOB_COUNT = 1000
# NOTE(review): families given special treatment; the dict values are unused
# placeholders — presumably these accessions are skipped or handled manually.
# TODO confirm against the code that consumes this mapping.
family_exceptions = {'RF02924': '', 'RF03064': '', 'RF02913': '',
                     'RF02543': '', 'RF00017': '', 'RF02540': ''}
# ----------------------------------------------------------------------------------
def checkout_family(rfam_acc):
"""
Checks out a family from Rfam based on a valid Rfam accession.
rfam_acc: A valid Rfam accession
return: None
"""
cmd = "rfco.pl %s" % rfam_acc
subprocess.call(cmd, shell=True)
# add some checks here
# ----------------------------------------------------------------------------------
def submit_new_rfsearch_job(family_dir, rfmake=False):
"""
Submits a new lsf job that runs rfsearch to update SCORES for a new release.
If no threshold is set with rfsearch.pl, it uses existing thresholds by default.
family_dir: The physical location of the family directory
rfmake: If True, run rfmake after rfsearch completes. Default False
return: None
"""
# use the pre-process command to change directory to family_dir
rfam_acc = os.path.basename(family_dir)
lsf_err_file = os.path.join(family_dir, "auto_rfsearch.err")
lsf_out_file = os.path.join(family_dir, "auto_rfsearch.out")
cmd = ("bsub -M %s -R \"rusage[mem=%s]\" -o %s -e %s -n %s -g %s -q production-rh7 "
"-J %s \"cd %s && rfsearch.pl -cnompi -q production-rh7 -relax\"")
# If rfmake is set to True, runs rfmake following rfsearch, otherwise run rfsearch
# only by default
if rfmake is True:
cmd = ("bsub -M %s -R \"rusage[mem=%s]\" -o %s -e %s -n %s -g %s -q production-rh7 "
"-J %s \"cd %s && rfsearch.pl -cnompi -q production-rh7 -relax && rfmake.pl\"")
subprocess.call(cmd % (MEMORY, MEMORY, lsf_out_file, lsf_err_file,
CPU, LSF_GROUP, rfam_acc, family_dir), shell=True)
# ----------------------------------------------------------------------------------
def submit_new_rfmake_job(family_dir):
"""
Submits a new lsf job that runs rfsearch to update SCORES for a new release.
If no threshold is set with rfsearch.pl, it uses existing thresholds by default.
family_dir: The physical location of the family directory
rfmake: If True, run rfmake after rfsearch completes. Default False
return: None
"""
# use the pre-process command to change directory to family_dir
rfam_acc = os.path.basename(family_dir)
lsf_err_file = os.path.join(family_dir, "auto_rfmake.err")
lsf_out_file = os.path.join(family_dir, "auto_rfmake.out")
cmd = ("bsub -M %s -R \"rusage[mem=%s]\" -o %s -e %s -n %s -g %s -q production-rh7 "
"-J %s \"cd %s && rfmake.pl\"")
subprocess.call(cmd % (MEMORY, MEMORY, lsf_out_file, lsf_err_file,
CPU, LSF_GROUP, rfam_acc, family_dir), shell=True)
# ----------------------------------------------------------------------------------
def load_rfam_accessions_from_file(accession_list):
"""
This function parses a .txt file containing Rfam accessions and returns those
accession_list: This is a .txt file containing a list of Rfam accessions
return: list of Rfam family accessions
"""
fp = open(accession_list, 'r')
accessions = [x.strip() for x in fp]
fp.close()
return accessions
# ----------------------------------------------------------------------------------
def checkout_and_search_family(rfam_acc, dest_dir, rfmake=False):
"""
This function combines family checkout (rfco.pl) and re-scoring of hits
using rfsearch.pl. If the family directory already exists, then the
checkout step will be ignored
rfam_acc: A valid Rfam family accession (RFXXXXX)
dest_dir: A valid destination directory, where to checkout the family
rfmake: If True, run rfmake after rfsearch completes. Default False
return: void
"""
# get family directory
family_dir = os.path.join(dest_dir, rfam_acc)
# checkout family if not done already
if not os.path.exists(family_dir):
os.chdir(dest_dir)
checkout_family(rfam_acc)
submit_new_rfsearch_job(family_dir, rfmake)
# ----------------------------------------------------------------------------------
def parse_arguments():
"""
Uses python's argparse to parse the command line arguments
return: Argparse parser object
"""
# create a new argument parser object
parser = argparse.ArgumentParser(description='Update scores for new release')
# group required arguments together
req_args = parser.add_argument_group("required arguments")
req_args.add_argument('--dest-dir', help='destination directory where to checkout families',
type=str, required=True)
mutually_exclusive_args = parser.add_mutually_exclusive_group()
mutually_exclusive_args.add_argument('-f', help='a file containing a list of Rfam family accessions', type=str)
mutually_exclusive_args.add_argument('--all', help='runs rfsearch on all families', action="store_true")
mutually_exclusive_args.add_argument('--acc', help="a valid rfam family accession RFXXXXX",
type=str, default=None)
parser.add_argument('--rfmake', help='run rfmake after rfsearch completion', action="store_true")
parser.add_argument('-v', help='runs validation checks', action="store_true")
parser.add_argument('--report', help='generates search reports', action="store_true")
# this is mutually exclusive with --acc option
parser.add_argument('--exclude-type', help='type(s) of ncRNAs to exclude', type=str, default=None)
return parser
# ----------------------------------------------------------------------------------
def is_valid_family(dest_dir, rfam_acc):
"""
Checks if the job ran successfully by checking if .err file is empty and
that Success keyword exists in .out file. As an additional sanity check, we
look for the rfsearch.log file as an indication that rfsearch actually ran.
return: True if the family is valid, False otherwise
"""
family_dir = os.path.join(dest_dir, rfam_acc)
# If log file does not exist rfsearch did not run for some reason
if not os.path.exists(os.path.join(family_dir, "rfsearch.log")):
return False
# check if lsf .err file is empty
if not os.path.getsize(os.path.join(family_dir, "auto_rfsearch.err")) == 0:
return check_rfsearch_log_success(family_dir)
# check if success in .out file
lsf_out_file = os.path.join(family_dir, "auto_rfsearch.out")
process = Popen(['grep', 'Success', lsf_out_file], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = process.communicate()
if output.find("Successfully completed.") == -1:
return False
return True
# ----------------------------------------------------------------------------------
def get_missing_seeds_seedoutlist(num_seed_db, seedoutlist):
"""
Parses the seedoutlist file and compares the number of seed sequences
obtained from the database and the number of seed hits in the outlist file
num_seed_db: The number of seed sequences found in the database
seedoutlist: The SEED specific outlist file
return (boolean): True if the number is consistent, False otherwise.
"""
unique_seeds = extract_unique_seeds_from_seedoutlist(seedoutlist)
seed_count = len(unique_seeds.keys())
return num_seed_db - seed_count
# ----------------------------------------------------------------------------------
def check_rfsearch_log_success(family_dir):
"""
Checks if the rfsearch.log file contains the success string # [ok] in
order to mark the family as successfully completed.
"""
rfsearch_log_file = os.path.join(family_dir, "rfsearch.log")
process = Popen(['tail', '-1', rfsearch_log_file], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = process.communicate()
if output.find("# [ok]") == -1:
return False
return True
# ----------------------------------------------------------------------------------
def count_hits(scores_file):
"""
Function to count SEED and FULL hits in outlist and species files at three
different thresholds (above ga, below ga, below rev)
scores_file: This is either the species or the outlist files from the family
directories
return: A dictionary with SEED and FULL counts at different thresholds
"""
# check point flags
flag_curr = 0
flag_rev = 0
# initialization of counts
counts = {"seed_above_ga": 0,
"full_above_ga": 0,
"full_below_ga": 0,
"seed_below_ga": 0,
"seed_below_rev": 0,
"full_below_rev": 0}
# load file for easy parsing
fp = open(scores_file, 'r')
# generate stats
for line in fp:
# make flag_curr = 1 when we reach that line
if line.find("CURRENT THRESHOLD") != -1:
flag_curr = 1
continue
# when we reach the reversed sequence line set the flag to 1
if line.find("BEST REVERSED") != -1:
flag_rev = 1
continue
# we are above the
if flag_curr == 0 and flag_rev == 0:
if line.find("SEED") != -1:
counts["seed_above_ga"] += 1
elif line.find("FULL") != -1:
counts["full_above_ga"] += 1
# we are somewhere in between current threshold and reversed cutoff
elif flag_curr == 1 and flag_rev == 0:
if line.find("SEED") != -1:
counts["seed_below_ga"] += 1
elif line.find("FULL") != -1:
counts["full_below_ga"] += 1
elif flag_curr == 1 and flag_rev == 1:
if line.find("SEED") != -1:
counts["seed_below_rev"] += 1
elif line.find("FULL") != -1:
counts["full_below_rev"] += 1
fp.close()
return counts
# ----------------------------------------------------------------------------------
def extract_unique_seeds_from_seedoutlist(seedoutlist):
"""
Extracts all unique SEED accessions in the form of rfamseq_acc/start-end.
Ignores duplicated hits.
"""
seeds_found = {}
fp = open(seedoutlist, 'r')
for line in fp:
if line[0] != '#':
line = [x for x in line.strip().split(' ') if x != '']
if line[3] not in seeds_found:
seeds_found[line[3]] = float(line[0])
fp.close()
return seeds_found
# ----------------------------------------------------------------------------------
def generate_search_stats(family_dir, scores_file='species', tag_miRNA=True):
"""
Function to generate useful search stats per family
family_dir: A valid Rfam family checkout directory where pre-computed searches
were ran
scores_file: A string specifying the scores file to parse (outlist, species)
return: report string
"""
rfam_acc = os.path.basename(family_dir)
# check point flags
flag_curr = 0
flag_rev = 0
elements = None
prev_line = None
seen_ga = False
seen_rev_before_ga = False
ga_bit_score = 0.0
rev_bit_score = 0.0
ga_rev_seq_gap = 0 # gap in sequences between GA/REV thresholds
is_miRNA = 0
seed_above_ga = None
last_seed_seen = None
seq_position = 0
last_seed_pos = 0
seed_above_ga_pos = 0
ga_position = 0
rev_position = 0
position = 1 # keeps hold of the index position with respect to the entire outlist
review_family = False
full_check = False
unique_seeds = {}
# initialization of counts
counts = {"seed_above_ga": 0,
"full_above_ga": 0,
"full_below_ga": 0,
"seed_below_ga": 0,
"seed_below_rev": 0,
"full_below_rev": 0,
"other_below_ga": 0}
# fetch miRNA accessions from the database
miRNAs = {}
if tag_miRNA is True:
miRNAs.update(db.fetch_type_specific_rfam_accessions("miRNA", return_type="dict"))
if rfam_acc in miRNAs:
is_miRNA = 1
# get some useful numbers from the database
num_seed_seqs_db = db.get_number_of_seed_sequences(rfam_acc)
num_full_hits_db = db.get_number_of_full_hits(rfam_acc)
unique_ncbi_ids_db = db.get_family_unique_ncbi_ids(rfam_acc)
seedoutlist = os.path.join(family_dir, "seedoutlist")
missing_seed_seqs_so = get_missing_seeds_seedoutlist(num_seed_seqs_db, seedoutlist)
scores_fp = open(os.path.join(family_dir, scores_file), 'r')
# this will basically read the first line which is a header so no harm
line = scores_fp.readline()
prev_line = line
ncbi_ids_from_hits = set()
# generate stats
for line in scores_fp:
position += 1 # starts from 1 because we read the 1st line out of the loop
# make flag_curr = 1 when we reach that line
if line.find("CURRENT GA THRESHOLD") != -1:
flag_curr = 1
# if we reached this point, it means we saw GA
seen_ga = True
ga_position = position
# get all the elements of the last score line above the GA threshold
seed_above_ga = last_seed_seen
seed_above_ga_pos = last_seed_pos
# get GA threshold
elements = line.split(' ')
ga_bit_score = float(elements[-3])
continue
# when we reach the reversed sequence line set the flag to 1
if line.find("BEST REVERSED") != -1:
flag_rev = 1
rev_position = position
# check if GA is false at this point. If yes, this means we saw REV first.
# setting flag to True
if seen_ga is False:
seen_rev_before_ga = True
continue
if line[0] != '#':
# increase sequence position
# seq_position += 1
elements = [x for x in line.strip().split(' ') if x != '']
# first line after hitting REV line
if flag_rev == 1 and rev_bit_score == 0.0:
rev_bit_score = float(elements[0])
# add id to ncbi_ids
ncbi_ids_from_hits.add(elements[5])
# we are above the GA
if flag_curr == 0 and flag_rev == 0:
if elements[2] == "SEED":
# make sure the sequences is not in the dictionary and that it starts from 1
if elements[3] not in unique_seeds:
counts["seed_above_ga"] += 1
unique_seeds[elements[3]] = (elements[8], elements[9])
elif elements[2] == "FULL" or elements[2] == "FULL-SEED":
counts["full_above_ga"] += 1
# we are somewhere in between current threshold and reversed cutoff
elif flag_curr == 1 and flag_rev == 0:
if elements[2] == "SEED":
if elements[3] not in unique_seeds:
counts["seed_below_ga"] += 1
unique_seeds[elements[3]] = (elements[8], elements[9])
elif flag_curr == 1 and flag_rev == 1:
if elements[2] == "SEED":
if elements[3] not in unique_seeds:
counts["seed_below_rev"] += 1
unique_seeds[elements[3]] = (elements[8], elements[9])
# if between GA and REV count sequences
if ((flag_curr == 1 and flag_rev == 0) or (flag_curr == 0 and flag_rev == 1)):
ga_rev_seq_gap += 1
# always stores the last seed up to the current iteration point
# at the end of the file, last_seed_seen will be holding the last SEED sequence
# seen in the outlist file
if elements[2] == "SEED":
last_seed_seen = elements
# sets position
last_seed_pos = position
# current line becomes previous at the end of each iteration
prev_line = line
scores_fp.close()
# computes the number of any missing SEED sequences. That is SEEDs that do not appear in the outlist
missing_seed_seqs_o = abs(
num_seed_seqs_db - (counts["seed_above_ga"] + counts["seed_below_ga"] + counts["seed_below_rev"]))
# compute the total number of ncbi_ids including
# total_ncbi_ids_found = len(list(set(unique_ncbi_ids_db).union(ncbi_ids_from_hits)))
# calulates the number of new ncbi ids added to the full region after a new search
# new_ncbi_ids_found = abs(total_ncbi_ids_found - len(unique_ncbi_ids_db))
# ABS(NFULL_OLD-NFULL_NEW) > 0.1 * NFULL_OLD
# full_diff = abs(num_full_hits_db - (counts["full_above_ga"] + counts["full_below_ga"]))
# compute GA/REV bit score difference
ga_rev_bitscore_diff = abs(ga_bit_score - rev_bit_score)
# if full_diff > (0.1 * num_full_hits_db):
# full_check = True
# constraints to be met for reviewing families
if (seen_rev_before_ga or (counts["seed_below_ga"] > 0) or (counts["seed_below_rev"] > 0)):
review_family = True
fields = [rfam_acc, str(num_seed_seqs_db), str(counts["seed_above_ga"]), str(counts["seed_below_ga"]),
str(counts["seed_below_rev"]), str(missing_seed_seqs_o), str(missing_seed_seqs_so),
str(ga_bit_score), str(rev_bit_score), str(ga_rev_bitscore_diff), str(ga_rev_seq_gap),
str(int(seen_rev_before_ga)), seed_above_ga[0], str(seed_above_ga_pos),
str(ga_position), str(rev_position), last_seed_seen[0], str(last_seed_pos),
str(int(review_family))]
"""
fields = [rfam_acc, str(num_seed_seqs_db), str(counts["seed_above_ga"]), str(counts["seed_below_ga"]),
str(counts["seed_below_rev"]), str(missing_seed_seqs_o), str(missing_seed_seqs_so),
str(num_full_hits_db), str(counts["full_above_ga"]), str(len(unique_ncbi_ids_db)),
str(new_ncbi_ids_found), str(ga_bit_score), str(rev_bit_score), str(ga_rev_bitscore_diff),
str(ga_rev_seq_gap), str(int(seen_rev_before_ga)), seed_above_ga[0], str(seed_above_ga_pos),
str(ga_position), str(rev_position), last_seed_seen[0], str(last_seed_pos), str(int(review_family))]
"""
if tag_miRNA is True:
fields.append(str(is_miRNA))
print ('\t'.join(fields))
# ----------------------------------------------------------------------------------
def write_family_report_file(family_dir, scores_file="species"):
"""
Function to generate a report about the outcome of a new search
family_dir: A valid location of an Rfam family checkout
scores_file: This is a string which specifies the file to parse (outlist | species)
It parses species file by default.
return (int): A number specifying the curation priority for a specific family, where
3: critical, 2: critical but not erroneous, 1: check seed, 0: no attention needed
"""
priority = 0
# fetch number of seed sequences from the database
rfam_acc = os.path.basename(family_dir)
no_seed_seqs = db.get_number_of_seed_sequences(rfam_acc)
scores_file_loc = os.path.join(family_dir, scores_file)
counts = count_hits(scores_file_loc)
report_fp = open(os.path.join(family_dir, "search_report.txt"), 'w')
# sum all seed counts to get total number of seed sequences
counted_seed_seqs = counts["seed_above_ga"] + counts["seed_below_ga"] + counts["seed_below_rev"]
# Critical SEED issues
if counts["seed_below_rev"] != 0:
report_fp.write("CRITICAL: %s SEED sequences below reversed cutoff\n" % str(counts["seed_below_rev"]))
priority = 3
if counts["seed_below_ga"] > counts["seed_above_ga"]:
percentage = float(counts["seed_below_ga"] * 100) / float(no_seed_seqs)
report_fp.write("CRITICAL: More SEED sequences below GA than above. %s\n" % percentage)
if priority < 2:
priority = 2
if counted_seed_seqs != no_seed_seqs:
report_fp.write(
"WARNING: The number of SEED sequences in the database does not match the number in the alignment\n\n")
priority = 3
# TODO - Develop code to check taxonomic distribution
# TODO - Use information from FULL hits too
# some useful information
report_fp.write("Total number of SEED sequences in DB: %s\n" % no_seed_seqs)
report_fp.write("Total number of SEED sequences counted: %s\n" % counted_seed_seqs)
report_fp.write("%s SEED sequences are above GA\n" % counts["seed_above_ga"])
report_fp.write("%s SEED sequences are below GA\n" % counts["seed_below_ga"])
report_fp.write("%s SEED sequences are below the reversed cutoff\n" % counts["seed_below_rev"])
report_fp.close()
return priority
# ----------------------------------------------------------------------------------
def extract_scores_from_outlist_file(outlist):
"""
:param outlist:
:return:
"""
scores = {'SEED': [], 'FULL': [], 'OTHER': []}
outlist_fp = open(outlist, 'r')
for line in outlist_fp:
if line[0] != '#':
line = [x for x in line.strip().split(' ') if x!='']
scores[line[2]].append(float(line[0]))
else:
# if we reached REVERSED line, we treat everything as TNs
# break and return
if line.find("BEST REVERSED") != -1:
break
outlist_fp.close()
return scores
# --------------------------------------------------------------
def print_report_header(extended=True):
"""
Prints the report header
extended (boolean): If true, prints all the columns, otherwise just the
short version
returns: void
"""
if extended is True:
print (
"RFAM_ACC\tnum_seed_seqs\tseed_above_GA\tseed_below_ga\tseed_below_rev\tmissing_seeds_outlist\t".upper()),
print ("missing_seeds_seedoutlist\tnum_full_DB\tfull_above_ga\tUNIQUE_NCBI_ID_DB\tNOVEL_NCBI_IDs\t".upper()),
print (
"ga_bit_SCORE\trev_bit_score\tGA_REV_SCORE_diff\tga_rev_seq_gap\tREV_before_GA\tseed_above_ga_score\t".upper()),
print ("seed_above_ga_pos\tga_pos\trev_pos\tlast_seed_score\tlast_seed_pos\treview_family\tis_miRNA\n".upper()),
else:
print (
"RFAM_ACC\tnum_seed_seqs\tseed_above_GA\tseed_below_ga\tseed_below_rev\tmissing_seeds_outlist\t".upper()),
print ("missing_seeds_seedoutlist\tga_bit_SCORE\trev_bit_score\tGA_REV_SCORE_diff\tga_rev_seq_gap\t".upper()),
print ("REV_before_GA\tseed_above_ga_score\tseed_above_ga_pos\tga_pos\trev_pos\tlast_seed_score\t".upper()),
print ("last_seed_pos\treview_family\tis_miRNA\n".upper()),
# ----------------------------------------------------------------------------------
if __name__ == '__main__':
outlist = "../data/../outlist"
print (extract_scores_from_outlist_file(outlist))
"""
# create a new argument parser object
parser = parse_arguments()
args = parser.parse_args()
if args.acc and not args.v and not args.report:
# check accession provided is valid
if args.acc[0:2] == 'RF' and len(args.acc) == 7:
os.chdir(args.dest_dir)
checkout_and_search_family(args.acc, args.dest_dir, rfmake=args.rfmake)
elif args.f and not args.v:
if not os.path.isfile(args.f):
print ("The file location you provided does not exist!\n")
sys.exit()
# move to destination directory
os.chdir(args.dest_dir)
accessions = load_rfam_accessions_from_file(args.f)
\"""
# get number of job batches we need to submit
# casting to int chops off decimals and ceil rounds up to nearest int
if len(accessions) > MAX_JOB_COUNT:
no_batches = int(math.ceil(len(accessions)/MAX_JOB_COUNT))
i = 0
while i < no_batches:
lidx = i * MAX_JOB_COUNT # left index
ridx = (i+1) * MAX_JOB_COUNT # right index
# get exactly MAX_JOB_COUNT items
if i < no_batches - 1:
new_batch = accessions[lidx:ridx]
# get remaining accessions for last batch
else:
new_batch = accessions[lidx:]
# call function to submit batch
# while monitoring is True:
# cluster monitoring function to be called here
i+1 # this is done when the monitoring loop becomes false which is a signal to submit another batch
\"""
for rfam_acc in accessions:
checkout_and_search_family(rfam_acc, args.dest_dir, rfmake=args.rfmake)
# run rfsearch on all families in the database
elif args.all and not args.v and not args.report and not args.rfmake:
# fetch Rfam family accessions from the database
# call checkout_and_search_family for every family in the list
# fetches all rfam accessions from the database in DESC order based on the number of sequences in SEEDs
rfam_acc_list = db.fetch_rfam_accs_sorted(order='DESC')
for rfam_acc in rfam_acc_list:
checkout_and_search_family(rfam_acc, args.dest_dir, rfmake=args.rfmake)
# validate rfsearch runs
elif args.v:
# validate a single family
if args.acc:
if not is_valid_family(args.dest_dir, args.acc):
print ("The family %s does not validate!" % args.acc)
# validate runs for all accessions in the input file
elif args.f:
validation_file = os.path.join(args.dest_dir, "validation.log")
fp = open(validation_file, 'w')
accessions = load_rfam_accessions_from_file(args.f)
for rfam_acc in accessions:
if not is_valid_family(args.dest_dir, rfam_acc):
fp.write(rfam_acc + '\n')
fp.close()
if os.path.getsize(validation_file) == 0:
print ("Validation process completed! All searches completed successfully!")
else:
print ("Validation process completed! Check validation.log for erroneous searches!")
# validate all families in the directory, but don't generate any reports
elif args.all and not args.report:
validation_file = os.path.join(args.dest_dir, "validation.log")
fp = open(validation_file, 'w')
accessions = [x for x in os.listdir(args.dest_dir) if os.path.isdir(os.path.join(args.dest_dir, x))]
for rfam_acc in accessions:
if not is_valid_family(args.dest_dir, rfam_acc):
fp.write(rfam_acc + '\n')
fp.close()
if os.path.getsize(validation_file) == 0:
print ("Validation process completed! All searches completed successfully!")
else:
print ("Validation process completed! Check validation.log for erroneous searches!")
# generate reports
elif args.report:
# print report header
print_report_header(extended=False)
# generate report for a specific family
if args.acc:
# check if searches where validated
if not os.path.exists(os.path.join(args.dest_dir, "validation.log")):
sys.exit("WARNING: This search may be invalid. Run validation and try again!")
family_dir = os.path.join(args.dest_dir, args.acc)
generate_search_stats(family_dir, scores_file='species', tag_miRNA=True)
# generate reports for all families in the destination directory
elif args.all:
families = [x for x in os.listdir(args.dest_dir) if os.path.isdir(os.path.join(args.dest_dir, x))]
# fetch Rfam family accessions to exclude if defined
exclude_accs = {}
if args.exclude_type:
exclude_accs = db.fetch_type_specific_rfam_accessions(args.exclude_type, return_type="dict")
for family in families:
# families of which searches did not complete
# remove the database on
if family not in exclude_accs and family not in family_exceptions:
family_dir = os.path.join(args.dest_dir, family)
generate_search_stats(family_dir, scores_file='species', tag_miRNA=True)
# run rfmake
elif args.rfmake:
# run rfmake on all families
if args.all:
families = [x for x in os.listdir(args.dest_dir) if os.path.isdir(os.path.join(args.dest_dir, x))]
for family in families:
family_dir = os.path.join(args.dest_dir, family)
submit_new_rfmake_job(family_dir)
# run rfmake for a specific family
elif args.acc:
family_dir = os.path.join(args.dest_dir, args.acc)
submit_new_rfmake_job(family_dir)
# run rfmake for all accessions in the file
elif args.f:
fp = open(args.f, r)
families = [x.strip() for x in fp]
fp.close()
for family in families:
family_dir = os.path.join(args.dest_dir, family)
submit_new_rfmake_job(family_dir)
"""
|
[
"subprocess.Popen",
"argparse.ArgumentParser",
"os.path.basename",
"utils.db_utils.get_number_of_seed_sequences",
"os.path.exists",
"utils.db_utils.get_family_unique_ncbi_ids",
"utils.db_utils.fetch_type_specific_rfam_accessions",
"subprocess.call",
"utils.db_utils.get_number_of_full_hits",
"os.path.join",
"os.chdir"
] |
[((1526, 1558), 'subprocess.call', 'subprocess.call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (1541, 1558), False, 'import subprocess\n'), ((2151, 2179), 'os.path.basename', 'os.path.basename', (['family_dir'], {}), '(family_dir)\n', (2167, 2179), False, 'import os\n'), ((2199, 2244), 'os.path.join', 'os.path.join', (['family_dir', '"""auto_rfsearch.err"""'], {}), "(family_dir, 'auto_rfsearch.err')\n", (2211, 2244), False, 'import os\n'), ((2264, 2309), 'os.path.join', 'os.path.join', (['family_dir', '"""auto_rfsearch.out"""'], {}), "(family_dir, 'auto_rfsearch.out')\n", (2276, 2309), False, 'import os\n'), ((2804, 2925), 'subprocess.call', 'subprocess.call', (['(cmd % (MEMORY, MEMORY, lsf_out_file, lsf_err_file, CPU, LSF_GROUP,\n rfam_acc, family_dir))'], {'shell': '(True)'}), '(cmd % (MEMORY, MEMORY, lsf_out_file, lsf_err_file, CPU,\n LSF_GROUP, rfam_acc, family_dir), shell=True)\n', (2819, 2925), False, 'import subprocess\n'), ((3496, 3524), 'os.path.basename', 'os.path.basename', (['family_dir'], {}), '(family_dir)\n', (3512, 3524), False, 'import os\n'), ((3544, 3587), 'os.path.join', 'os.path.join', (['family_dir', '"""auto_rfmake.err"""'], {}), "(family_dir, 'auto_rfmake.err')\n", (3556, 3587), False, 'import os\n'), ((3607, 3650), 'os.path.join', 'os.path.join', (['family_dir', '"""auto_rfmake.out"""'], {}), "(family_dir, 'auto_rfmake.out')\n", (3619, 3650), False, 'import os\n'), ((3789, 3910), 'subprocess.call', 'subprocess.call', (['(cmd % (MEMORY, MEMORY, lsf_out_file, lsf_err_file, CPU, LSF_GROUP,\n rfam_acc, family_dir))'], {'shell': '(True)'}), '(cmd % (MEMORY, MEMORY, lsf_out_file, lsf_err_file, CPU,\n LSF_GROUP, rfam_acc, family_dir), shell=True)\n', (3804, 3910), False, 'import subprocess\n'), ((5026, 5058), 'os.path.join', 'os.path.join', (['dest_dir', 'rfam_acc'], {}), '(dest_dir, rfam_acc)\n', (5038, 5058), False, 'import os\n'), ((5533, 5601), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Update 
scores for new release"""'}), "(description='Update scores for new release')\n", (5556, 5601), False, 'import argparse\n'), ((7217, 7249), 'os.path.join', 'os.path.join', (['dest_dir', 'rfam_acc'], {}), '(dest_dir, rfam_acc)\n', (7229, 7249), False, 'import os\n'), ((7640, 7685), 'os.path.join', 'os.path.join', (['family_dir', '"""auto_rfsearch.out"""'], {}), "(family_dir, 'auto_rfsearch.out')\n", (7652, 7685), False, 'import os\n'), ((7701, 7779), 'subprocess.Popen', 'Popen', (["['grep', 'Success', lsf_out_file]"], {'stdin': 'PIPE', 'stdout': 'PIPE', 'stderr': 'PIPE'}), "(['grep', 'Success', lsf_out_file], stdin=PIPE, stdout=PIPE, stderr=PIPE)\n", (7706, 7779), False, 'from subprocess import Popen, PIPE\n'), ((8876, 8916), 'os.path.join', 'os.path.join', (['family_dir', '"""rfsearch.log"""'], {}), "(family_dir, 'rfsearch.log')\n", (8888, 8916), False, 'import os\n'), ((8931, 9009), 'subprocess.Popen', 'Popen', (["['tail', '-1', rfsearch_log_file]"], {'stdin': 'PIPE', 'stdout': 'PIPE', 'stderr': 'PIPE'}), "(['tail', '-1', rfsearch_log_file], stdin=PIPE, stdout=PIPE, stderr=PIPE)\n", (8936, 9009), False, 'from subprocess import Popen, PIPE\n'), ((12120, 12148), 'os.path.basename', 'os.path.basename', (['family_dir'], {}), '(family_dir)\n', (12136, 12148), False, 'import os\n'), ((13332, 13373), 'utils.db_utils.get_number_of_seed_sequences', 'db.get_number_of_seed_sequences', (['rfam_acc'], {}), '(rfam_acc)\n', (13363, 13373), True, 'from utils import db_utils as db\n'), ((13397, 13433), 'utils.db_utils.get_number_of_full_hits', 'db.get_number_of_full_hits', (['rfam_acc'], {}), '(rfam_acc)\n', (13423, 13433), True, 'from utils import db_utils as db\n'), ((13459, 13498), 'utils.db_utils.get_family_unique_ncbi_ids', 'db.get_family_unique_ncbi_ids', (['rfam_acc'], {}), '(rfam_acc)\n', (13488, 13498), True, 'from utils import db_utils as db\n'), ((13518, 13557), 'os.path.join', 'os.path.join', (['family_dir', '"""seedoutlist"""'], {}), "(family_dir, 'seedoutlist')\n", 
(13530, 13557), False, 'import os\n'), ((20170, 20198), 'os.path.basename', 'os.path.basename', (['family_dir'], {}), '(family_dir)\n', (20186, 20198), False, 'import os\n'), ((20218, 20259), 'utils.db_utils.get_number_of_seed_sequences', 'db.get_number_of_seed_sequences', (['rfam_acc'], {}), '(rfam_acc)\n', (20249, 20259), True, 'from utils import db_utils as db\n'), ((20283, 20320), 'os.path.join', 'os.path.join', (['family_dir', 'scores_file'], {}), '(family_dir, scores_file)\n', (20295, 20320), False, 'import os\n'), ((5112, 5138), 'os.path.exists', 'os.path.exists', (['family_dir'], {}), '(family_dir)\n', (5126, 5138), False, 'import os\n'), ((5148, 5166), 'os.chdir', 'os.chdir', (['dest_dir'], {}), '(dest_dir)\n', (5156, 5166), False, 'import os\n'), ((13668, 13705), 'os.path.join', 'os.path.join', (['family_dir', 'scores_file'], {}), '(family_dir, scores_file)\n', (13680, 13705), False, 'import os\n'), ((20384, 20429), 'os.path.join', 'os.path.join', (['family_dir', '"""search_report.txt"""'], {}), "(family_dir, 'search_report.txt')\n", (20396, 20429), False, 'import os\n'), ((7347, 7387), 'os.path.join', 'os.path.join', (['family_dir', '"""rfsearch.log"""'], {}), "(family_dir, 'rfsearch.log')\n", (7359, 7387), False, 'import os\n'), ((13142, 13209), 'utils.db_utils.fetch_type_specific_rfam_accessions', 'db.fetch_type_specific_rfam_accessions', (['"""miRNA"""'], {'return_type': '"""dict"""'}), "('miRNA', return_type='dict')\n", (13180, 13209), True, 'from utils import db_utils as db\n'), ((7477, 7522), 'os.path.join', 'os.path.join', (['family_dir', '"""auto_rfsearch.err"""'], {}), "(family_dir, 'auto_rfsearch.err')\n", (7489, 7522), False, 'import os\n')]
|
from django.urls import path
from LLINS_API import views
from rest_framework.urlpatterns import format_suffix_patterns
from django.contrib import admin
urlpatterns = [
path('', admin.site.urls),
path('patients/', views.patient_data_list),
path('patints/<int:pk>/', views.patient_data_detail),
path('nets/', views.nets_list),
]
|
[
"django.urls.path"
] |
[((172, 197), 'django.urls.path', 'path', (['""""""', 'admin.site.urls'], {}), "('', admin.site.urls)\n", (176, 197), False, 'from django.urls import path\n'), ((203, 245), 'django.urls.path', 'path', (['"""patients/"""', 'views.patient_data_list'], {}), "('patients/', views.patient_data_list)\n", (207, 245), False, 'from django.urls import path\n'), ((251, 303), 'django.urls.path', 'path', (['"""patints/<int:pk>/"""', 'views.patient_data_detail'], {}), "('patints/<int:pk>/', views.patient_data_detail)\n", (255, 303), False, 'from django.urls import path\n'), ((309, 339), 'django.urls.path', 'path', (['"""nets/"""', 'views.nets_list'], {}), "('nets/', views.nets_list)\n", (313, 339), False, 'from django.urls import path\n')]
|
from django import forms
class UpdateReviewForm(forms.Form):
"""
UpdateReviewForm Valida los datos del request.data al modificar un review
Args:
forms (Form): Form de django
Atributes:
observacion (TextField): Campo para validar que se especifca la observacion
"""
observacion = forms.CharField(
required=True,
error_messages={
"required": "No especificaste la observacion"
}
)
|
[
"django.forms.CharField"
] |
[((324, 422), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(True)', 'error_messages': "{'required': 'No especificaste la observacion'}"}), "(required=True, error_messages={'required':\n 'No especificaste la observacion'})\n", (339, 422), False, 'from django import forms\n')]
|
# to do 发送邮件,以及需要增加用例的执行结果
import time
import ApplicationPerformance.sendReport as sendReport
import ApplicationPerformance.applicationperformance.launchTime as launchTime # MAC
# import ApplicationPerformance.applicationperformance.launchTime as launchTime # Windows
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class WebAutomation(object):
# 启动浏览
def startBrowser(self, browsername, testurl, *browserconfigure):
if "谷歌" in browsername:
driver = webdriver.Chrome()
return driver
elif "火狐" in browsername:
if browserconfigure[0] != "": # 判断是否有配置路径
driver = webdriver.Firefox(webdriver.FirefoxProfile(browserconfigure[0])) # 带着配置启动火狐浏览器(比如增加Xpth插件等。)
return driver
else:
driver = webdriver.Firefox()
return driver
else:
print("您的测试用例中,存在无法识别的浏览器名称,请检查用例。")
# 双击操作
def operateDoubleClick(self, operatetype, element, driver, caseid):
if operatetype == "双击_id":
try:
driver.find_element_by_id(element).double_click()
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "双击_xpath":
try:
driver.find_element_by_xpath(element).double_click()
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "双击_textname": # 点击textname
try:
driver.find_elements_by_name(element)[0].double_click()
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "双击_classname":
try:
driver.find_elements_by_class_name(element)[0].double_click() # 点击xpath
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "双击_linkname":
try:
driver.find_elements_by_link_text(element)[0].double_click()
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
else:
casereport = "用例编号:%s,执行不通过,该用例的元素属性或参数可能有问题,请检查该用例。" % (caseid)
return casereport
# 右点击击操作
def operateRightClick(self, operatetype, element, driver, caseid):
if operatetype == "右击_id":
try:
driver.find_element_by_id(element).context_click().perform()
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "右击_xpath":
try:
driver.find_element_by_xpath(element).context_click()
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "右击_textname": # 点击textname
try:
driver.find_elements_by_name(element)[0].context_click()
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "右击_classname":
try:
driver.find_elements_by_class_name(element)[0].context_click() # 点击xpath
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "右击_linkname":
try:
driver.find_elements_by_link_text(element)[0].context_click()
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
else:
casereport = "用例编号:%s,执行不通过,该用例的元素属性或参数可能有问题,请检查该用例。" % (caseid)
return casereport
# 左点击击操作
def operateClick(self, operatetype, element, driver, caseid):
if operatetype == "点击_id":
try:
driver.find_element_by_id(element).click()
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "点击_xpath":
try:
driver.find_element_by_xpath(element).click()
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "点击_textname": # 点击textname
try:
driver.find_elements_by_name(element)[0].click()
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "点击_classname":
try:
driver.find_elements_by_class_name(element)[0].click() # 点击xpath
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "点击_linkname":
try:
driver.find_elements_by_link_text(element)[0].click()
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
# 扩展性 查找元素方法
elif operatetype == "点击_cssid":
try:
driver.find_element_by_css_selector("#%s" % (element)).click()
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "点击_cssname":
try:
driver.find_element_by_css_selector("a[name=\"%s\"]" % (element)).click()
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
else:
casereport = "用例编号:%s,执行不通过,该用例的元素属性或参数可能有问题,请检查该用例。" % (caseid)
return casereport
# 检查元素是否存在
def operateCheckElement(self, operatetype, element, driver, caseid):
if operatetype == "查找_id":
try:
driver.find_element_by_id(element)
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "查找_xpath":
try:
driver.find_element_by_xpath(element)
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "查找_textname": # 查找textname
try:
driver.find_elements_by_name(element)[0]
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "查找_classname":
try:
driver.find_elements_by_class_name(element)[0]
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "查找_linkname":
try:
driver.find_elements_by_link_text(element)[0]
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "if包含_id":
try:
driver.find_element_by_id(element)
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "if包含_xpath":
try:
driver.find_element_by_xpath(element)
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "if包含_textname":
try:
driver.find_elements_by_name(element)[0]
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "if包含_classname":
try:
driver.find_elements_by_class_name(element)[0]
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "if包含_linkname":
try:
driver.find_elements_by_link_text(element)[0]
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
else:
casereport = "用例编号:%s,执行不通过,该用例的元素属性或参数可能有问题,请检查该用例。" % (caseid)
return casereport
# 清空输入框
def clearInput(self, operatetype, element, driver, caseid):
if operatetype == "清空输入框_id":
try:
driver.find_element_by_id(element).clear()
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "清空输入框_xpath":
try:
driver.find_element_by_xpath(element).clear()
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "清空输入框_textname":
try:
driver.find_elements_by_name(element)[0].clear()
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
else:
casereport = "用例编号:%s,执行不通过,该用例的元素属性或参数可能有问题,请检查该用例。" % (caseid)
return casereport
# 输入操作
def operateInput(self, operatetype, element, driver, caseid, *parameter):
if operatetype == "输入_id":
try:
driver.find_element_by_id(element).send_keys(parameter[0])
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "输入_xpath":
try:
driver.find_element_by_xpath(element).send_keys(parameter[0])
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "输入_textname":
try:
driver.find_elements_by_name(element)[0].send_keys(parameter[0])
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
# 扩展性 查找元素方法
elif operatetype == "输入_cssid":
try:
driver.find_element_by_css_selector("#%s" % (element)).send_keys(parameter[0])
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "输入_cssname":
try:
driver.find_element_by_css_selector("a[name=\"%s\"]" % (element)).send_keys(parameter[0])
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
else:
casereport = "用例编号:%s,执行不通过,该用例的元素属性或参数可能有问题,请检查该用例。" % (caseid)
return casereport
# Android物理按键操作
def operatePhysicsKye(self, operatetype, element, driver, caseid):
if operatetype == "按enter_id":
try:
driver.find_element_by_id(element).send_keys(Keys.ENTER)
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "按enter_xpath":
try:
driver.find_element_by_xpath(element).send_keys(Keys.ENTER)
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "按enter_textname":
try:
driver.find_elements_by_name(element)[0].send_keys(Keys.ENTER)
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "pagedown_id":
try:
driver.find_element_by_id(element).send_keys(Keys.PAGE_DOWN)
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "pagedown_xpath":
try:
driver.find_element_by_xpath(element).send_keys(Keys.PAGE_DOWN)
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "pagedown_textname":
try:
driver.find_elements_by_name(element)[0].send_keys(Keys.PAGE_DOWN)
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "pageup_id":
try:
driver.find_element_by_id(element).send_keys(Keys.PAGE_UP)
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "pageup_xpath":
try:
driver.find_element_by_xpath(element).send_keys(Keys.PAGE_UP)
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "pageup_textname":
try:
driver.find_elements_by_name(element)[0].send_keys(Keys.PAGE_UP)
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "浏览器全屏":
try:
driver.maximize_window()
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
elif operatetype == "设置分辨率":
try:
windowslist = element.split(',')
driver.set_window_size(int(windowslist[0]), int(windowslist[1]))
casereport = "用例编号:%s,执行通过。" % (caseid)
return casereport
except:
casereport = "用例编号:%s,执行不通过。" % (caseid)
return casereport
else:
casereport = "用例编号:%s,执行不通过,该用例的元素属性或参数可能有问题,请检查该用例。" % (caseid)
return casereport
# 执行用例
def runCase(self):
deviceinfo = launchTime.ReadExcel().readeExcelData('browserinfo')
startautomationtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) # 开始自动化用例时间
for i in range(1, deviceinfo.get('caserows')):
devicesinfocase = deviceinfo.get('excledata_sheel').row_values(i)
browsername = devicesinfocase[0]
browserconfigure = devicesinfocase[1]
testurl = devicesinfocase[2]
browserstatus = devicesinfocase[3]
print(devicesinfocase)
eventid = time.strftime('%Y%m%d%H%M%S', time.localtime())
if "Y" in browserstatus:
driver = WebAutomation().startBrowser(browsername, browserconfigure)
time.sleep(5)
driver.get(testurl)
casedata = launchTime.ReadExcel().readeExcelData('browseefuncase') # 读取自动化用例数据
endcasenumber = []
casenumber = []
for j in range(1, casedata.get('caserows')): # Excel中的测试用例数据,使用for遍历每一行的数据,进行判断执行对应的操作
excelcasedata = casedata.get('excledata_sheel').row_values(
j)
operatetype = excelcasedata[1]
if "if" in operatetype:
casenumber.append(j)
if "end" in operatetype:
endcasenumber.append(j)
x = 1
ifnumber = 0
try:
casecount = casedata.get('caserows')-1 # 用例总数
while x <= casecount:
excelcasedata = casedata.get('excledata_sheel').row_values(x)
x = x + 1
try:
caseid = int(excelcasedata[0]) # 用例编号
except:
caseid = excelcasedata[0]
operatetype = excelcasedata[1] # 操作类型
element = excelcasedata[2] # 元素
parameter = str(excelcasedata[3]) # 参数 必须要转成字符串,要不然在使用send_keys(必须要是字符串类型)时无法使用
rundescribe = excelcasedata[6] # 步骤描述
caseexecute = excelcasedata[7] # 用例状态
driver.implicitly_wait(60)
startonecasetime = time.time()
if excelcasedata[5] == "": # 等待时间
waittime = 2
else:
waittime = int(excelcasedata[5])
if "Y" in caseexecute:
if operatetype == "等待时间":
time.sleep(waittime)
casereport = "用例编号:%s,执行通过。" % (caseid)
print(casereport)
elif operatetype == "点击_id":
print(WebAutomation().operateClick(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "点击_xpath":
print(WebAutomation().operateClick(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "点击_textname":
print(WebAutomation().operateClick(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "点击_linkname":
print(WebAutomation().operateClick(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "点击_classname":
print(WebAutomation().operateClick(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "点击_cssid":
print(WebAutomation().operateClick(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "点击_cssname":
print(WebAutomation().operateClick(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "输入_id":
print(WebAutomation().operateInput(operatetype, element, driver, caseid, parameter))
time.sleep(waittime)
elif operatetype == "输入_xpath":
print(WebAutomation().operateInput(operatetype, element, driver, caseid, parameter))
time.sleep(waittime)
elif operatetype == "输入_textname":
print(WebAutomation().operateInput(operatetype, element, driver, caseid, parameter))
time.sleep(waittime)
elif operatetype == "输入_cssid":
print(WebAutomation().operateInput(operatetype, element, driver, caseid, parameter))
time.sleep(waittime)
elif operatetype == "输入_cssname":
print(WebAutomation().operateInput(operatetype, element, driver, caseid, parameter))
time.sleep(waittime)
elif operatetype == "清空输入框_id":
print(WebAutomation().clearInput(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "清空输入框_xpath":
print(WebAutomation().clearInput(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "清空输入框_textname":
print(WebAutomation().clearInput(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "查找_id":
print(WebAutomation().operateCheckElement(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "查找_xpath":
print(WebAutomation().operateCheckElement(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "查找_textname":
print(WebAutomation().operateCheckElement(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "查找_linkname":
print(WebAutomation().operateCheckElement(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "end":
casereport = "用例编号:%s,执行通过。" % (caseid)
print(casereport)
elif "if" in operatetype:
if operatetype == "if包含_id":
casereport = WebAutomation().operateCheckElement(operatetype, element,
driver, caseid)
if "执行通过" in casereport:
print(casereport)
else:
print(casereport)
if len(endcasenumber) == len(casenumber):
x = endcasenumber[ifnumber]
else:
print("当前用例中的if和and不等,请检查用例")
x = endcasenumber[-1]
elif "if包含_xpath":
casereport = WebAutomation().operateCheckElement(operatetype, element,
driver, caseid)
if "执行通过" in casereport:
print(casereport)
else:
print(casereport)
if len(endcasenumber) == len(casenumber):
x = endcasenumber[ifnumber]
else:
print("当前用例中的if和and不等,请检查用例")
x = endcasenumber[-1]
elif "if包含_classname":
casereport = WebAutomation().operateCheckElement(operatetype, element,
driver, caseid)
if "执行通过" in casereport:
print(casereport)
else:
print(casereport)
if len(endcasenumber) == len(casenumber):
x = endcasenumber[ifnumber]
else:
print("当前用例中的if和and不等,请检查用例")
x = endcasenumber[-1]
elif "if包含_textname":
casereport = WebAutomation().operateCheckElement(operatetype, element,
driver, caseid)
if "执行通过" in casereport:
print(casereport)
else:
print(casereport)
if len(endcasenumber) == len(casenumber):
x = endcasenumber[ifnumber]
else:
print("当前用例中的if和and不等,请检查用例")
x = endcasenumber[-1]
elif "if包含_linkname":
casereport = WebAutomation().operateCheckElement(operatetype, element,
driver, caseid)
if "执行通过" in casereport:
print(casereport)
else:
print(casereport)
if len(endcasenumber) == len(casenumber):
x = endcasenumber[ifnumber]
else:
print("当前用例中的if和and不等,请检查用例")
x = endcasenumber[-1]
else:
casereport = "用例编号:%s操作类型错误,该用例不执行。" % (caseid)
print(casereport)
ifnumber = ifnumber + 1
elif operatetype == "查找_classname":
print(WebAutomation().operateCheckElement(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "右击_id":
print(WebAutomation().operateRightClick(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "右击_xpath":
print(WebAutomation().operateRightClick(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "右击_textname":
print(WebAutomation().operateRightClick(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "右击_linkname":
print(WebAutomation().operateRightClick(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "右击_classname":
print(WebAutomation().operateRightClick(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "双击_id":
print(WebAutomation().operateDoubleClick(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "双击_xpath":
print(WebAutomation().operateDoubleClick(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "双击_textname":
print(WebAutomation().operateDoubleClick(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "双击_linkname":
print(WebAutomation().operateDoubleClick(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "双击_classname":
print(WebAutomation().operateDoubleClick(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "按enter_id":
print(WebAutomation().operatePhysicsKye(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "按enter_xpath":
print(WebAutomation().operatePhysicsKye(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "按enter_textname":
print(WebAutomation().operatePhysicsKye(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "pagedown_id":
print(WebAutomation().operatePhysicsKye(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "pagedown_xpath":
print(WebAutomation().operatePhysicsKye(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "pagedown_textname":
print(WebAutomation().operatePhysicsKye(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "pageup_id":
print(WebAutomation().operatePhysicsKye(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "pageup_xpath":
print(WebAutomation().operatePhysicsKye(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "pageup_textname":
print(WebAutomation().operatePhysicsKye(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "浏览器全屏":
print(WebAutomation().operatePhysicsKye(operatetype, element, driver, caseid))
time.sleep(waittime)
elif operatetype == "设置分辨率":
print(WebAutomation().operatePhysicsKye(operatetype, element, driver, caseid))
time.sleep(waittime)
else:
casereport = "用例编号:%s操作类型错误,该用例不执行。" % (caseid)
print(casereport)
else:
casereport = "用例编号:%s,执行状态为No,故不执行。" % (caseid)
print(casereport)
endonecasetime = time.time()
runonecasetime = round(endonecasetime - startonecasetime, 2)
savedata = "insert into automationquery_automation_function_web (`browsername`,`browserconfigure`,`browserstatus`,`operatetype`,`element`,`parameter`,`waittime`,`rundescribe`,`caseexecute`,`runcasetime`,`caseid`,`eventid`,`casereport`,`createdtime`,`updatetime`)VALUES('%s','%s','%s','%s',\'''%s\''','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')" % (
browsername, browserconfigure, browserstatus, operatetype, element, parameter, waittime,
rundescribe,
caseexecute,
runonecasetime, caseid, eventid, casereport,
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
try:
launchTime.MysqlConnect().saveDatatoMysql("%s" % (savedata))
time.sleep(1)
except:
print("数据库连接失败,保存数据失败。")
except:
driver.close()
driver.quit()
driver.close()
driver.quit()
else:
print("浏览%s,状态为不执行,故该浏览器上不运行用例。" % (devicesinfocase[0]))
savedata = "insert into automationquery_automation_function_web (`browsername`,`browserconfigure`,`browserstatus`,`operatetype`,`element`,`parameter`,`waittime`,`rundescribe`,`caseexecute`,`runcasetime`,`caseid`,`eventid`,`casereport`,`createdtime`,`updatetime`)VALUES('%s','%s','%s','%s',\'''%s\''','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')" % (
browsername, browserconfigure, browserstatus, "", "", "", "",
"",
"",
"", "", eventid, casereport,
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
try:
launchTime.MysqlConnect().saveDatatoMysql("%s" % (savedata))
time.sleep(1)
except:
print("数据库连接失败,保存数据失败。")
tomail = "<EMAIL>,<EMAIL>"
ccemail = "<EMAIL>"
print(sendReport.SendReport().senderEmail(tomail, ccemail, startautomationtime, casecount))
if __name__ == "__main__":
WebAutomation().runCase()
|
[
"ApplicationPerformance.sendReport.SendReport",
"selenium.webdriver.Firefox",
"ApplicationPerformance.applicationperformance.launchTime.ReadExcel",
"selenium.webdriver.FirefoxProfile",
"time.sleep",
"time.time",
"selenium.webdriver.Chrome",
"ApplicationPerformance.applicationperformance.launchTime.MysqlConnect",
"time.localtime"
] |
[((514, 532), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (530, 532), False, 'from selenium import webdriver\n'), ((17975, 17991), 'time.localtime', 'time.localtime', ([], {}), '()\n', (17989, 17991), False, 'import time\n'), ((17857, 17879), 'ApplicationPerformance.applicationperformance.launchTime.ReadExcel', 'launchTime.ReadExcel', ([], {}), '()\n', (17877, 17879), True, 'import ApplicationPerformance.applicationperformance.launchTime as launchTime\n'), ((18409, 18425), 'time.localtime', 'time.localtime', ([], {}), '()\n', (18423, 18425), False, 'import time\n'), ((18565, 18578), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (18575, 18578), False, 'import time\n'), ((840, 859), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {}), '()\n', (857, 859), False, 'from selenium import webdriver\n'), ((37060, 37073), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (37070, 37073), False, 'import time\n'), ((37220, 37243), 'ApplicationPerformance.sendReport.SendReport', 'sendReport.SendReport', ([], {}), '()\n', (37241, 37243), True, 'import ApplicationPerformance.sendReport as sendReport\n'), ((691, 736), 'selenium.webdriver.FirefoxProfile', 'webdriver.FirefoxProfile', (['browserconfigure[0]'], {}), '(browserconfigure[0])\n', (715, 736), False, 'from selenium import webdriver\n'), ((18642, 18664), 'ApplicationPerformance.applicationperformance.launchTime.ReadExcel', 'launchTime.ReadExcel', ([], {}), '()\n', (18662, 18664), True, 'import ApplicationPerformance.applicationperformance.launchTime as launchTime\n'), ((20150, 20161), 'time.time', 'time.time', ([], {}), '()\n', (20159, 20161), False, 'import time\n'), ((34834, 34845), 'time.time', 'time.time', ([], {}), '()\n', (34843, 34845), False, 'import time\n'), ((35894, 35907), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (35904, 35907), False, 'import time\n'), ((36845, 36861), 'time.localtime', 'time.localtime', ([], {}), '()\n', (36859, 36861), False, 'import time\n'), 
((36919, 36935), 'time.localtime', 'time.localtime', ([], {}), '()\n', (36933, 36935), False, 'import time\n'), ((36979, 37004), 'ApplicationPerformance.applicationperformance.launchTime.MysqlConnect', 'launchTime.MysqlConnect', ([], {}), '()\n', (37002, 37004), True, 'import ApplicationPerformance.applicationperformance.launchTime as launchTime\n'), ((20486, 20506), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (20496, 20506), False, 'import time\n'), ((20824, 20844), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (20834, 20844), False, 'import time\n'), ((35647, 35663), 'time.localtime', 'time.localtime', ([], {}), '()\n', (35661, 35663), False, 'import time\n'), ((35729, 35745), 'time.localtime', 'time.localtime', ([], {}), '()\n', (35743, 35745), False, 'import time\n'), ((35805, 35830), 'ApplicationPerformance.applicationperformance.launchTime.MysqlConnect', 'launchTime.MysqlConnect', ([], {}), '()\n', (35828, 35830), True, 'import ApplicationPerformance.applicationperformance.launchTime as launchTime\n'), ((21043, 21063), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (21053, 21063), False, 'import time\n'), ((21265, 21285), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (21275, 21285), False, 'import time\n'), ((21487, 21507), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (21497, 21507), False, 'import time\n'), ((21710, 21730), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (21720, 21730), False, 'import time\n'), ((21929, 21949), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (21939, 21949), False, 'import time\n'), ((22150, 22170), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (22160, 22170), False, 'import time\n'), ((22377, 22397), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (22387, 22397), False, 'import time\n'), ((22607, 22627), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', 
(22617, 22627), False, 'import time\n'), ((22840, 22860), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (22850, 22860), False, 'import time\n'), ((23070, 23090), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (23080, 23090), False, 'import time\n'), ((23302, 23322), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (23312, 23322), False, 'import time\n'), ((23519, 23539), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (23529, 23539), False, 'import time\n'), ((23739, 23759), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (23749, 23759), False, 'import time\n'), ((23962, 23982), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (23972, 23982), False, 'import time\n'), ((24185, 24205), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (24195, 24205), False, 'import time\n'), ((24411, 24431), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (24421, 24431), False, 'import time\n'), ((24640, 24660), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (24650, 24660), False, 'import time\n'), ((24869, 24889), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (24879, 24889), False, 'import time\n'), ((29697, 29717), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (29707, 29717), False, 'import time\n'), ((29918, 29938), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (29928, 29938), False, 'import time\n'), ((30142, 30162), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (30152, 30162), False, 'import time\n'), ((30369, 30389), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (30379, 30389), False, 'import time\n'), ((30596, 30616), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (30606, 30616), False, 'import time\n'), ((30824, 30844), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (30834, 30844), False, 'import time\n'), 
((31046, 31066), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (31056, 31066), False, 'import time\n'), ((31271, 31291), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (31281, 31291), False, 'import time\n'), ((31499, 31519), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (31509, 31519), False, 'import time\n'), ((31727, 31747), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (31737, 31747), False, 'import time\n'), ((31956, 31976), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (31966, 31976), False, 'import time\n'), ((32181, 32201), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (32191, 32201), False, 'import time\n'), ((32409, 32429), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (32419, 32429), False, 'import time\n'), ((32640, 32660), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (32650, 32660), False, 'import time\n'), ((32867, 32887), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (32877, 32887), False, 'import time\n'), ((33097, 33117), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (33107, 33117), False, 'import time\n'), ((33330, 33350), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (33340, 33350), False, 'import time\n'), ((33555, 33575), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (33565, 33575), False, 'import time\n'), ((33783, 33803), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (33793, 33803), False, 'import time\n'), ((34014, 34034), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (34024, 34034), False, 'import time\n'), ((34235, 34255), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (34245, 34255), False, 'import time\n'), ((34456, 34476), 'time.sleep', 'time.sleep', (['waittime'], {}), '(waittime)\n', (34466, 34476), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 24 13:50:54 2020
This is the load to load data based on occupancy maps
@author: cheng
"""
import numpy as np
import time
import os
from augmentation import rotation
from maps import Maps
from occupancy import circle_group_grid
def loaddata(dataset_list, args, datatype="train"):
    """Load preprocessed offsets, trajectories and occupancy maps.

    Args:
        dataset_list: names of the preprocessed .npz files to load.
        args: namespace providing obs_seq, pred_seq and enviro_pdim.
        datatype: "train", "test" or "challenge"; selects the directory the
            .npz files are read from and the expected sequence length.

    Returns:
        offsets: [N, seq-1, 8] array
            [frameId, userId, x, y, delta_x, delta_y, theta, velocity].
        traj_data: [N, seq, 4] array [frameId, userId, x, y].
        occupancy: [N, seq-1, H, W, 3] occupancy grids.
    """
    if datatype == "train" or datatype == "test":
        if datatype == "test":
            # Bug fix: this check previously lived in an unreachable
            # `elif datatype=="test"` branch (this branch already consumed
            # "test"), so it never fired.  Restored here.
            assert len(dataset_list) == 1, \
                "Only one untouched dataset is left for testing!"
        seq = args.obs_seq + args.pred_seq
        offsets = np.empty((0, seq - 1, 8))
        traj_data = np.empty((0, seq, 4))
        occupancy = np.empty((0, seq - 1, args.enviro_pdim[0], args.enviro_pdim[1], 3))
        if dataset_list[0] == "train_merged":
            # The merged file already contains all training trajectories.
            datasets = dataset_list[:1]
        else:
            # Only take the original data; skip any merged duplicate.
            # ToDo, here needs to be test if augumentation will boost the performance
            datasets = [d for d in dataset_list if d != "train_merged"]
        for dataset in datasets:
            # ToDo change this to make compatible with linux
            data = np.load("../processed_data/train/%s.npz" % dataset)
            _offsets, _traj_data, _occupancy = data["offsets"], data["traj_data"], data["occupancy"]
            print(dataset, "contains %.0f trajectories" % len(_offsets))
            offsets = np.concatenate((offsets, _offsets), axis=0)
            traj_data = np.concatenate((traj_data, _traj_data), axis=0)
            occupancy = np.concatenate((occupancy, _occupancy), axis=0)
    # NOTE: The challenge data sets are never merged.  The submission requires
    # each challenge data set (20 in total) to be separated, hence each call
    # handles exactly one challenge set.
    elif datatype == "challenge":
        # Bug fix: restored from a formerly unreachable elif branch.
        assert len(dataset_list) == 1, "predict one by one"
        offsets = np.empty((0, args.obs_seq - 1, 8))
        traj_data = np.empty((0, args.obs_seq, 4))
        occupancy = np.empty((0, args.obs_seq - 1, args.enviro_pdim[0], args.enviro_pdim[1], 3))
        for dataset in dataset_list:
            # ToDo change this to make compatible with linux
            data = np.load("../processed_data/challenge/%s.npz" % dataset)
            _offsets, _traj_data, _occupancy = data["offsets"], data["traj_data"], data["occupancy"]
            offsets = np.concatenate((offsets, _offsets), axis=0)
            traj_data = np.concatenate((traj_data, _traj_data), axis=0)
            occupancy = np.concatenate((occupancy, _occupancy), axis=0)
    if datatype == "train":
        # Cache the merged training data for faster subsequent loads.
        if not os.path.exists("../processed_data/train/train_merged.npz"):
            np.savez("../processed_data/train/train_merged.npz",
                     offsets=offsets,
                     traj_data=traj_data,
                     occupancy=occupancy)
    return offsets, traj_data, occupancy
def preprocess_data(seq_length, size, dirname, path=None, data=None, aug_num=1, save=True):
    '''
    Compute trajectory offsets and occupancy grids for one dataset.

    Parameters
    ----------
    seq_length : int
        This is the complete length of each trajectory offset and occupancy.
        Note: one-step difference between the offset/occupancy and traj_data.
    size : [height, width, channels]
        The occupancy grid size and channels:
        orientation, speed and position for the neighbors in the vicinity.
    dirname : string
        "train" or "challenge"; selects the output sub-directory.
    path : string, optional
        Source file to read when `data` is not supplied.
    data : numpy array, optional
        The predicted complete trajectories after the first prediction,
        used to calculate the occupancy in the predicted time.
    aug_num : int, optional
        The number of rotation-based augmentation passes.
    save : boolean, optional
        Save the processed training data instead of returning it.

    Returns
    -------
    offsets : numpy array
        [frameId, userId, x, y, delta_x, delta_y, theta, velocity].
    traj_data : numpy array
        [frameId, userId, x, y] (one step longer).
    occupancy : numpy array
        [height, width, channels].
    '''
    start = time.time()
    # Bug fix: the original test was `np.all(data) == None`, which is never
    # True (np.all returns a bool), so this branch never ran; `is None` is
    # the correct identity check.
    if data is None:
        data = np.genfromtxt(path, delimiter='')
        # Challenge datasets have NaN for the prediction time steps.
        data = data[~np.isnan(data).any(axis=1)]
        # Portable name extraction (the old path.split('\\') was Windows-only).
        dataname = os.path.splitext(os.path.basename(path))[0]
        print("process data %s ..." % dataname)
    for r in range(aug_num):
        # Augment the data by rotating it if more than one pass is requested
        if r > 0:
            data[:, 2:4] = rotation(data[:, 2:4], r / aug_num)
        # Get the environment maps
        maps = Maps(data)
        traj_map = maps.trajectory_map()
        orient_map, speed_map = maps.motion_map(max_speed=10)
        map_info = [traj_map, orient_map, speed_map]
        enviro_maps = concat_maps(map_info)
        print("enviro_maps shape", enviro_maps.shape)
        offsets = np.reshape(maps.offsets, (-1, seq_length, 8))
        print("offsets shape", offsets.shape)
        traj_data = np.reshape(maps.sorted_data, (-1, seq_length + 1, 4))
        print("traj_data shape", traj_data.shape)
        occupancy = circle_group_grid(offsets, maps.sorted_data, size)
        print("occupancy shape", occupancy.shape)
        if save:
            if r == 0:
                # Save the original (non-rotated) pass
                np.savez("../processed_data/%s/%s" % (dirname, dataname),
                         offsets=offsets,
                         traj_data=traj_data,
                         occupancy=occupancy)
            else:
                # Save the rotated pass(es) with a numeric suffix
                np.savez("../processed_data/%s/%s_%.0f" % (dirname, dataname, r),
                         offsets=offsets,
                         traj_data=traj_data,
                         occupancy=occupancy)
            end = time.time()
            print("It takes ", round(end - start, 2), "seconds!\n")
        else:
            return offsets, traj_data, occupancy
def concat_maps(map_info):
    """Stack 2-D maps into one [H, W, C] float array (one channel per map).

    Args:
        map_info: list of equally-shaped 2-D numpy arrays
            (e.g. trajectory, orientation and speed maps).

    Returns:
        numpy array of shape (H, W, len(map_info)), dtype float64.
    """
    # np.stack replaces the original channel-by-channel fill of an
    # uninitialized np.empty buffer and avoids shadowing the builtin `map`;
    # the float64 cast preserves the original output dtype.
    return np.stack(map_info, axis=-1).astype(np.float64)
|
[
"numpy.load",
"numpy.concatenate",
"occupancy.circle_group_grid",
"numpy.empty",
"os.path.exists",
"numpy.genfromtxt",
"numpy.isnan",
"time.time",
"augmentation.rotation",
"numpy.reshape",
"maps.Maps",
"numpy.savez",
"numpy.all"
] |
[((5232, 5243), 'time.time', 'time.time', ([], {}), '()\n', (5241, 5243), False, 'import time\n'), ((483, 533), 'numpy.empty', 'np.empty', (['(0, args.obs_seq + args.pred_seq - 1, 8)'], {}), '((0, args.obs_seq + args.pred_seq - 1, 8))\n', (491, 533), True, 'import numpy as np\n'), ((550, 596), 'numpy.empty', 'np.empty', (['(0, args.obs_seq + args.pred_seq, 4)'], {}), '((0, args.obs_seq + args.pred_seq, 4))\n', (558, 596), True, 'import numpy as np\n'), ((615, 712), 'numpy.empty', 'np.empty', (['(0, args.obs_seq + args.pred_seq - 1, args.enviro_pdim[0], args.enviro_pdim\n [1], 3)'], {}), '((0, args.obs_seq + args.pred_seq - 1, args.enviro_pdim[0], args.\n enviro_pdim[1], 3))\n', (623, 712), True, 'import numpy as np\n'), ((5251, 5263), 'numpy.all', 'np.all', (['data'], {}), '(data)\n', (5257, 5263), True, 'import numpy as np\n'), ((5286, 5319), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': '""""""'}), "(path, delimiter='')\n", (5299, 5319), True, 'import numpy as np\n'), ((5837, 5847), 'maps.Maps', 'Maps', (['data'], {}), '(data)\n', (5841, 5847), False, 'from maps import Maps\n'), ((6140, 6185), 'numpy.reshape', 'np.reshape', (['maps.offsets', '(-1, seq_length, 8)'], {}), '(maps.offsets, (-1, seq_length, 8))\n', (6150, 6185), True, 'import numpy as np\n'), ((6250, 6303), 'numpy.reshape', 'np.reshape', (['maps.sorted_data', '(-1, seq_length + 1, 4)'], {}), '(maps.sorted_data, (-1, seq_length + 1, 4))\n', (6260, 6303), True, 'import numpy as np\n'), ((6373, 6423), 'occupancy.circle_group_grid', 'circle_group_grid', (['offsets', 'maps.sorted_data', 'size'], {}), '(offsets, maps.sorted_data, size)\n', (6390, 6423), False, 'from occupancy import circle_group_grid\n'), ((839, 898), 'numpy.load', 'np.load', (["('../processed_data/train/%s.npz' % dataset_list[0])"], {}), "('../processed_data/train/%s.npz' % dataset_list[0])\n", (846, 898), True, 'import numpy as np\n'), ((1118, 1161), 'numpy.concatenate', 'np.concatenate', (['(offsets, _offsets)'], {'axis': 
'(0)'}), '((offsets, _offsets), axis=0)\n', (1132, 1161), True, 'import numpy as np\n'), ((1186, 1233), 'numpy.concatenate', 'np.concatenate', (['(traj_data, _traj_data)'], {'axis': '(0)'}), '((traj_data, _traj_data), axis=0)\n', (1200, 1233), True, 'import numpy as np\n'), ((1258, 1305), 'numpy.concatenate', 'np.concatenate', (['(occupancy, _occupancy)'], {'axis': '(0)'}), '((occupancy, _occupancy), axis=0)\n', (1272, 1305), True, 'import numpy as np\n'), ((2485, 2519), 'numpy.empty', 'np.empty', (['(0, args.obs_seq - 1, 8)'], {}), '((0, args.obs_seq - 1, 8))\n', (2493, 2519), True, 'import numpy as np\n'), ((2538, 2568), 'numpy.empty', 'np.empty', (['(0, args.obs_seq, 4)'], {}), '((0, args.obs_seq, 4))\n', (2546, 2568), True, 'import numpy as np\n'), ((2589, 2665), 'numpy.empty', 'np.empty', (['(0, args.obs_seq - 1, args.enviro_pdim[0], args.enviro_pdim[1], 3)'], {}), '((0, args.obs_seq - 1, args.enviro_pdim[0], args.enviro_pdim[1], 3))\n', (2597, 2665), True, 'import numpy as np\n'), ((3530, 3588), 'os.path.exists', 'os.path.exists', (['"""../processed_data/train/train_merged.npz"""'], {}), "('../processed_data/train/train_merged.npz')\n", (3544, 3588), False, 'import os\n'), ((3707, 3822), 'numpy.savez', 'np.savez', (['"""../processed_data/train/train_merged.npz"""'], {'offsets': 'offsets', 'traj_data': 'traj_data', 'occupancy': 'occupancy'}), "('../processed_data/train/train_merged.npz', offsets=offsets,\n traj_data=traj_data, occupancy=occupancy)\n", (3715, 3822), True, 'import numpy as np\n'), ((5736, 5771), 'augmentation.rotation', 'rotation', (['data[:, 2:4]', '(r / aug_num)'], {}), '(data[:, 2:4], r / aug_num)\n', (5744, 5771), False, 'from augmentation import rotation\n'), ((2789, 2844), 'numpy.load', 'np.load', (["('../processed_data/challenge/%s.npz' % dataset)"], {}), "('../processed_data/challenge/%s.npz' % dataset)\n", (2796, 2844), True, 'import numpy as np\n'), ((2968, 3011), 'numpy.concatenate', 'np.concatenate', (['(offsets, _offsets)'], 
{'axis': '(0)'}), '((offsets, _offsets), axis=0)\n', (2982, 3011), True, 'import numpy as np\n'), ((3036, 3083), 'numpy.concatenate', 'np.concatenate', (['(traj_data, _traj_data)'], {'axis': '(0)'}), '((traj_data, _traj_data), axis=0)\n', (3050, 3083), True, 'import numpy as np\n'), ((3108, 3155), 'numpy.concatenate', 'np.concatenate', (['(occupancy, _occupancy)'], {'axis': '(0)'}), '((occupancy, _occupancy), axis=0)\n', (3122, 3155), True, 'import numpy as np\n'), ((6585, 6705), 'numpy.savez', 'np.savez', (["('../processed_data/%s/%s' % (dirname, dataname))"], {'offsets': 'offsets', 'traj_data': 'traj_data', 'occupancy': 'occupancy'}), "('../processed_data/%s/%s' % (dirname, dataname), offsets=offsets,\n traj_data=traj_data, occupancy=occupancy)\n", (6593, 6705), True, 'import numpy as np\n'), ((6802, 6813), 'time.time', 'time.time', ([], {}), '()\n', (6811, 6813), False, 'import time\n'), ((6908, 7037), 'numpy.savez', 'np.savez', (["('../processed_data/%s/%s_%.0f' % (dirname, dataname, r))"], {'offsets': 'offsets', 'traj_data': 'traj_data', 'occupancy': 'occupancy'}), "('../processed_data/%s/%s_%.0f' % (dirname, dataname, r), offsets=\n offsets, traj_data=traj_data, occupancy=occupancy)\n", (6916, 7037), True, 'import numpy as np\n'), ((7133, 7144), 'time.time', 'time.time', ([], {}), '()\n', (7142, 7144), False, 'import time\n'), ((1661, 1712), 'numpy.load', 'np.load', (["('../processed_data/train/%s.npz' % dataset)"], {}), "('../processed_data/train/%s.npz' % dataset)\n", (1668, 1712), True, 'import numpy as np\n'), ((1956, 1999), 'numpy.concatenate', 'np.concatenate', (['(offsets, _offsets)'], {'axis': '(0)'}), '((offsets, _offsets), axis=0)\n', (1970, 1999), True, 'import numpy as np\n'), ((2032, 2079), 'numpy.concatenate', 'np.concatenate', (['(traj_data, _traj_data)'], {'axis': '(0)'}), '((traj_data, _traj_data), axis=0)\n', (2046, 2079), True, 'import numpy as np\n'), ((2112, 2159), 'numpy.concatenate', 'np.concatenate', (['(occupancy, _occupancy)'], 
{'axis': '(0)'}), '((occupancy, _occupancy), axis=0)\n', (2126, 2159), True, 'import numpy as np\n'), ((5412, 5426), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (5420, 5426), True, 'import numpy as np\n')]
|
"""Test functions for util.tm_util"""
import unittest
from ample.testing import test_funcs
from ample.util import ample_util, tm_util
@unittest.skipUnless(test_funcs.found_exe("TMscore" + ample_util.EXE_EXT), "TMscore exec missing")
class TestTM(unittest.TestCase):
    """Gap-mask tests for tm_util.TMscore (skipped when TMscore is absent)."""

    @staticmethod
    def _gaps(sequence):
        """Return the per-residue gap mask from a throwaway TMscore wrapper."""
        return tm_util.TMscore("TMscore", wdir=".")._find_gaps(sequence)

    def test_gaps_1(self):
        sequence = "AAAA---AA--AA"
        expected = [ch == "-" for ch in sequence]
        self.assertEqual(expected, self._gaps(sequence))

    def test_gaps_2(self):
        sequence = "---AA--AA"
        expected = [ch == "-" for ch in sequence]
        self.assertEqual(expected, self._gaps(sequence))

    def test_gaps_3(self):
        sequence = "-AAA--"
        expected = [ch == "-" for ch in sequence]
        self.assertEqual(expected, self._gaps(sequence))
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"ample.testing.test_funcs.found_exe",
"ample.util.tm_util.TMscore"
] |
[((158, 210), 'ample.testing.test_funcs.found_exe', 'test_funcs.found_exe', (["('TMscore' + ample_util.EXE_EXT)"], {}), "('TMscore' + ample_util.EXE_EXT)\n", (178, 210), False, 'from ample.testing import test_funcs\n'), ((980, 995), 'unittest.main', 'unittest.main', ([], {}), '()\n', (993, 995), False, 'import unittest\n'), ((311, 347), 'ample.util.tm_util.TMscore', 'tm_util.TMscore', (['"""TMscore"""'], {'wdir': '"""."""'}), "('TMscore', wdir='.')\n", (326, 347), False, 'from ample.util import ample_util, tm_util\n'), ((566, 602), 'ample.util.tm_util.TMscore', 'tm_util.TMscore', (['"""TMscore"""'], {'wdir': '"""."""'}), "('TMscore', wdir='.')\n", (581, 602), False, 'from ample.util import ample_util, tm_util\n'), ((789, 825), 'ample.util.tm_util.TMscore', 'tm_util.TMscore', (['"""TMscore"""'], {'wdir': '"""."""'}), "('TMscore', wdir='.')\n", (804, 825), False, 'from ample.util import ample_util, tm_util\n')]
|
# Copyright 2017 Google Inc. and Skytruth Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from collections import namedtuple
import logging
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.metrics as metrics
import utility
class ModelBase(object):
    """Abstract base class for vessel models.

    Holds shared dataset bookkeeping (vessel metadata, fishing ranges) and
    exposes default hyper-parameters as read-only properties; subclasses
    implement the training and inference network builders.
    """
    __metaclass__ = abc.ABCMeta  # Python-2 style abstract base class declaration
    @property
    def number_of_steps(self):
        """Number of training examples to use"""
        return 500000
    @property
    def use_ranges_for_training(self):
        """Choose features overlapping with provided ranges during training"""
        return False
    @property
    def batch_size(self):
        """Mini-batch size; also used as a list-padding boundary below."""
        return 64
    @property
    def max_window_duration_seconds(self):
        """ Window max duration in seconds. A value of zero indicates that
        we would instead like to choose a fixed-length window. """
        return None
    # We often allocate a much smaller buffer than would fit the specified time
    # sampled at 5 mins intervals, on the basis that the sample is almost
    # always much more sparse.
    @property
    def window_max_points(self):
        """Maximum number of points per window (None: left to subclasses)."""
        return None
    @property
    def min_viable_timeslice_length(self):
        """Shortest timeslice considered usable."""
        return 500
    @property
    def max_replication_factor(self):
        """Upper bound on per-vessel oversampling in the weighted training list."""
        return 100.0
    def __init__(self, num_feature_dimensions, vessel_metadata):
        """Record feature dimensionality and optional vessel metadata.

        Args:
            num_feature_dimensions: number of input feature channels.
            vessel_metadata: object providing fishing_ranges_map and
                weighted_training_list, or None (metadata-free usage).
        """
        self.num_feature_dimensions = num_feature_dimensions
        if vessel_metadata:
            self.vessel_metadata = vessel_metadata
            self.fishing_ranges_map = vessel_metadata.fishing_ranges_map
        else:
            self.vessel_metadata = None
            self.fishing_ranges_map = None
        self.training_objectives = None  # presumably populated by subclasses — confirm
    def build_training_file_list(self, base_feature_path, split):
        """Return tfrecord paths for a weighted random sample of vessels.

        For the training split the list boundary is 1; otherwise it is
        aligned to the batch size.
        """
        boundary = 1 if (split == utility.TRAINING_SPLIT) else self.batch_size
        random_state = np.random.RandomState()
        training_mmsis = self.vessel_metadata.weighted_training_list(
            random_state,
            split,
            self.max_replication_factor,
            boundary=boundary)
        return [
            '%s/%s.tfrecord' % (base_feature_path, mmsi)
            for mmsi in training_mmsis
        ]
    @staticmethod
    def read_metadata(all_available_mmsis,
                      metadata_file,
                      fishing_ranges,
                      fishing_upweight=1.0):
        """Load multiclass vessel metadata (thin wrapper around utility)."""
        return utility.read_vessel_multiclass_metadata(
            all_available_mmsis, metadata_file, fishing_ranges,
            fishing_upweight)
    @abc.abstractmethod
    def build_training_net(self, features, timestamps, mmsis):
        """Build net suitable for training model

        Args:
            features : features to feed into net
            timestamps: a list of timestamps, one for each feature point.
            mmsis: a list of mmsis, one for each batch element.

        Returns:
            TrainNetInfo
        """
        optimizer = trainers = None
        return optimizer, trainers
    @abc.abstractmethod
    def build_inference_net(self, features, timestamps, mmsis):
        """Build net suitable for running inference on model

        Args:
            features : features to feed into net
            timestamps: a list of timestamps, one for each feature point.
            mmsis: a list of mmsis, one for each batch element.

        Returns:
            A list of objects derived from EvaluationBase providing
            functionality to log evaluation statistics as well as to
            return the results of inference as JSON.
        """
        return []
|
[
"utility.read_vessel_multiclass_metadata",
"numpy.random.RandomState"
] |
[((2400, 2423), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (2421, 2423), True, 'import numpy as np\n'), ((2931, 3044), 'utility.read_vessel_multiclass_metadata', 'utility.read_vessel_multiclass_metadata', (['all_available_mmsis', 'metadata_file', 'fishing_ranges', 'fishing_upweight'], {}), '(all_available_mmsis, metadata_file,\n fishing_ranges, fishing_upweight)\n', (2970, 3044), False, 'import utility\n')]
|
"""Support for Google Places API."""
from datetime import timedelta
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_API_KEY, CONF_ID, CONF_NAME
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
import logging
import populartimes
import voluptuous as vol
# Module-level logger for this platform.
_LOGGER = logging.getLogger(__name__)
# Extend the base sensor schema: a Google API key, the place ID to query
# and a friendly entity name are all required.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_API_KEY): cv.string,
        vol.Required(CONF_ID): cv.string,
        vol.Required(CONF_NAME): cv.string,
    }
)
# Poll the popular-times data every 10 minutes.
SCAN_INTERVAL = timedelta(minutes=10)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Create one PopularTimesSensor from the platform configuration."""
    sensor = PopularTimesSensor(config["api_key"], config["id"], config["name"])
    # Second argument True: ask Home Assistant to call update() immediately.
    add_entities([sensor], True)
class PopularTimesSensor(Entity):
    """Sensor exposing the current popularity (%) of a Google Place.

    The state is the live popularity percentage; the per-weekday popularity
    curves and the place's name/address are exposed as attributes.
    """

    def __init__(self, api_key, id, name):
        """Store credentials and initialise empty state.

        Args:
            api_key: Google Places API key.
            id: Google place ID (parameter name kept for compatibility,
                although it shadows the builtin).
            name: friendly name for the entity.
        """
        self._api_key = api_key
        self._id = id
        self._name = name
        self._state = None
        # One popularity list per weekday, filled on update().
        self._attributes = {
            "maps_name": None,
            "address": None,
            "popularity_monday": None,
            "popularity_tuesday": None,
            "popularity_wednesday": None,
            "popularity_thursday": None,
            "popularity_friday": None,
            "popularity_saturday": None,
            "popularity_sunday": None,
        }

    @property
    def name(self):
        """Friendly name of the sensor."""
        return self._name

    @property
    def state(self):
        """Current popularity percentage (None until first update)."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Popularity is reported as a percentage."""
        return "%"

    @property
    def state_attributes(self):
        """Place metadata and the weekly popularity curves."""
        return self._attributes

    def update(self):
        """Get the latest data from Google Places API."""
        try:
            result = populartimes.get_id(self._api_key, self._id)
            self._attributes["address"] = result["address"]
            self._attributes["maps_name"] = result["name"]
            # populartimes orders the weekly data Monday-first.
            days = ("monday", "tuesday", "wednesday", "thursday",
                    "friday", "saturday", "sunday")
            for idx, day in enumerate(days):
                self._attributes["popularity_%s" % day] = result["populartimes"][idx]["data"]
            popularity = result.get("current_popularity", 0)
            self._state = popularity
        # Bug fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort behaviour for ordinary errors.
        except Exception:
            _LOGGER.error("No popularity info returned by the populartimes library.")
|
[
"voluptuous.Required",
"populartimes.get_id",
"datetime.timedelta",
"logging.getLogger"
] |
[((365, 392), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (382, 392), False, 'import logging\n'), ((600, 621), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(10)'}), '(minutes=10)\n', (609, 621), False, 'from datetime import timedelta\n'), ((450, 476), 'voluptuous.Required', 'vol.Required', (['CONF_API_KEY'], {}), '(CONF_API_KEY)\n', (462, 476), True, 'import voluptuous as vol\n'), ((497, 518), 'voluptuous.Required', 'vol.Required', (['CONF_ID'], {}), '(CONF_ID)\n', (509, 518), True, 'import voluptuous as vol\n'), ((539, 562), 'voluptuous.Required', 'vol.Required', (['CONF_NAME'], {}), '(CONF_NAME)\n', (551, 562), True, 'import voluptuous as vol\n'), ((1791, 1835), 'populartimes.get_id', 'populartimes.get_id', (['self._api_key', 'self._id'], {}), '(self._api_key, self._id)\n', (1810, 1835), False, 'import populartimes\n')]
|
from typing import Iterable
from eth2spec.test.helpers.constants import ALTAIR, MINIMAL, MAINNET, PHASE0
from eth2spec.test.altair.transition import (
test_transition as test_altair_transition,
test_activations_and_exits as test_altair_activations_and_exits,
test_leaking as test_altair_leaking,
test_slashing as test_altair_slashing,
test_operations as test_altair_operations,
)
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
from eth2spec.gen_helpers.gen_from_tests.gen import generate_from_tests
def create_provider(tests_src, preset_name: str, pre_fork_name: str, post_fork_name: str) -> gen_typing.TestProvider:
    """Wrap one transition test module in a TestProvider.

    No preparation is needed; the cases are generated from the given module,
    running the pre-fork phase against the post-fork spec.
    """
    def _prepare() -> None:
        # Nothing to set up before case generation.
        return

    def _cases() -> Iterable[gen_typing.TestCase]:
        return generate_from_tests(
            runner_name='transition',
            handler_name='core',
            src=tests_src,
            fork_name=post_fork_name,
            phase=pre_fork_name,
            preset_name=preset_name,
        )

    return gen_typing.TestProvider(prepare=_prepare, make_cases=_cases)
# (pre_fork, post_fork, test module) triples: each Altair transition suite is
# generated as a phase0 -> altair transition.
TRANSITION_TESTS = (
    (PHASE0, ALTAIR, test_altair_transition),
    (PHASE0, ALTAIR, test_altair_activations_and_exits),
    (PHASE0, ALTAIR, test_altair_leaking),
    (PHASE0, ALTAIR, test_altair_slashing),
    (PHASE0, ALTAIR, test_altair_operations),
)
if __name__ == "__main__":
    # Generate every suite for both the minimal and mainnet presets.
    for pre_fork, post_fork, transition_test_module in TRANSITION_TESTS:
        gen_runner.run_generator("transition", [
            create_provider(transition_test_module, MINIMAL, pre_fork, post_fork),
            create_provider(transition_test_module, MAINNET, pre_fork, post_fork),
        ])
|
[
"eth2spec.gen_helpers.gen_base.gen_typing.TestProvider",
"eth2spec.gen_helpers.gen_from_tests.gen.generate_from_tests"
] |
[((1023, 1087), 'eth2spec.gen_helpers.gen_base.gen_typing.TestProvider', 'gen_typing.TestProvider', ([], {'prepare': 'prepare_fn', 'make_cases': 'cases_fn'}), '(prepare=prepare_fn, make_cases=cases_fn)\n', (1046, 1087), False, 'from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing\n'), ((774, 937), 'eth2spec.gen_helpers.gen_from_tests.gen.generate_from_tests', 'generate_from_tests', ([], {'runner_name': '"""transition"""', 'handler_name': '"""core"""', 'src': 'tests_src', 'fork_name': 'post_fork_name', 'phase': 'pre_fork_name', 'preset_name': 'preset_name'}), "(runner_name='transition', handler_name='core', src=\n tests_src, fork_name=post_fork_name, phase=pre_fork_name, preset_name=\n preset_name)\n", (793, 937), False, 'from eth2spec.gen_helpers.gen_from_tests.gen import generate_from_tests\n')]
|
# -*- coding: utf-8 -*-
import time
from layout import Layout
from component import *
class Layout_222(Layout):
    """LCD layout with a date/time bar, three metric rows (water, gas,
    electricity: icon + value + unit each) and a bar graph at the bottom.
    """
    def __init__(self):
        """Build and position all components, then blank their values."""
        super(Layout_222, self).__init__(color = "black")
        self.ch1 = 18 # component height 1
        self.ch2 = 26 # component height 2
        self.sh1 = 2 # separator height 1
        self.bar = 25 # bar graph height
        # Offsets: rows and separators stacked from the top down
        self.row_1_y = self.ch1
        self.sep_2_y = self.row_1_y + self.ch2
        self.row_2_y = self.sep_2_y + self.sh1
        self.sep_3_y = self.row_2_y + self.ch2
        self.row_3_y = self.sep_3_y + self.sh1
        self.sep_4_y = self.row_3_y + self.ch2
        self.row_4_y = self.sep_4_y + self.sh1
        # Build the layout: top bar with date (left) and time (right)
        self.cdate = Component(72, self.ch1, font_size=14, bg_color=0, align=1)
        self.cdate.set_position(0, 0)
        self.cdate.set(time.strftime('%d-%b'))
        self.ctime = Component(56, self.ch1, font_size=14, bg_color=0, align=1)
        self.ctime.set_position(72, 0)
        self.ctime.set(time.strftime('%H:%M'))
        # self.ctime.draw_borders()
        # Row 1: water icon, value and unit (litres)
        self.wi = Component(self.ch2, self.ch2, font_size=20, image='tap-water1.jpg')
        self.wi.set_position(4, self.row_1_y)
        # self.wi.draw_borders()
        self.wv = Component(68, self.ch2, font_size=18)
        self.wv.set_position(30, self.row_1_y)
        self.wu = Component(self.ch2, self.ch2, font_size=16)
        self.wu.set_position(98, self.row_1_y)
        self.wu.set_text("Lit", 0, align=0)
        # Row 2: gas icon, value (2 decimals) and unit (cubic metres)
        self.gi = Component(self.ch2, self.ch2, font_size=20, image='gas_32x32.png')
        # self.gi = Component(self.ch2, self.ch2, font_size=16)
        # self.gi.set_text("1h", align=1)
        self.gi.set_position(4, self.row_2_y)
        self.gv = Component(68, self.ch2, font_size=18, format_string="{0:.2f}")
        self.gv.set_position(30, self.row_2_y)
        self.gu = Component(self.ch2, self.ch2, font_size=16)
        self.gu.set_position(98, self.row_2_y)
        self.gu.set_text("m" + u'\u00B3', 0, align=0)
        # Row 3: electricity icon, value (3 decimals) and unit (kW)
        self.ei = Component(self.ch2, self.ch2, font_size=20, image='plug1.png')
        self.ei.set_position(4, self.row_3_y)
        self.ev = Component(68, self.ch2, font_size=18, format_string="{0:.3f}")
        self.ev.set_position(30, self.row_3_y)
        self.eu = Component(self.ch2, self.ch2, font_size=16)
        self.eu.set_position(98, self.row_3_y)
        self.eu.set_text("kW", 0, align=0)
        # Bottom: full-width bar graph
        self.egraph = BarGraph(128, self.bar, bg_color=0)
        self.egraph.set_position(2, self.row_4_y)
        self.egraph.update()
        # --------------------------------------------------
        # Add components to the layout
        self.add([self.cdate, self.ctime])
        self.add([self.wi, self.wv, self.wu])
        self.add([self.gi, self.gv, self.gu])
        self.add([self.ei, self.ev, self.eu])
        self.add([self.egraph])
        self.clear_all()
    def clear_all(self):
        """Reset all metric values and the bar graph to zero."""
        self.wv.set(0)
        self.gv.set(0.0)
        self.ev.set(0.0)
        self.egraph.clear_bars()
    def set_date_time(self):
        """Refresh the date and time components from the system clock."""
        # tdate = time.strftime('%d-%b-%y')
        self.cdate.set(time.strftime('%d-%b'))
        self.ctime.set(time.strftime('%H:%M'))
if __name__ == '__main__':
    # Manual smoke test: draws the layout on the LCD and animates the bar
    # graph and counters.  Uses raw_input for pausing (Python 2).
    from lcd import LCD
    # Display Layout instance
    L2 = Layout_222()
    # Random values for test
    L2.wv.set(890)
    L2.gv.set(2.64)
    L2.ev.set(0.0)
    # LCD instance
    lcd = LCD(False)
    lcd.draw(L2)
    # Fill the first 18 bars with increasing heights
    for i in range(18):
        L2.egraph.set_bar(i, i+1)
        L2.set_date_time()
        lcd.update(L2)
    L2.egraph.set_bar(23,12.0)
    # Increment the metric counters while drawing descending bars
    for i in range(5):
        L2.wv.add(1)
        L2.gv.add(0.01)
        L2.ev.add(0.001)
        L2.set_date_time()
        L2.egraph.set_bar(18+i, 12 - (4 + i))
        lcd.update(L2)
    raw_input()  # pause before clearing (press Enter)
    L2.clear_all()
    lcd.draw(L2)
    # Four passes of six bars each, pausing between passes
    idx = 0
    for j in range(4):
        for i in range(6):
            L2.wv.add(1)
            L2.gv.add(0.01)
            L2.set_date_time()
            L2.egraph.set_bar(idx, float(2.11*(i+1)))
            # print float(2.11*(i+1))
            lcd.update(L2)
            idx += 1
        raw_input()
    lcd.close()
|
[
"time.strftime",
"lcd.LCD"
] |
[((3500, 3510), 'lcd.LCD', 'LCD', (['(False)'], {}), '(False)\n', (3503, 3510), False, 'from lcd import LCD\n'), ((875, 897), 'time.strftime', 'time.strftime', (['"""%d-%b"""'], {}), "('%d-%b')\n", (888, 897), False, 'import time\n'), ((1044, 1066), 'time.strftime', 'time.strftime', (['"""%H:%M"""'], {}), "('%H:%M')\n", (1057, 1066), False, 'import time\n'), ((3192, 3214), 'time.strftime', 'time.strftime', (['"""%d-%b"""'], {}), "('%d-%b')\n", (3205, 3214), False, 'import time\n'), ((3239, 3261), 'time.strftime', 'time.strftime', (['"""%H:%M"""'], {}), "('%H:%M')\n", (3252, 3261), False, 'import time\n')]
|
import xml.etree.ElementTree as ET
def parse_xml(anno_path):
    """Parse a PASCAL VOC annotation into [xmin, ymin, xmax, ymax, cls_id] rows.

    Coordinates are converted from VOC's 1-based pixels to 0-based and every
    box is validated against the image size.  Objects whose class name is not
    in the VOC-20 vocabulary are skipped.

    Args:
        anno_path: path (or open file object) of the VOC XML annotation.

    Returns:
        list of [xmin, ymin, xmax, ymax, cls_id] lists (0-based, inclusive).

    Raises:
        RuntimeError: if any bounding box lies outside the image bounds.
    """
    CLASSES = ('background',
               'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
               'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
               'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
    index_map = {name: idx for idx, name in enumerate(CLASSES)}

    root = ET.parse(anno_path).getroot()
    size_node = root.find('size')
    width = int(size_node.find('width').text)
    height = int(size_node.find('height').text)

    def validate_label(xmin, ymin, xmax, ymax, width, height):
        """Validate labels."""
        assert 0 <= xmin < width, "xmin must in [0, {}), given {}".format(width, xmin)
        assert 0 <= ymin < height, "ymin must in [0, {}), given {}".format(height, ymin)
        assert xmin < xmax <= width, "xmax must in (xmin, {}], given {}".format(width, xmax)
        assert ymin < ymax <= height, "ymax must in (ymin, {}], given {}".format(height, ymax)

    labels = []
    for obj in root.iter('object'):
        difficult = int(obj.find('difficult').text)  # parsed but currently unused
        cls_name = obj.find('name').text.strip().lower()
        if cls_name not in index_map:
            # Not part of the VOC-20 vocabulary; ignore this object.
            continue
        cls_id = index_map[cls_name]
        box = obj.find('bndbox')
        # Shift VOC's 1-based pixel coordinates to 0-based.
        xmin, ymin, xmax, ymax = (
            int(box.find(tag).text) - 1 for tag in ('xmin', 'ymin', 'xmax', 'ymax'))
        try:
            validate_label(xmin, ymin, xmax, ymax, width, height)
        except AssertionError as e:
            raise RuntimeError("Invalid label at {}, {}".format(anno_path, e))
        # labels.append([xmin, ymin, xmax, ymax, cls_id, difficult])
        labels.append([xmin, ymin, xmax, ymax, cls_id])
    return labels
|
[
"xml.etree.ElementTree.parse"
] |
[((390, 409), 'xml.etree.ElementTree.parse', 'ET.parse', (['anno_path'], {}), '(anno_path)\n', (398, 409), True, 'import xml.etree.ElementTree as ET\n')]
|
# encoding: UTF-8
from tests.base import TestCase
from vilya.models.issue import Issue
from vilya.models.project_issue import ProjectIssue
class TestProjectIssue(TestCase):
    def test_add_issue(self):
        """Adding a project issue stores title, description and project id."""
        p = ProjectIssue.add('test', 'test description', 'test', project=1)
        assert isinstance(p, ProjectIssue)
        assert p.title == 'test'
        assert p.description == 'test description'
        assert p.project_id == 1
        p.delete()  # clean up so later tests see an empty table
    def test_get_issue(self):
        """Issues are retrievable by issue_id, by number and via the cache,
        and the per-project/assignee/creator listing helpers filter correctly.
        """
        p = ProjectIssue.add('test', 'test description', 'test', project=1)
        # Lookup by issue id
        r = ProjectIssue.get(p.project_id, issue_id=p.issue_id)
        assert isinstance(r, ProjectIssue)
        assert r.project_id == 1
        # Lookup by per-project issue number
        r = ProjectIssue.get(p.project_id, number=p.number)
        assert isinstance(r, ProjectIssue)
        assert r.project_id == 1
        # Cached lookup returns the same project issue
        r = Issue.get_cached_issue(p.issue_id)
        assert isinstance(r, ProjectIssue)
        assert r.title == 'test'
        assert r.description == 'test description'
        assert r.project_id == 1
        # Four issues in project 1 (two assigned to 'assignee'), one in project 2
        p2 = ProjectIssue.add(
            'test2', 'test2 description', 'test', project=1,
            assignee='assignee')
        p3 = ProjectIssue.add(
            'test3', 'test3 description', 'test', project=1,
            assignee='assignee')
        p4 = ProjectIssue.add(
            'test4', 'test4 description', 'test', project=1, assignee='test')
        p5 = ProjectIssue.add(
            'test5', 'test5 description', 'test1', project=2, assignee='test')
        rs = ProjectIssue._gets_by_project_id(1)
        assert len(rs) == 4
        rs = ProjectIssue._get_issues_by_project_id(1)
        assert all([isinstance(i, ProjectIssue) for i in rs])
        assert len(rs) == 4
        rs = ProjectIssue.gets_by_assignee_id(1, 'assignee')
        assert all([isinstance(i, ProjectIssue) for i in rs])
        assert len(rs) == 2
        rs = ProjectIssue.gets_by_creator_id(1, 'test')
        assert all([isinstance(i, ProjectIssue) for i in rs])
        assert len(rs) == 4
        # Clean up all created issues
        for p in [p, p2, p3, p4, p5]:
            p.delete()
    def test_n_issue(self):
        """Open/closed counters per project, assignee and creator are correct."""
        # Project 1: p1/p2 closed, p3/p4 open; project 2: p5 (excluded below)
        p1 = ProjectIssue.add(
            'test1', 'test1 description', 'test', project=1,
            assignee='assignee')
        p1.close('test')
        p2 = ProjectIssue.add(
            'test2', 'test2 description', 'test', project=1,
            assignee='assignee')
        p2.close('test')
        p3 = ProjectIssue.add(
            'test3', 'test3 description', 'test', project=1,
            assignee='assignee')
        p4 = ProjectIssue.add(
            'test4', 'test4 description', 'test', project=1,
            assignee='test')
        p5 = ProjectIssue.add(
            'test5', 'test5 description', 'test1', project=2,
            assignee='test')
        count = ProjectIssue.get_count_by_project_id(1)
        assert count == 4
        count = ProjectIssue.get_count_by_project_id(1, 'open')
        assert count == 2
        count = ProjectIssue.get_count_by_project_id(1, 'closed')
        assert count == 2
        count = ProjectIssue.get_count_by_assignee_id(1, 'assignee')
        assert count == 3
        count = ProjectIssue.get_count_by_assignee_id(1, 'assignee', 'open')
        assert count == 1
        count = ProjectIssue.get_count_by_assignee_id(1, 'assignee', 'closed')
        assert count == 2
        count = ProjectIssue.get_count_by_creator_id(1, 'test')
        assert count == 4
        count = ProjectIssue.get_count_by_creator_id(1, 'test', 'open')
        assert count == 2
        count = ProjectIssue.get_count_by_creator_id(1, 'test', 'closed')
        assert count == 2
        # The counters are also exposed on a fetched issue instance
        r = ProjectIssue.get(p1.project_id, p1.issue_id)
        assert isinstance(r, ProjectIssue)
        assert r.n_closed_issues == 2
        assert r.n_open_issues == 2
        for p in [p1, p2, p3, p4, p5]:
            p.delete()
def test_open_and_close_issue(self):
p1 = ProjectIssue.add('test1', 'test1 description', 'test', project=1)
p2 = ProjectIssue.add('test2', 'test2 description', 'test', project=1)
p3 = ProjectIssue.add('test3', 'test3 description', 'test', project=1)
count = ProjectIssue.get_count_by_project_id(1)
assert count == 3
p1.close('test')
count = ProjectIssue.get_count_by_project_id(1, 'open')
assert count == 2
p1.open()
count = ProjectIssue.get_count_by_project_id(1, 'open')
assert count == 3
for p in [p1, p2, p3]:
p.delete()
def test_add_tags(self):
target_id = project_id = 1
p = ProjectIssue.add(
'test', 'test description', 'test', project=project_id)
assert isinstance(p, ProjectIssue)
assert p.title == 'test'
assert p.description == 'test description'
assert p.project_id == 1
tags = ['tag1', 'tag2', 'tag3']
p.add_tags(tags, target_id)
assert len(p.tags) == len(tags)
tag_names = [t.name for t in p.tags]
assert set(tags) & set(tag_names) == set(tags)
p.delete()
def test_gets_by_issue_ids(self):
project_id = 1
p = ProjectIssue.add(
'test', 'test description', 'test', project=project_id)
assert isinstance(p, ProjectIssue)
assert p.title == 'test'
assert p.description == 'test description'
assert p.project_id == 1
project_issues = ProjectIssue._gets_by_issue_ids(
[p.issue_id], state=None)
assert len(project_issues) == 1
pissue = project_issues[0]
assert isinstance(pissue, ProjectIssue)
assert pissue.project_id == project_id
project_issues = ProjectIssue._gets_by_issue_ids(
[p.issue_id], state="open")
assert len(project_issues) == 1
pissue = project_issues[0]
assert isinstance(pissue, ProjectIssue)
assert pissue.project_id == project_id
project_issues = ProjectIssue._gets_by_issue_ids(
[p.issue_id], state="closed")
assert len(project_issues) == 0
pissue.close("test")
project_issues = ProjectIssue._gets_by_issue_ids(
[p.issue_id], state="open")
assert len(project_issues) == 0
project_issues = ProjectIssue._gets_by_issue_ids(
[p.issue_id], state="closed")
assert len(project_issues) == 1
pissue = project_issues[0]
assert isinstance(pissue, ProjectIssue)
assert pissue.project_id == project_id
p.delete()
def test_gets_by_project_ids(self):
p1 = ProjectIssue.add('test1', 'desp', 'test', project=1)
p2 = ProjectIssue.add('test2', 'desp', 'test2', project=2)
p3 = ProjectIssue.add('test3', 'desp', 'test3', project=2)
issues = ProjectIssue.gets_by_project_ids([1, 2])
assert len(issues), 3
for p in [p1, p2, p3]:
p.delete()
|
[
"vilya.models.project_issue.ProjectIssue.gets_by_creator_id",
"vilya.models.project_issue.ProjectIssue._gets_by_issue_ids",
"vilya.models.project_issue.ProjectIssue.gets_by_assignee_id",
"vilya.models.project_issue.ProjectIssue._get_issues_by_project_id",
"vilya.models.project_issue.ProjectIssue.get_count_by_assignee_id",
"vilya.models.project_issue.ProjectIssue.gets_by_project_ids",
"vilya.models.project_issue.ProjectIssue._gets_by_project_id",
"vilya.models.issue.Issue.get_cached_issue",
"vilya.models.project_issue.ProjectIssue.get",
"vilya.models.project_issue.ProjectIssue.get_count_by_creator_id",
"vilya.models.project_issue.ProjectIssue.add",
"vilya.models.project_issue.ProjectIssue.get_count_by_project_id"
] |
[((220, 283), 'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test"""', '"""test description"""', '"""test"""'], {'project': '(1)'}), "('test', 'test description', 'test', project=1)\n", (236, 283), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((506, 569), 'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test"""', '"""test description"""', '"""test"""'], {'project': '(1)'}), "('test', 'test description', 'test', project=1)\n", (522, 569), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((582, 633), 'vilya.models.project_issue.ProjectIssue.get', 'ProjectIssue.get', (['p.project_id'], {'issue_id': 'p.issue_id'}), '(p.project_id, issue_id=p.issue_id)\n', (598, 633), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((723, 770), 'vilya.models.project_issue.ProjectIssue.get', 'ProjectIssue.get', (['p.project_id'], {'number': 'p.number'}), '(p.project_id, number=p.number)\n', (739, 770), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((860, 894), 'vilya.models.issue.Issue.get_cached_issue', 'Issue.get_cached_issue', (['p.issue_id'], {}), '(p.issue_id)\n', (882, 894), False, 'from vilya.models.issue import Issue\n'), ((1069, 1160), 'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test2"""', '"""test2 description"""', '"""test"""'], {'project': '(1)', 'assignee': '"""assignee"""'}), "('test2', 'test2 description', 'test', project=1, assignee=\n 'assignee')\n", (1085, 1160), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((1194, 1285), 'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test3"""', '"""test3 description"""', '"""test"""'], {'project': '(1)', 'assignee': '"""assignee"""'}), "('test3', 'test3 description', 'test', project=1, assignee=\n 'assignee')\n", (1210, 1285), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((1319, 1406), 
'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test4"""', '"""test4 description"""', '"""test"""'], {'project': '(1)', 'assignee': '"""test"""'}), "('test4', 'test4 description', 'test', project=1, assignee=\n 'test')\n", (1335, 1406), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((1428, 1516), 'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test5"""', '"""test5 description"""', '"""test1"""'], {'project': '(2)', 'assignee': '"""test"""'}), "('test5', 'test5 description', 'test1', project=2, assignee\n ='test')\n", (1444, 1516), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((1539, 1574), 'vilya.models.project_issue.ProjectIssue._gets_by_project_id', 'ProjectIssue._gets_by_project_id', (['(1)'], {}), '(1)\n', (1571, 1574), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((1617, 1658), 'vilya.models.project_issue.ProjectIssue._get_issues_by_project_id', 'ProjectIssue._get_issues_by_project_id', (['(1)'], {}), '(1)\n', (1655, 1658), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((1763, 1810), 'vilya.models.project_issue.ProjectIssue.gets_by_assignee_id', 'ProjectIssue.gets_by_assignee_id', (['(1)', '"""assignee"""'], {}), "(1, 'assignee')\n", (1795, 1810), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((1915, 1957), 'vilya.models.project_issue.ProjectIssue.gets_by_creator_id', 'ProjectIssue.gets_by_creator_id', (['(1)', '"""test"""'], {}), "(1, 'test')\n", (1946, 1957), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((2152, 2243), 'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test1"""', '"""test1 description"""', '"""test"""'], {'project': '(1)', 'assignee': '"""assignee"""'}), "('test1', 'test1 description', 'test', project=1, assignee=\n 'assignee')\n", (2168, 2243), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((2302, 2393), 
'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test2"""', '"""test2 description"""', '"""test"""'], {'project': '(1)', 'assignee': '"""assignee"""'}), "('test2', 'test2 description', 'test', project=1, assignee=\n 'assignee')\n", (2318, 2393), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((2452, 2543), 'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test3"""', '"""test3 description"""', '"""test"""'], {'project': '(1)', 'assignee': '"""assignee"""'}), "('test3', 'test3 description', 'test', project=1, assignee=\n 'assignee')\n", (2468, 2543), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((2577, 2664), 'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test4"""', '"""test4 description"""', '"""test"""'], {'project': '(1)', 'assignee': '"""test"""'}), "('test4', 'test4 description', 'test', project=1, assignee=\n 'test')\n", (2593, 2664), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((2698, 2786), 'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test5"""', '"""test5 description"""', '"""test1"""'], {'project': '(2)', 'assignee': '"""test"""'}), "('test5', 'test5 description', 'test1', project=2, assignee\n ='test')\n", (2714, 2786), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((2824, 2863), 'vilya.models.project_issue.ProjectIssue.get_count_by_project_id', 'ProjectIssue.get_count_by_project_id', (['(1)'], {}), '(1)\n', (2860, 2863), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((2906, 2953), 'vilya.models.project_issue.ProjectIssue.get_count_by_project_id', 'ProjectIssue.get_count_by_project_id', (['(1)', '"""open"""'], {}), "(1, 'open')\n", (2942, 2953), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((2996, 3045), 'vilya.models.project_issue.ProjectIssue.get_count_by_project_id', 'ProjectIssue.get_count_by_project_id', (['(1)', '"""closed"""'], {}), 
"(1, 'closed')\n", (3032, 3045), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((3089, 3141), 'vilya.models.project_issue.ProjectIssue.get_count_by_assignee_id', 'ProjectIssue.get_count_by_assignee_id', (['(1)', '"""assignee"""'], {}), "(1, 'assignee')\n", (3126, 3141), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((3184, 3244), 'vilya.models.project_issue.ProjectIssue.get_count_by_assignee_id', 'ProjectIssue.get_count_by_assignee_id', (['(1)', '"""assignee"""', '"""open"""'], {}), "(1, 'assignee', 'open')\n", (3221, 3244), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((3287, 3349), 'vilya.models.project_issue.ProjectIssue.get_count_by_assignee_id', 'ProjectIssue.get_count_by_assignee_id', (['(1)', '"""assignee"""', '"""closed"""'], {}), "(1, 'assignee', 'closed')\n", (3324, 3349), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((3393, 3440), 'vilya.models.project_issue.ProjectIssue.get_count_by_creator_id', 'ProjectIssue.get_count_by_creator_id', (['(1)', '"""test"""'], {}), "(1, 'test')\n", (3429, 3440), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((3483, 3538), 'vilya.models.project_issue.ProjectIssue.get_count_by_creator_id', 'ProjectIssue.get_count_by_creator_id', (['(1)', '"""test"""', '"""open"""'], {}), "(1, 'test', 'open')\n", (3519, 3538), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((3581, 3638), 'vilya.models.project_issue.ProjectIssue.get_count_by_creator_id', 'ProjectIssue.get_count_by_creator_id', (['(1)', '"""test"""', '"""closed"""'], {}), "(1, 'test', 'closed')\n", (3617, 3638), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((3678, 3722), 'vilya.models.project_issue.ProjectIssue.get', 'ProjectIssue.get', (['p1.project_id', 'p1.issue_id'], {}), '(p1.project_id, p1.issue_id)\n', (3694, 3722), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((3958, 4023), 
'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test1"""', '"""test1 description"""', '"""test"""'], {'project': '(1)'}), "('test1', 'test1 description', 'test', project=1)\n", (3974, 4023), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((4037, 4102), 'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test2"""', '"""test2 description"""', '"""test"""'], {'project': '(1)'}), "('test2', 'test2 description', 'test', project=1)\n", (4053, 4102), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((4116, 4181), 'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test3"""', '"""test3 description"""', '"""test"""'], {'project': '(1)'}), "('test3', 'test3 description', 'test', project=1)\n", (4132, 4181), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((4199, 4238), 'vilya.models.project_issue.ProjectIssue.get_count_by_project_id', 'ProjectIssue.get_count_by_project_id', (['(1)'], {}), '(1)\n', (4235, 4238), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((4306, 4353), 'vilya.models.project_issue.ProjectIssue.get_count_by_project_id', 'ProjectIssue.get_count_by_project_id', (['(1)', '"""open"""'], {}), "(1, 'open')\n", (4342, 4353), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((4414, 4461), 'vilya.models.project_issue.ProjectIssue.get_count_by_project_id', 'ProjectIssue.get_count_by_project_id', (['(1)', '"""open"""'], {}), "(1, 'open')\n", (4450, 4461), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((4620, 4692), 'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test"""', '"""test description"""', '"""test"""'], {'project': 'project_id'}), "('test', 'test description', 'test', project=project_id)\n", (4636, 4692), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((5178, 5250), 'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test"""', 
'"""test description"""', '"""test"""'], {'project': 'project_id'}), "('test', 'test description', 'test', project=project_id)\n", (5194, 5250), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((5450, 5507), 'vilya.models.project_issue.ProjectIssue._gets_by_issue_ids', 'ProjectIssue._gets_by_issue_ids', (['[p.issue_id]'], {'state': 'None'}), '([p.issue_id], state=None)\n', (5481, 5507), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((5717, 5776), 'vilya.models.project_issue.ProjectIssue._gets_by_issue_ids', 'ProjectIssue._gets_by_issue_ids', (['[p.issue_id]'], {'state': '"""open"""'}), "([p.issue_id], state='open')\n", (5748, 5776), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((5986, 6047), 'vilya.models.project_issue.ProjectIssue._gets_by_issue_ids', 'ProjectIssue._gets_by_issue_ids', (['[p.issue_id]'], {'state': '"""closed"""'}), "([p.issue_id], state='closed')\n", (6017, 6047), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((6157, 6216), 'vilya.models.project_issue.ProjectIssue._gets_by_issue_ids', 'ProjectIssue._gets_by_issue_ids', (['[p.issue_id]'], {'state': '"""open"""'}), "([p.issue_id], state='open')\n", (6188, 6216), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((6296, 6357), 'vilya.models.project_issue.ProjectIssue._gets_by_issue_ids', 'ProjectIssue._gets_by_issue_ids', (['[p.issue_id]'], {'state': '"""closed"""'}), "([p.issue_id], state='closed')\n", (6327, 6357), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((6614, 6666), 'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test1"""', '"""desp"""', '"""test"""'], {'project': '(1)'}), "('test1', 'desp', 'test', project=1)\n", (6630, 6666), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((6680, 6733), 'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test2"""', '"""desp"""', '"""test2"""'], {'project': '(2)'}), "('test2', 'desp', 
'test2', project=2)\n", (6696, 6733), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((6747, 6800), 'vilya.models.project_issue.ProjectIssue.add', 'ProjectIssue.add', (['"""test3"""', '"""desp"""', '"""test3"""'], {'project': '(2)'}), "('test3', 'desp', 'test3', project=2)\n", (6763, 6800), False, 'from vilya.models.project_issue import ProjectIssue\n'), ((6819, 6859), 'vilya.models.project_issue.ProjectIssue.gets_by_project_ids', 'ProjectIssue.gets_by_project_ids', (['[1, 2]'], {}), '([1, 2])\n', (6851, 6859), False, 'from vilya.models.project_issue import ProjectIssue\n')]
|
# https://www.terraform.io/docs/configuration/locals.html
import terrascript
import terrascript.aws
import terrascript.aws.d
from shared import assert_equals_json
def test():
    """Data (008)"""
    # Tags applied to the aws_ami data source lookup.
    ami_tags = dict(Name="app-server", Tested="true")
    config = terrascript.Terrascript()
    config += terrascript.aws.aws(version='~> 2.0', region='us-east-1')
    config += terrascript.aws.d.aws_ami(
        'example', most_recent=True, owners=['self'], tags=ami_tags)
    # Rendered JSON must match the checked-in fixture.
    assert_equals_json(config, 'test_008.tf.json')
|
[
"terrascript.aws.aws",
"shared.assert_equals_json",
"terrascript.Terrascript"
] |
[((213, 238), 'terrascript.Terrascript', 'terrascript.Terrascript', ([], {}), '()\n', (236, 238), False, 'import terrascript\n'), ((254, 311), 'terrascript.aws.aws', 'terrascript.aws.aws', ([], {'version': '"""~> 2.0"""', 'region': '"""us-east-1"""'}), "(version='~> 2.0', region='us-east-1')\n", (273, 311), False, 'import terrascript\n'), ((489, 535), 'shared.assert_equals_json', 'assert_equals_json', (['config', '"""test_008.tf.json"""'], {}), "(config, 'test_008.tf.json')\n", (507, 535), False, 'from shared import assert_equals_json\n')]
|
# cluster_features.py
#
# Based on snippets here:
# http://scikit-learn.org/dev/auto_examples/cluster/plot_cluster_iris.html#sphx-glr-auto-examples-cluster-plot-cluster-iris-py
from __future__ import print_function
import time
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.decomposition import PCA
def get_payment_data(csv_filename, label_column=24):
    """Load the credit-card default data set from a CSV file.

    Parameters
    ----------
    csv_filename : str
        Path to a CSV file whose first row holds the column names.
    label_column : int, optional
        Index of the label column.  Defaults to 24, the "default on
        payment" flag of the original data set.

    Returns
    -------
    tuple
        ``(X, Y, feature_names)`` where ``X`` contains every column except
        the last one, ``Y`` is the label column and ``feature_names`` is
        the list of original column headers.
    """
    df = pd.read_csv(csv_filename, header=0)
    # put the original column names in a python list
    feature_names = list(df.columns.values)
    # ``.values`` works on both legacy and current pandas; ``as_matrix()``
    # was removed in pandas 1.0.
    numpy_array = df.values
    Y = numpy_array[:, label_column]
    # All columns but the last; a plain slice replaces the former
    # Python-2-only ``xrange`` list comprehension (NameError on Python 3).
    X = numpy_array[:, :-1]
    return (X, Y, feature_names)
# Driver script: load the payment-default data, split/scale it, visualize a
# 3-component PCA projection, then cluster with 2-means and plot the labels.
# NOTE(review): this script targets a legacy stack --
# ``sklearn.cross_validation`` was removed in modern scikit-learn
# (now ``sklearn.model_selection``), ``KMeans(n_jobs=...)`` was removed in
# scikit-learn 1.0, and ``np.float`` was removed in NumPy 1.24.
if __name__ == "__main__":
    (X, Y, feature_names) = get_payment_data("default_on_payment.csv")
    print('Shape of the inputs: %s, shape of the labels: %s' % (str(X.shape), str(Y.shape)))
    # split into a training and testing set
    # Training instances: 22,500
    # Test instances: 7500
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=42)
    print('Train set inputs: %s' % (str(X_train.shape)))
    print('Test set inputs %s' % (str(X_test.shape)))
    print('Train set labels: %s' % (str(Y_train.shape)))
    print('Test set labels: %s' % (str(Y_test.shape)))
    # ---------------------------------------------------------------------------
    # Scaling
    # ----------------------------------------------------------------------------
    # Fit the scaler on the training split only, then apply it to both splits
    # so no test-set statistics leak into training.
    scaler = StandardScaler()
    scaler.fit(X_train)
    X_train = scaler.transform(X_train)
    X_test = scaler.transform(X_test)
    # ---------------------------------------------------------------------------
    # PCA Transformation of Features
    # ----------------------------------------------------------------------------
    # Project the scaled training data onto its first three principal
    # components for the 3-D scatter plot below.
    pca = PCA(n_components=3)
    X_train_new = pca.fit_transform(X_train, y=None)
    fig = plt.figure(1)
    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
    #ax.scatter(X_train_new[:, 0], X_train_new[:, 1], X_train_new[:, 2],c=labels.astype(np.float))
    ax.scatter(X_train_new[:, 0], X_train_new[:, 1], X_train_new[:, 2])
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
    ax.set_xlabel('PC0')
    ax.set_ylabel('PC1')
    ax.set_zlabel('PC2')
    # Blocks until the plot window is closed.
    plt.show()
    # ---------------------------------------------------------------------------
    # K-Means Clustering
    # ----------------------------------------------------------------------------
    num_clusters = 2
    classifier_KMC = KMeans(n_clusters = num_clusters, n_jobs=-1, random_state=1)
    # NOTE(review): start_time/end_time bracket the fit but are never printed;
    # labels1 below is likewise unused.
    start_time = time.time()
    classifier_KMC.fit(X_train, y=None)
    end_time = time.time()
    labels1 = classifier_KMC.labels_
    # Classify the train and test set vectors
    train_labels = classifier_KMC.predict(X_train)
    test_labels = classifier_KMC.predict(X_test)
    # Returns 68.9% on training set
    # NOTE(review): cluster ids are compared directly against the 0/1 class
    # labels here; k-means cluster numbering is arbitrary, so this accuracy is
    # only meaningful up to a label permutation -- confirm intent.
    accuracy_KMC_train = accuracy_score(Y_train, train_labels)
    accuracy_KMC_test = accuracy_score(Y_test, test_labels)
    # Plotting
    fig = plt.figure(1)
    plt.clf()
    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
    plt.cla()
    # Refit (second fit on the same data) to fetch labels for colouring.
    classifier_KMC.fit(X_train)
    labels = classifier_KMC.labels_
    #ax.scatter(X_train[:, 1], X_train[:, 2], X_train[:, 3], X_train[:, 3],c=labels.astype(np.float))
    # Only the first two features are plotted even though the axes are 3-D.
    ax.scatter(X_train[:, 0], X_train[:, 1],c=labels.astype(np.float))
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
    ax.set_xlabel('F0')
    ax.set_ylabel('F1')
    ax.set_zlabel('F2')
    plt.show()
    ## Plot the ground truth
    #fig = plt.figure(1)
    #plt.clf()
    #ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
    #plt.cla()
    # predictions_KMC = classifier_KMC.predict(X_test)
|
[
"sklearn.cross_validation.train_test_split",
"sklearn.preprocessing.StandardScaler",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.show",
"matplotlib.pyplot.clf",
"pandas.read_csv",
"sklearn.cluster.KMeans",
"sklearn.metrics.accuracy_score",
"time.time",
"numpy.shape",
"matplotlib.pyplot.figure",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.cla"
] |
[((695, 730), 'pandas.read_csv', 'pd.read_csv', (['csv_filename'], {'header': '(0)'}), '(csv_filename, header=0)\n', (706, 730), True, 'import pandas as pd\n'), ((1418, 1473), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.25)', 'random_state': '(42)'}), '(X, Y, test_size=0.25, random_state=42)\n', (1434, 1473), False, 'from sklearn.cross_validation import train_test_split\n'), ((1894, 1910), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1908, 1910), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2231, 2250), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(3)'}), '(n_components=3)\n', (2234, 2250), False, 'from sklearn.decomposition import PCA\n'), ((2319, 2332), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2329, 2332), True, 'import matplotlib.pyplot as plt\n'), ((2342, 2394), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {'rect': '[0, 0, 0.95, 1]', 'elev': '(48)', 'azim': '(134)'}), '(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134)\n', (2348, 2394), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((2749, 2759), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2757, 2759), True, 'import matplotlib.pyplot as plt\n'), ((2994, 3052), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'num_clusters', 'n_jobs': '(-1)', 'random_state': '(1)'}), '(n_clusters=num_clusters, n_jobs=-1, random_state=1)\n', (3000, 3052), False, 'from sklearn.cluster import KMeans\n'), ((3073, 3084), 'time.time', 'time.time', ([], {}), '()\n', (3082, 3084), False, 'import time\n'), ((3140, 3151), 'time.time', 'time.time', ([], {}), '()\n', (3149, 3151), False, 'import time\n'), ((3399, 3436), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_train', 'train_labels'], {}), '(Y_train, train_labels)\n', (3413, 3436), False, 'from sklearn.metrics import accuracy_score\n'), ((3461, 3496), 'sklearn.metrics.accuracy_score', 'accuracy_score', 
(['Y_test', 'test_labels'], {}), '(Y_test, test_labels)\n', (3475, 3496), False, 'from sklearn.metrics import accuracy_score\n'), ((3526, 3539), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3536, 3539), True, 'import matplotlib.pyplot as plt\n'), ((3544, 3553), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3551, 3553), True, 'import matplotlib.pyplot as plt\n'), ((3563, 3615), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {'rect': '[0, 0, 0.95, 1]', 'elev': '(48)', 'azim': '(134)'}), '(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134)\n', (3569, 3615), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((3619, 3628), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3626, 3628), True, 'import matplotlib.pyplot as plt\n'), ((4051, 4061), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4059, 4061), True, 'import matplotlib.pyplot as plt\n'), ((1013, 1034), 'numpy.shape', 'np.shape', (['numpy_array'], {}), '(numpy_array)\n', (1021, 1034), True, 'import numpy as np\n')]
|
from typing import List
from wai.json.object import StrictJSONObject
from wai.json.object.property import ArrayProperty, StringProperty, EnumProperty
class CategoriesModSpec(StrictJSONObject['CategoriesModSpec']):
    """
    Describes a category modification: which images to touch, and which
    categories to add to or remove from those images.
    """
    # Whether the listed categories are being added or removed.
    method: str = EnumProperty(values=("add", "remove"))

    # Non-empty, duplicate-free list of image names to modify.
    images: List[str] = ArrayProperty(
        element_property=StringProperty(min_length=1),
        min_elements=1,
        unique_elements=True)

    # Non-empty, duplicate-free list of category names to add/remove.
    categories: List[str] = ArrayProperty(
        element_property=StringProperty(min_length=1),
        min_elements=1,
        unique_elements=True)
|
[
"wai.json.object.property.StringProperty",
"wai.json.object.property.EnumProperty"
] |
[((419, 457), 'wai.json.object.property.EnumProperty', 'EnumProperty', ([], {'values': "('add', 'remove')"}), "(values=('add', 'remove'))\n", (431, 457), False, 'from wai.json.object.property import ArrayProperty, StringProperty, EnumProperty\n'), ((583, 611), 'wai.json.object.property.StringProperty', 'StringProperty', ([], {'min_length': '(1)'}), '(min_length=1)\n', (597, 611), False, 'from wai.json.object.property import ArrayProperty, StringProperty, EnumProperty\n'), ((792, 820), 'wai.json.object.property.StringProperty', 'StringProperty', ([], {'min_length': '(1)'}), '(min_length=1)\n', (806, 820), False, 'from wai.json.object.property import ArrayProperty, StringProperty, EnumProperty\n')]
|
import numpy as np
from numba import jit
import pyflann
from petsc4py import PETSc
from mpi4py import MPI
from speclus4py.types import DataObject, DataType, GraphType, OperatorType, OperatorContainer
@jit(nopython=True)
def get_global_index(x, y, ydim):
    """Flatten 2-D grid coordinates ``(x, y)`` into a row-major linear index."""
    return x * ydim + y
@jit(nopython=True)
def get_global_index_volumetric(x, y, z, xdim, ydim):
    """Flatten 3-D grid coordinates into a linear index (x varies fastest)."""
    return (z * ydim + y) * xdim + x
@jit(nopython=True)
def compute_gradient(v1, v2, sigma: float):
    """Gaussian similarity between two scalar values.

    Returns ``exp(-|v1 - v2|^2 / (2 * sigma^2))``; ``sigma`` controls how
    quickly the similarity decays with the difference.
    """
    # Renamed local from ``abs``, which shadowed the Python builtin.
    diff = np.abs(v1 - v2)
    return np.exp(-diff * diff / (2. * sigma * sigma))
@jit(nopython=True)
def compute_gradient_norm(v1, v2, sigma: float):
    """Gaussian similarity of two vectors based on their Euclidean distance."""
    distance = np.linalg.norm(v1 - v2)
    return np.exp(-(distance * distance) / (2. * sigma * sigma))
class OperatorAssembler(DataObject, OperatorContainer):
    def __init__(self, comm=MPI.COMM_WORLD, verbose=False):
        """Set up an operator assembler on the given MPI communicator.

        :param comm: MPI communicator the PETSc objects will live on
        :param verbose: forwarded to ``DataObject`` -- presumably enables
            progress output; behaviour is defined in the base class
        """
        # Initialize both bases explicitly (DataObject first, then the
        # operator container state).
        DataObject.__init__(self, comm, verbose)
        OperatorContainer.__init__(self)
        # Default graph orientation; can be changed via the ``graph_type``
        # property before assembly.
        self.__graph_type = GraphType.DIRECTED
    @property
    def graph_type(self) -> GraphType:
        """Orientation of the similarity graph (DIRECTED or UNDIRECTED)."""
        return self.__graph_type
    @graph_type.setter
    def graph_type(self, t: GraphType):
        # Select which adjacency-matrix builder will be used during assembly.
        self.__graph_type = t
    def setSimilarityFunc(self, fn, params):
        """Register a custom similarity function ``fn(v1, v2, params)``.

        NOTE(review): these private attributes are name-mangled to
        ``_OperatorAssembler__similarity_measure_*`` while the matrix
        builders read them back through ``self.getSimilarityMeasure()``
        (defined outside this view) -- confirm the container class resolves
        the same attributes.
        """
        self.__similarity_measure_fn = fn
        self.__similarity_measure_params = params
    def reset(self):
        """Discard previously assembled operator state (delegates to OperatorContainer)."""
        OperatorContainer.reset(self)
    def __construct_adjacency_matrix_general_data(self):
        """Build the k-NN similarity-graph adjacency matrix for point-cloud data.

        Builds a FLANN index over the samples, creates an N x N PETSc matrix
        and delegates to the directed or undirected builder depending on
        ``self.graph_type``.  The assembled matrix is stored in
        ``self.mat_adj``.
        """
        # First element of getData() holds the sample matrix (rows = samples).
        data = self.getData()[0]
        # determine dimension of a problem
        N = data.shape[0]
        # building index (FLANN - Fast Library for Approximate Nearest Neighbors)
        pyflann.set_distance_type('euclidean')
        flann = pyflann.FLANN()
        flann.build_index(data)
        # create matrix object
        self.mat_adj = PETSc.Mat()
        self.mat_adj.create(self.comm)
        self.mat_adj.setSizes([N, N])
        self.mat_adj.setType(self.mat_type)
        if self.graph_type == GraphType.DIRECTED:
            self.__construct_adjacency_matrix_general_data_directed_graph(flann)
        else:
            self.__construct_adjacency_matrix_general_data_undirected_graph(flann)
        # finalizing assembly of adjacency matrix
        self.mat_adj.assemble()
        del flann
    def __construct_adjacency_matrix_general_data_directed_graph(self, flann):
        """Fill ``self.mat_adj`` as a directed k-NN graph (k = connectivity).

        Each locally owned row I gets at most ``self.connectivity`` entries:
        the Gaussian similarity to its nearest neighbours (self-edges and
        zero similarities are skipped).

        :param flann: FLANN index already built over the sample matrix
        """
        # Each row holds at most ``connectivity`` nonzeros.
        self.mat_adj.setPreallocationNNZ(self.connectivity)
        self.mat_adj.setFromOptions()
        self.mat_adj.setUp()
        # Get function for measuring similarity and its parameters
        sim_func, sim_func_params = self.getSimilarityMeasure()
        if sim_func is None:
            sim_func = compute_gradient_norm
            if sim_func_params == PETSc.DEFAULT:
                sim_func_params = 0.5
        data = self.getData()[0]
        # building adjacency matrix of similarity graph
        # Only the locally owned block of rows is filled on this rank.
        i_start, i_end = self.mat_adj.getOwnershipRange()
        for I in range(i_start, i_end):
            v1 = data[I]
            # find nearest neighbours to sample v1
            # sometimes self-adjoint vertex is included, thus finding n+1 nearest neighbours
            result, dist = flann.nn_index(v1, self.connectivity + 1)
            used_nn = 0
            for J in range(0, self.connectivity + 1):
                idx = result[0, J]
                if idx != I and used_nn < self.connectivity:
                    v2 = data[result[0, J]]
                    g = sim_func(v1, v2, sim_func_params)
                    # Zero-similarity edges are dropped but still consume a
                    # neighbour slot only when positive.
                    if g > 0.:
                        self.mat_adj[I, idx] = g
                        used_nn += 1
                elif used_nn >= self.connectivity:
                    break
    def __construct_adjacency_matrix_general_data_undirected_graph(self, flann):
        """Fill ``self.mat_adj`` as a symmetric (undirected) k-NN graph.

        For every locally owned row I, each nearest neighbour edge is written
        in both orientations (I->idx and idx->I).

        NOTE(review): unlike the directed variant, this path sets no
        nonzero preallocation, and it uses all ``connectivity + 1`` returned
        neighbours except the self-edge (no ``used_nn`` cap) -- confirm both
        are intentional.

        :param flann: FLANN index already built over the sample matrix
        """
        self.mat_adj.setFromOptions()
        self.mat_adj.setUp()
        # Get function for measuring similarity and its parameters
        sim_func, sim_func_params = self.getSimilarityMeasure()
        if sim_func is None:
            sim_func = compute_gradient_norm
            if sim_func_params == PETSc.DEFAULT:
                sim_func_params = 0.5
        data = self.getData()[0]
        # building adjacency matrix of similarity graph
        # Only the locally owned block of rows is filled on this rank.
        i_start, i_end = self.mat_adj.getOwnershipRange()
        for I in range(i_start, i_end):
            v1 = data[I]
            # find nearest neighbours to sample v1
            # sometimes self-adjoint vertex is included, thus finding n+1 nearest neighbours
            result, dist = flann.nn_index(v1, self.connectivity + 1)
            for J in range(0, self.connectivity + 1):
                idx = result[0, J]
                if idx != I:
                    v2 = data[result[0, J]]
                    g = sim_func(v1, v2, sim_func_params)
                    if g > 0.:
                        # Write the edge symmetrically.
                        self.mat_adj[I, idx] = g
                        self.mat_adj[idx, I] = g
def __construct_adjacency_matrix_vol_img(self):
if self.connectivity != 6 and self.connectivity != 18 and self.connectivity != 26:
raise Exception('Connectivity (con) must be set to 6, 18, or 26')
# Get function for measuring similarity and its parameters
sim_func, sim_func_params = self.getSimilarityMeasure()
if sim_func is None:
sim_func = compute_gradient
if sim_func_params == PETSc.DEFAULT:
sim_func_params = 0.5
data = self.getData()[0]
# determine dimension of a problem
dims = data.GetDimensions()
dim_x = dims[0] - 1
dim_y = dims[1] - 1
dim_z = dims[2] - 1
N = dim_x * dim_y * dim_z
# create matrix object
self.mat_adj = PETSc.Mat()
self.mat_adj.create(self.comm)
self.mat_adj.setSizes([N, N])
self.mat_adj.setType(self.mat_type)
self.mat_adj.setPreallocationNNZ(self.connectivity)
self.mat_adj.setFromOptions()
self.mat_adj.setUp()
# compute local derivatives on structured non-uniform grid that is determined using sigma and
# connectivity of derivatives (6, 18, or 26)
data_scalars = data.GetCellData().GetScalars()
i_start, i_end = self.mat_adj.getOwnershipRange()
for I in range(i_start, i_end):
# determine (x, y, z)-coordinates
z = I // (dim_x * dim_y)
i = I - z * dim_x * dim_y
y = i // dim_x
x = i - y * dim_x
p1 = get_global_index_volumetric(x, y, z, dim_x, dim_y)
v1 = data_scalars.GetTuple1(p1) / 255.
if z > 0:
if self.connectivity > 6 and y > 0:
if self.connectivity == 26 and x > 0:
p2 = get_global_index_volumetric(x - 1, y - 1, z - 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
p2 = get_global_index_volumetric(x, y - 1, z - 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity == 26 and x < dim_x - 1:
p2 = get_global_index_volumetric(x + 1, y - 1, z - 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity > 6 and x > 0:
p2 = get_global_index_volumetric(x - 1, y, z - 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
p2 = get_global_index_volumetric(x, y, z - 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity > 6 and x < dim_x - 1:
p2 = get_global_index_volumetric(x + 1, y, z - 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity > 6 and y < dim_y - 1:
if self.connectivity == 26 and x > 0:
p2 = get_global_index_volumetric(x - 1, y + 1, z - 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
p2 = get_global_index_volumetric(x, y + 1, z - 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity == 26 and x < dim_x - 1:
p2 = get_global_index_volumetric(x + 1, y + 1, z - 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if y > 0:
if self.connectivity > 6 and x > 0:
p2 = get_global_index_volumetric(x - 1, y - 1, z, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
p2 = get_global_index_volumetric(x, y - 1, z, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity > 6 and x < dim_x - 1:
p2 = get_global_index_volumetric(x + 1, y - 1, z, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if x > 0:
p2 = get_global_index_volumetric(x - 1, y, z, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if x < dim_x - 1:
p2 = get_global_index_volumetric(x + 1, y, z, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if y < dim_y - 1:
if self.connectivity > 6 and x > 0:
p2 = get_global_index_volumetric(x - 1, y + 1, z, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
p2 = get_global_index_volumetric(x, y + 1, z, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity > 6 and x < dim_x - 1:
p2 = get_global_index_volumetric(x + 1, y + 1, z, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if z < dim_z - 1:
if self.connectivity > 6 and y > 0:
if self.connectivity == 26 and x > 0:
p2 = get_global_index_volumetric(x - 1, y - 1, z + 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
p2 = get_global_index_volumetric(x, y - 1, z + 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity == 26 and x < dim_x - 1:
p2 = get_global_index_volumetric(x + 1, y - 1, z + 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity > 6 and x > 0:
p2 = get_global_index_volumetric(x - 1, y, z + 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
p2 = get_global_index_volumetric(x, y, z + 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity > 6 and x < dim_x - 1:
p2 = get_global_index_volumetric(x + 1, y, z + 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity > 6 and y < dim_y - 1:
if self.connectivity == 26 and x > 0:
p2 = get_global_index_volumetric(x - 1, y + 1, z + 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
p2 = get_global_index_volumetric(x, y + 1, z + 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity == 26 and x < dim_x - 1:
p2 = get_global_index_volumetric(x + 1, y + 1, z + 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
# finalizing assembly of adjacency matrix
self.mat_adj.assemble()
def __construct_adjacency_matrix_img(self):
if self.connectivity != 4 and self.connectivity != 8:
PETSc.Sys.Print('Connectivity (con) must be set to 4 or 8')
raise PETSc.Error(62)
rows = self.data.shape[0]
cols = self.data.shape[1]
N = rows * cols
# Get function for measuring similarity and its parameters
sim_func, sim_func_params = self.getSimilarityMeasure()
if sim_func is None:
if len(self.data.shape) == 3:
sim_func = compute_gradient_norm
else:
sim_func = compute_gradient
if sim_func_params == PETSc.DEFAULT:
sim_func_params = 0.5
data = self.getData()[0]
# create matrix object
self.mat_adj = PETSc.Mat()
self.mat_adj.create(self.comm)
self.mat_adj.setSizes([N, N])
self.mat_adj.setType(self.mat_type)
self.mat_adj.setPreallocationNNZ(self.connectivity)
self.mat_adj.setFromOptions()
self.mat_adj.setUp()
i_start, i_end = self.mat_adj.getOwnershipRange()
for I in range(i_start, i_end):
# determine (x, y) coordinates
x = I // cols
y = I - x * cols
p1 = I
v1 = self.data[x, y] / 255.
if x > 0:
if y > 0 and self.connectivity == 8:
p2 = get_global_index(x - 1, y - 1, cols)
v2 = data[x - 1, y - 1] / 255.
self.mat_adj[p1, p2] = sim_func(v1, v2, sim_func_params)
p2 = get_global_index(x - 1, y, cols)
v2 = data[x - 1, y] / 255.
self.mat_adj[p1, p2] = sim_func(v1, v2, sim_func_params)
if y < cols - 1 and self.connectivity == 8:
p2 = get_global_index(x - 1, y + 1, cols)
v2 = data[x - 1, y + 1] / 255.
self.mat_adj[p1, p2] = sim_func(v1, v2, sim_func_params)
if y > 0:
p2 = get_global_index(x, y - 1, cols)
v2 = data[x, y - 1] / 255.
self.mat_adj[p1, p2] = sim_func(v1, v2, sim_func_params)
if y < cols - 1:
p2 = get_global_index(x, y + 1, cols)
v2 = data[x, y + 1] / 255.
self.mat_adj[p1, p2] = sim_func(v1, v2, sim_func_params)
if x < rows - 1:
if y > 0 and self.connectivity == 8:
p2 = get_global_index(x + 1, y - 1, cols)
v2 = data[x + 1, y - 1] / 255.
self.mat_adj[p1, p2] = sim_func(v1, v2, sim_func_params)
p2 = get_global_index(x + 1, y, cols)
v2 = data[x + 1, y] / 255.
self.mat_adj[p1, p2] = sim_func(v1, v2, sim_func_params)
if y < cols - 1 and self.connectivity == 8:
p2 = get_global_index(x + 1, y + 1, cols)
v2 = data[x + 1, y + 1] / 255.
self.mat_adj[p1, p2] = sim_func(v1, v2, sim_func_params)
# finalizing assembly of adjacency matrix
self.mat_adj.assemble()
def assembly(self):
self.reset()
data_type = self.getData()[1]
if self.fn_similarity_params is not None and self.verbose:
if type(self.fn_similarity_params) == float:
str_params = ', param=%.2f' % self.fn_similarity_params
else:
str_params = ', params=['
str_params += ''.join('{}, '.format(k) for k in self.fn_similarity_params)
str_params = str_params[:-2] + ']'
else:
str_params = ''
if data_type == DataType.IMG:
if self.connectivity == PETSc.DEFAULT:
self.connectivity = 4
if self.verbose:
s = 'Construct operator (%s, GRAPH_%s) for image: connectivity=%d'
v = (self.operator_type.name, GraphType.UNDIRECTED.name, self.connectivity)
PETSc.Sys.Print(s % v + str_params)
self.__construct_adjacency_matrix_img()
elif data_type == DataType.VOL_IMG:
if self.connectivity == PETSc.DEFAULT:
self.connectivity = 6
if self.verbose:
s = 'Construct operator (%s, GRAPH_%s) for volumetric image: connectivity=%d'
v = (self.operator_type.name, self.graph_type.name, self.connectivity)
PETSc.Sys.Print(s % v + str_params)
self.__construct_adjacency_matrix_vol_img()
else:
if self.connectivity == PETSc.DEFAULT:
self.connectivity = 3
if self.verbose:
s = 'Construct operator (%s, GRAPH_%s) for general data: connectivity=%d'
v = (self.operator_type.name, self.graph_type.name, self.connectivity)
PETSc.Sys.Print(s % v + str_params)
self.__construct_adjacency_matrix_general_data()
# if data_type == DataType.IMG:
# if self.connectivity == PETSc.DEFAULT:
# self.connectivity = 4
#
# if self.verbose:
# PETSc.Sys.Print(
# 'Construct operator (%s) for image: connectivity=%d, sigma=%2g'
# % (self.operator_type.name, self.connectivity, self.sigma)
# )
#
# self.__construct_adjacency_matrix_img()
# elif data_type == DataType.VOL_IMG: # volumetric image
# if self.connectivity == PETSc.DEFAULT:
# self.connectivity = 6
#
# if self.verbose:
# if self.fn_similarity_params is not None:
# s = 'Construct operator (%s, GRAPH_ %s) for volumetric image: connectivity=%d, '
# v = (self.operator_type.name, self.graph_type.name, self.connectivity)
# sv = s % v
# if type(self.fn_similarity_params) == float:
# sp = 'param=%.2f' % self.fn_similarity_params
# else:
# sp = 'params=('
# sp += ''.join('{}, '.format(k) for k in self.fn_similarity_params)
# sp = sp[:-2] + ')'
# sv += sp
# else:
# s = 'Construct operator (%s, GRAPH_%s) for volumetric image: connectivity=%d params=None'
# v = (self.operator_type.name, self.graph_type.name, self.connectivity)
# sv = s % v
# PETSc.Sys.Print(sv)
#
# exit(-1)
#
# self.__construct_adjacency_matrix_vol_img()
# else:
# if self.connectivity == PETSc.DEFAULT:
# self.connectivity = 6
#
# if self.verbose:
# PETSc.Sys.Print(
# 'Construct operator (%s) for general data: connectivity=%d, params=%2g'
# % (self.operator_type.name, self.connectivity, self.__similarity_measure_params)
# )
#
# self.__construct_adjacency_matrix_general_data()
N = self.mat_adj.getSize()[0]
# compute degree matrix D_i = deg(v_i)
self.vec_diag = self.mat_adj.createVecLeft()
self.mat_adj.getRowSum(self.vec_diag)
if self.operator_type != OperatorType.MARKOV_1 or self.operator_type != OperatorType.MARKOV_2:
self.mat_op = PETSc.Mat().createAIJ((N, N), comm=self.comm)
self.mat_op.setPreallocationNNZ(self.connectivity + 1)
self.mat_op.setFromOptions()
self.mat_op.setUp()
self.mat_op.setDiagonal(self.vec_diag)
self.mat_op.assemble()
# L = D - A
self.mat_op.axpy(-1., self.mat_adj)
else: # P = D^-1 A (MARKOV_1) or Ng, Weiss (MARKOV_2)
self.mat_op = self.mat_adj.duplicate()
self.mat_op.setFromOptions()
self.mat_op.setType(self.mat_type)
self.mat_op.setUp()
self.mat_op.copy(self.mat_op)
if self.operator_type != OperatorType.LAPLACIAN_UNNORMALIZED:
tmp_vec = self.vec_diag.duplicate()
self.vec_diag.copy(tmp_vec)
if self.operator_type == OperatorType.LAPLACIAN_NORMALIZED or self.operator_type == OperatorType.MARKOV_2:
tmp_vec.sqrtabs()
tmp_vec.reciprocal()
self.mat_op.diagonalScale(tmp_vec, tmp_vec)
elif self.operator_type == OperatorType.MARKOV_1:
tmp_vec.reciprocal()
self.mat_op.diagonalScale(tmp_vec)
else: # L_rw
tmp_vec.reciprocal()
self.mat_op.diagonalScale(tmp_vec) # left diagonal scale
del tmp_vec
self.mat_op.assemble()
|
[
"petsc4py.PETSc.Mat",
"speclus4py.types.DataObject.__init__",
"speclus4py.types.OperatorContainer.__init__",
"numpy.abs",
"speclus4py.types.OperatorContainer.reset",
"petsc4py.PETSc.Sys.Print",
"numba.jit",
"numpy.exp",
"numpy.linalg.norm",
"pyflann.FLANN",
"pyflann.set_distance_type",
"petsc4py.PETSc.Error"
] |
[((205, 223), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (208, 223), False, 'from numba import jit\n'), ((285, 303), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (288, 303), False, 'from numba import jit\n'), ((398, 416), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (401, 416), False, 'from numba import jit\n'), ((543, 561), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (546, 561), False, 'from numba import jit\n'), ((471, 486), 'numpy.abs', 'np.abs', (['(v1 - v2)'], {}), '(v1 - v2)\n', (477, 486), True, 'import numpy as np\n'), ((498, 540), 'numpy.exp', 'np.exp', (['(-abs * abs / (2.0 * sigma * sigma))'], {}), '(-abs * abs / (2.0 * sigma * sigma))\n', (504, 540), True, 'import numpy as np\n'), ((622, 645), 'numpy.linalg.norm', 'np.linalg.norm', (['(v1 - v2)'], {}), '(v1 - v2)\n', (636, 645), True, 'import numpy as np\n'), ((657, 701), 'numpy.exp', 'np.exp', (['(-norm * norm / (2.0 * sigma * sigma))'], {}), '(-norm * norm / (2.0 * sigma * sigma))\n', (663, 701), True, 'import numpy as np\n'), ((827, 867), 'speclus4py.types.DataObject.__init__', 'DataObject.__init__', (['self', 'comm', 'verbose'], {}), '(self, comm, verbose)\n', (846, 867), False, 'from speclus4py.types import DataObject, DataType, GraphType, OperatorType, OperatorContainer\n'), ((876, 908), 'speclus4py.types.OperatorContainer.__init__', 'OperatorContainer.__init__', (['self'], {}), '(self)\n', (902, 908), False, 'from speclus4py.types import DataObject, DataType, GraphType, OperatorType, OperatorContainer\n'), ((1306, 1335), 'speclus4py.types.OperatorContainer.reset', 'OperatorContainer.reset', (['self'], {}), '(self)\n', (1329, 1335), False, 'from speclus4py.types import DataObject, DataType, GraphType, OperatorType, OperatorContainer\n'), ((1587, 1625), 'pyflann.set_distance_type', 'pyflann.set_distance_type', (['"""euclidean"""'], {}), "('euclidean')\n", (1612, 1625), False, 'import 
pyflann\n'), ((1642, 1657), 'pyflann.FLANN', 'pyflann.FLANN', ([], {}), '()\n', (1655, 1657), False, 'import pyflann\n'), ((1745, 1756), 'petsc4py.PETSc.Mat', 'PETSc.Mat', ([], {}), '()\n', (1754, 1756), False, 'from petsc4py import PETSc\n'), ((5595, 5606), 'petsc4py.PETSc.Mat', 'PETSc.Mat', ([], {}), '()\n', (5604, 5606), False, 'from petsc4py import PETSc\n'), ((15149, 15160), 'petsc4py.PETSc.Mat', 'PETSc.Mat', ([], {}), '()\n', (15158, 15160), False, 'from petsc4py import PETSc\n'), ((14480, 14539), 'petsc4py.PETSc.Sys.Print', 'PETSc.Sys.Print', (['"""Connectivity (con) must be set to 4 or 8"""'], {}), "('Connectivity (con) must be set to 4 or 8')\n", (14495, 14539), False, 'from petsc4py import PETSc\n'), ((14558, 14573), 'petsc4py.PETSc.Error', 'PETSc.Error', (['(62)'], {}), '(62)\n', (14569, 14573), False, 'from petsc4py import PETSc\n'), ((18402, 18437), 'petsc4py.PETSc.Sys.Print', 'PETSc.Sys.Print', (['(s % v + str_params)'], {}), '(s % v + str_params)\n', (18417, 18437), False, 'from petsc4py import PETSc\n'), ((18853, 18888), 'petsc4py.PETSc.Sys.Print', 'PETSc.Sys.Print', (['(s % v + str_params)'], {}), '(s % v + str_params)\n', (18868, 18888), False, 'from petsc4py import PETSc\n'), ((19275, 19310), 'petsc4py.PETSc.Sys.Print', 'PETSc.Sys.Print', (['(s % v + str_params)'], {}), '(s % v + str_params)\n', (19290, 19310), False, 'from petsc4py import PETSc\n'), ((21894, 21905), 'petsc4py.PETSc.Mat', 'PETSc.Mat', ([], {}), '()\n', (21903, 21905), False, 'from petsc4py import PETSc\n')]
|
from util.inputReader import read_as_strings
def part1(slope_grid):
    """Count trees ('#') hit while descending two rows and one column per step.

    Rows wrap horizontally: the column index is taken modulo the row width.
    """
    slope = 1
    return sum(
        1
        for row, line in enumerate(slope_grid)
        if row % 2 == 0 and line[(row // 2 * slope) % len(line)] == '#'
    )
# Puzzle input: one string per grid row ('#' marks a tree).
grid = read_as_strings("../inputs/2020_03.txt")
print("part1:", part1(grid))
# part2 prints a precomputed product of per-slope tree counts.
print("part2:", 278 * 90 * 88 * 98 * 45)
|
[
"util.inputReader.read_as_strings"
] |
[((263, 303), 'util.inputReader.read_as_strings', 'read_as_strings', (['"""../inputs/2020_03.txt"""'], {}), "('../inputs/2020_03.txt')\n", (278, 303), False, 'from util.inputReader import read_as_strings\n')]
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for suite code."""
from absl.testing import absltest
from absl.testing import parameterized
import mock
import suite
# Dummy background-dataset path handed to suite.load in the test below.
DAVIS_PATH = '/tmp/davis'
class SuiteTest(parameterized.TestCase):
  """Checks that suite.load forwards its arguments correctly."""
  # Parameterize over every difficulty level; the mock decorators are applied
  # bottom-up, so the injected mocks arrive as (mock_dm_suite, mock_pixels).
  @parameterized.named_parameters(('none', None),
                                  ('easy', 'easy'),
                                  ('medium', 'medium'),
                                  ('hard', 'hard'))
  @mock.patch.object(suite, 'pixels')
  @mock.patch.object(suite, 'suite')
  def test_suite_load_with_difficulty(self, difficulty, mock_dm_suite,
                                      mock_pixels):
    """suite.load must call the wrapped suite and the pixel wrapper."""
    domain_name = 'cartpole'
    task_name = 'balance'
    suite.load(
        domain_name,
        task_name,
        difficulty,
        background_dataset_path=DAVIS_PATH)
    # the underlying dm_control suite is loaded with reward visuals disabled
    mock_dm_suite.load.assert_called_with(
        domain_name,
        task_name,
        environment_kwargs=None,
        task_kwargs=None,
        visualize_reward=False)
    # the environment is wrapped for pixel observations from camera 0
    mock_pixels.Wrapper.assert_called_with(
        mock.ANY,
        observation_key='pixels',
        pixels_only=True,
        render_kwargs={'camera_id': 0})
# Run the absl test runner when executed as a script.
if __name__ == '__main__':
  absltest.main()
|
[
"suite.load",
"mock.patch.object",
"absl.testing.parameterized.named_parameters",
"absl.testing.absltest.main"
] |
[((808, 916), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('none', None)", "('easy', 'easy')", "('medium', 'medium')", "('hard', 'hard')"], {}), "(('none', None), ('easy', 'easy'), ('medium',\n 'medium'), ('hard', 'hard'))\n", (838, 916), False, 'from absl.testing import parameterized\n'), ((1018, 1052), 'mock.patch.object', 'mock.patch.object', (['suite', '"""pixels"""'], {}), "(suite, 'pixels')\n", (1035, 1052), False, 'import mock\n'), ((1056, 1089), 'mock.patch.object', 'mock.patch.object', (['suite', '"""suite"""'], {}), "(suite, 'suite')\n", (1073, 1089), False, 'import mock\n'), ((1757, 1772), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (1770, 1772), False, 'from absl.testing import absltest\n'), ((1272, 1359), 'suite.load', 'suite.load', (['domain_name', 'task_name', 'difficulty'], {'background_dataset_path': 'DAVIS_PATH'}), '(domain_name, task_name, difficulty, background_dataset_path=\n DAVIS_PATH)\n', (1282, 1359), False, 'import suite\n')]
|
from Qt import QtWidgets
from Qt import QtCore
from Qt import QtGui
from . import const
from . import uiUtil
from .. import util
from .. import box
from .. import core
from functools import partial
import re
# Matches text starting with '=' (optionally surrounded by whitespace): such
# input in a parameter field is treated as an expression, not a literal value.
# BUG FIX: use a raw string — "\s" in a plain string is an invalid escape
# sequence (DeprecationWarning since Python 3.6).
ReEqual = re.compile(r"^\s*=\s*")
class ParamCreator(QtWidgets.QDialog):
    """Modal dialog for creating a new parameter (type combo + name field).

    After ``exec_()`` returns accepted, the chosen Python type and name are
    available via ``getType()`` / ``getName()``.
    """
    # label shown in the combo box -> Python type of the new parameter
    ParamTypes = {"bool": bool, "int": int, "float": float, "str": str}
    def __init__(self, parent=None):
        super(ParamCreator, self).__init__(parent=parent)
        self.__type = None
        self.__name = None
        main_layout = QtWidgets.QVBoxLayout()
        # row with type combo and name line edit
        param_layout = QtWidgets.QHBoxLayout()
        param_layout.addWidget(QtWidgets.QLabel("Type :"))
        self.__type_combo = QtWidgets.QComboBox()
        # NOTE(review): passes a dict keys view to addItems — assumes the Qt
        # binding accepts any iterable of str; confirm for the binding in use.
        self.__type_combo.addItems(ParamCreator.ParamTypes.keys())
        param_layout.addWidget(self.__type_combo)
        param_layout.addWidget(QtWidgets.QLabel("Name :"))
        self.__name_line = QtWidgets.QLineEdit()
        param_layout.addWidget(self.__name_line)
        main_layout.addLayout(param_layout)
        # Add / Cancel button row
        button_layout = QtWidgets.QHBoxLayout()
        self.__add = QtWidgets.QPushButton("Add")
        self.__cancel = QtWidgets.QPushButton("Cancel")
        button_layout.addWidget(self.__add)
        button_layout.addWidget(self.__cancel)
        main_layout.addLayout(button_layout)
        self.__add.clicked.connect(self.accept)
        self.__cancel.clicked.connect(self.reject)
        self.__type_combo.currentIndexChanged.connect(self.__typeChanged)
        self.__name_line.editingFinished.connect(self.__nameChanged)
        # disable "Add" while the name field is empty
        self.__name_line.textEdited.connect(self.__nameCheck)
        self.setLayout(main_layout)
    def exec_(self):
        """Reset the dialog to its initial state, then show it modally."""
        self.__name = None
        self.__type_combo.setCurrentIndex(0)
        # setCurrentIndex(0) emits no signal when already at 0, so sync manually
        self.__typeChanged(0)
        self.__name_line.setText("")
        self.__nameCheck("")
        return super(ParamCreator, self).exec_()
    def __typeChanged(self, a):
        # map the selected combo label back to its Python type
        self.__type = ParamCreator.ParamTypes[self.__type_combo.itemText(a)]
    def __nameChanged(self):
        self.__name = str(self.__name_line.text())
    def __nameCheck(self, text):
        # "Add" is only enabled for a non-empty name
        self.__add.setEnabled((True if text else False))
    def getType(self):
        # Python type selected in the combo box (None before first show)
        return self.__type
    def getName(self):
        # name entered by the user (None until editing finished)
        return self.__name
class ParamEnum(QtWidgets.QComboBox):
    """Combo box bound to an enum-like parameter.

    The combo shows one entry per parameter label; choosing an entry writes
    the selected index back to the parameter and emits ``Changed``.
    """
    # emitted after the bound parameter has been updated
    Changed = QtCore.Signal()
    def __init__(self, param, parent=None):
        super(ParamEnum, self).__init__(parent=parent)
        self.__param = param
        # populate entries and preselect the parameter's current value
        self.addItems(param.getLabels())
        self.setCurrentIndex(param.get())
        self.currentIndexChanged.connect(self.__onSelectionChanged)
    def __onSelectionChanged(self, new_index):
        # push the selection into the parameter, then notify listeners
        self.__param.set(new_index)
        self.Changed.emit()
class ParamStatus():
    """Display states for parameter editor widgets and the background
    colours used to signal them.

    Value           -- plain literal value, no expression attached
    Expression      -- a valid expression drives the value
    ExpressionError -- an expression is attached but fails to evaluate
    """
    # state codes
    Value = 0
    Expression = 1
    ExpressionError = 2
    # matching indicator colours (CSS hex strings)
    NormalColor = "#1D1D1D"
    ExpressionColor = "#0A4646"
    ExpressionErrorColor = "#640A28"
class CheckBox(QtWidgets.QCheckBox):
    """Check box whose interactive toggling can be switched on and off.

    When non-checkable, user clicks keep the current state instead of
    toggling it (the programmatic ``setChecked`` still works).
    """
    def __init__(self, checkable, parent=None):
        super(CheckBox, self).__init__(parent=parent)
        self.__interactive = checkable
    def nextCheckState(self):
        # toggle only while interactive; otherwise re-apply the current state
        target = not self.isChecked() if self.__interactive else self.isChecked()
        self.setChecked(target)
    def setCheckable(self, v):
        # enable/disable user-driven toggling
        self.__interactive = v
class ParamCheck(QtWidgets.QWidget):
    """Check box bound to a boolean parameter, with optional expression mode.

    Via the context menu an expression can be attached to the parameter;
    while an expression is set, the check box becomes non-toggleable and the
    indicator border colour reflects the expression state (see ParamStatus).
    """
    # emitted after the bound parameter's value or expression changed
    Changed = QtCore.Signal()
    def __init__(self, param, parent=None):
        super(ParamCheck, self).__init__(parent=parent)
        self.__normal_style = "QCheckBox::indicator{ border: 1px solid #1D1D1D; }"
        # NOTE(review): "margin 2px" is missing the colon ("margin: 2px") —
        # likely an ignored CSS property; confirm intended style.
        self.__exp_style = "QCheckBox::indicator{ margin 2px; border: 3px solid %s; }"
        self.__current_state = ParamStatus.Value
        self.__param = param
        layout = QtWidgets.QHBoxLayout()
        self.setLayout(layout)
        # check box for value mode, line edit for expression editing
        self.__check_box = CheckBox(True, self)
        self.__check_box.toggled.connect(self.__toggled)
        self.__exp_line = QtWidgets.QLineEdit(self)
        self.__exp_line.hide()
        layout.addWidget(self.__check_box)
        layout.addWidget(self.__exp_line)
        self.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
        # initialize the check state without triggering __toggled
        self.__check_box.blockSignals(True)
        self.__check_box.setChecked(self.__param.get())
        self.__check_box.blockSignals(False)
        self.__exp_line.editingFinished.connect(self.__expFinished)
        self.refresh()
    def contextMenuEvent(self, evnt):
        # context menu offers expression management
        menu = QtWidgets.QMenu(self)
        set_action = menu.addAction("Set Expression")
        delete_action = menu.addAction("Delete Expression")
        menu.popup(self.mapToGlobal(evnt.pos()))
        set_action.triggered.connect(self.__startSetExpression)
        delete_action.triggered.connect(self.__deleteExpression)
    def __toggled(self, v):
        # push the new check state into the parameter and notify listeners
        self.__param.set(v)
        self.Changed.emit()
    def __startSetExpression(self):
        # swap the check box for a line edit pre-filled with the expression
        self.__check_box.hide()
        self.__exp_line.show()
        if self.__param.hasExpression():
            self.__exp_line.setText(self.__param.getExpression())
        else:
            self.__exp_line.setText("= ")
        self.__exp_line.setFocus(QtCore.Qt.OtherFocusReason)
    def __deleteExpression(self):
        # back to plain value mode
        self.__check_box.show()
        self.__exp_line.hide()
        self.__param.setExpression(None)
        self.refresh()
    def refresh(self):
        """Sync check-box toggleability and indicator colour with the parameter."""
        if not self.__param.hasExpression():
            self.__current_state = ParamStatus.Value
            self.__check_box.setCheckable(True)
        elif self.__param.validExpression():
            self.__check_box.setCheckable(False)
            self.__current_state = ParamStatus.Expression
        else:
            self.__check_box.setCheckable(False)
            self.__current_state = ParamStatus.ExpressionError
        self.__setBackgroundColor()
    def __setBackgroundColor(self):
        # pick the stylesheet matching the current state
        if self.__current_state == ParamStatus.Value:
            s = self.__normal_style
        elif self.__current_state == ParamStatus.Expression:
            s = self.__exp_style % ParamStatus.ExpressionColor
        elif self.__current_state == ParamStatus.ExpressionError:
            s = self.__exp_style % ParamStatus.ExpressionErrorColor
        self.setStyleSheet(s)
    def __expFinished(self):
        # commit the typed expression (empty text clears it), then return to
        # the check-box view showing the resulting value
        txt = self.__exp_line.text()
        if txt:
            self.__param.setExpression(str(txt))
        else:
            self.__param.setExpression(None)
        self.Changed.emit()
        self.__check_box.setChecked(self.__param.get())
        self.__check_box.show()
        self.__exp_line.hide()
        self.refresh()
class ParamLine(QtWidgets.QLineEdit):
    """Line edit bound to a scalar parameter (int, float or str).

    The field accepts either a literal value or, when the text starts with
    '=', an expression stored on the parameter.  The background colour
    reflects the state (value / valid expression / broken expression, see
    ParamStatus).
    """
    # emitted after the bound parameter's value or expression changed
    Changed = QtCore.Signal()

    def __init__(self, param, parent=None, isInt=False, isFloat=False):
        super(ParamLine, self).__init__(parent=parent)
        self.__style = "QLineEdit{ background-color: %s; border: 1px solid #1D1D1D; }"
        self.__current_state = ParamStatus.Value
        self.__param = param
        # wire input filtering and commit handlers for the parameter's type
        if isInt:
            self.textEdited.connect(self.__intOnly)
            self.setAlignment(QtCore.Qt.AlignRight)
            self.editingFinished.connect(self.__intFinished)
        elif isFloat:
            self.textEdited.connect(self.__floatOnly)
            self.setAlignment(QtCore.Qt.AlignRight)
            self.editingFinished.connect(self.__floatFinished)
        else:
            self.editingFinished.connect(self.__strFinished)
        self.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
        # initialize the display without triggering the commit handlers
        self.blockSignals(True)
        self.setText(str(self.__param.get()))
        self.blockSignals(False)
        self.refresh()

    def contextMenuEvent(self, evnt):
        # context menu offers expression management
        menu = QtWidgets.QMenu(self)
        set_action = menu.addAction("Set Expression")
        delete_action = menu.addAction("Delete Expression")
        menu.popup(self.mapToGlobal(evnt.pos()))
        set_action.triggered.connect(self.__startSetExpression)
        delete_action.triggered.connect(self.__deleteExpression)

    def __startSetExpression(self):
        # show the current expression (or an '=' stub) ready for editing
        if self.__param.hasExpression():
            self.setText(self.__param.getExpression())
        else:
            self.setText("= ")
        # BUG FIX: QLineEdit has no setEditFocus(); the original call
        # self.setEditFocus(True) raised AttributeError.  Use setFocus(),
        # matching ParamCheck.__startSetExpression.
        self.setFocus(QtCore.Qt.OtherFocusReason)

    def __deleteExpression(self):
        self.__param.setExpression(None)
        self.refresh()

    def refresh(self):
        """Sync displayed text and background colour with the parameter."""
        if not self.__param.hasExpression():
            self.__current_state = ParamStatus.Value
        elif self.__param.validExpression():
            self.__current_state = ParamStatus.Expression
        else:
            self.__current_state = ParamStatus.ExpressionError
        self.blockSignals(True)
        # while focused, show the raw expression for editing; otherwise the value
        if self.hasFocus() and self.__param.hasExpression():
            self.setText(str(self.__param.getExpression()))
        else:
            self.setText(str(self.__param.get()))
        self.blockSignals(False)
        self.__setBackgroundColor()

    def focusInEvent(self, evnt):
        super(ParamLine, self).focusInEvent(evnt)
        self.refresh()

    def focusOutEvent(self, evnt):
        super(ParamLine, self).focusOutEvent(evnt)
        self.refresh()

    def __setBackgroundColor(self):
        # pick the stylesheet matching the current state
        if self.__current_state == ParamStatus.Value:
            s = self.__style % ParamStatus.NormalColor
        elif self.__current_state == ParamStatus.Expression:
            s = self.__style % ParamStatus.ExpressionColor
        elif self.__current_state == ParamStatus.ExpressionError:
            s = self.__style % ParamStatus.ExpressionErrorColor
        self.setStyleSheet(s)

    def __intOnly(self, txt):
        # strip non-integer characters unless the user is typing an expression
        if not ReEqual.search(txt):
            self.setText(Parameter.RegexInt.sub("", txt))

    def __floatOnly(self, txt):
        # strip non-float characters unless the user is typing an expression
        if not ReEqual.search(txt):
            self.setText(Parameter.RegexFloat.sub("", txt))

    def __intFinished(self):
        txt = str(self.text())
        if ReEqual.search(txt):
            # leading '=' means expression: store it instead of a value
            if self.__param.setExpression(txt):
                self.Changed.emit()
            return
        try:
            int(txt)
        except ValueError:  # was a bare except: — int() only raises ValueError here
            # not a valid int: restore the current parameter value
            self.setText(str(self.__param.get()))
        else:
            self.__param.setExpression(None)
            if not self.__param.set(int(txt)):
                self.setText(str(self.__param.get()))
            self.Changed.emit()

    def __floatFinished(self):
        txt = str(self.text())
        if ReEqual.search(txt):
            if self.__param.setExpression(txt):
                self.Changed.emit()
            return
        try:
            float(txt)
        except ValueError:  # was a bare except: — float() only raises ValueError here
            self.setText(str(self.__param.get()))
        else:
            self.__param.setExpression(None)
            if not self.__param.set(float(txt)):
                self.setText(str(self.__param.get()))
            self.Changed.emit()

    def __strFinished(self):
        txt = str(self.text())
        if ReEqual.search(txt):
            self.__param.setExpression(txt)
        else:
            self.__param.setExpression(None)
            self.__param.set(txt)
        self.Changed.emit()
class ColorPicker(QtCore.QObject):
    """Label + coloured button pair for picking an RGB colour.

    The r/g/b arguments are parameter objects with get()/set(); clicking the
    button opens a QColorDialog and writes the picked channels back.
    """
    # emitted after the swatch has been repainted with the current colour
    Changed = QtCore.Signal()
    def __init__(self, r, g, b):
        super(ColorPicker, self).__init__()
        self.__r = r
        self.__g = g
        self.__b = b
        self.__label = None
        self.__button = None
        # swatch stylesheet; filled with the current channel values in refresh()
        self.__style = "QPushButton{ background-color: rgb(%s, %s, %s); border: 1px solid #1D1D1D; }"
        self.initialize()
        self.refresh()
    def widgets(self):
        # widgets to embed into the parameter editor grid
        return [self.__label, self.__button]
    def initialize(self):
        """Create the label and the clickable colour swatch button."""
        self.__label = QtWidgets.QLabel("Color")
        self.__label.setMinimumWidth(const.ParamLabelMinimumWidth)
        self.__label.setMaximumWidth(const.ParamLabelMaximumWidth)
        self.__button = QtWidgets.QPushButton()
        self.__button.setFixedSize(18, 18)
        self.__button.clicked.connect(self.__pickColor)
    def refresh(self):
        """Repaint the swatch from the current r/g/b values and emit Changed."""
        self.__button.setStyleSheet(self.__style % (self.__r.get(), self.__g.get(), self.__b.get()))
        self.Changed.emit()
    def __pickColor(self):
        # open the dialog preset to the current colour
        color = QtWidgets.QColorDialog.getColor(QtGui.QColor(self.__r.get(), self.__g.get(), self.__b.get()), self.__button)
        if not color.isValid():
            return
        self.__r.set(color.red())
        self.__g.set(color.green())
        self.__b.set(color.blue())
        self.refresh()
class Parameter(QtCore.QObject):
    """Editor row for a single parameter: label + type-specific edit widget.

    The widget is chosen from the parameter's typeClass(): check box for
    bool, right-aligned numeric line edits for int/float, a plain line edit
    for str, and a combo box for core.PBEnum.  Deletable parameters get an
    extra remove button.
    """
    # character filters for the numeric line edits (everything NOT allowed)
    RegexInt = re.compile("[^0-9-]")
    RegexFloat = re.compile("[^0-9-.]")
    # forwarded when the edit widget reports a change
    ParameterEdited = QtCore.Signal()
    # emitted with the wrapped parameter when its remove button is clicked
    DeleteRequest = QtCore.Signal(object)
    def __init__(self, param, deletable=False):
        super(Parameter, self).__init__()
        self.__label = None
        self.__param = param
        self.__val_edit = None
        self.__delete_button = None
        self.__need_to_refresh = True
        self.__deletable = deletable
        self.__initialize()
    def widgets(self):
        # widgets to embed into the parameter editor grid
        widgets = [self.__label, self.__val_edit]
        if self.__delete_button:
            widgets.append(self.__delete_button)
        return widgets
    def refresh(self):
        # enum widgets manage their own state; others re-read the parameter
        if self.__need_to_refresh:
            self.__val_edit.refresh()
    def __initialize(self):
        """Create the label, the type-appropriate edit widget and, when the
        parameter is deletable, the remove button."""
        self.__label = QtWidgets.QLabel(self.__param.name())
        tc = self.__param.typeClass()
        if tc == bool:
            self.__val_edit = ParamCheck(self.__param)
            self.__val_edit.Changed.connect(self.__editedEmit)
        elif tc == int:
            self.__val_edit = ParamLine(self.__param, isInt=True)
            self.__val_edit.Changed.connect(self.__editedEmit)
        elif tc == float:
            self.__val_edit = ParamLine(self.__param, isFloat=True)
            self.__val_edit.Changed.connect(self.__editedEmit)
        elif tc == str:
            self.__val_edit = ParamLine(self.__param)
            self.__val_edit.Changed.connect(self.__editedEmit)
        elif tc == core.PBEnum:
            self.__val_edit = ParamEnum(self.__param)
            self.__val_edit.Changed.connect(self.__editedEmit)
            # ParamEnum has no refresh(); skip it in refresh()
            self.__need_to_refresh = False
        if self.__deletable:
            self.__delete_button = QtWidgets.QPushButton()
            self.__delete_button.setObjectName("RemoveButton")
            self.__delete_button.setFixedSize(14, 14)
            self.__delete_button.setFocusPolicy(QtCore.Qt.NoFocus)
            self.__delete_button.clicked.connect(self.__deleteParam)
    def __deleteParam(self):
        # ask the owning editor to remove this parameter
        self.DeleteRequest.emit(self.__param)
    def __editedEmit(self):
        self.ParameterEdited.emit()
class ParamEditor(QtWidgets.QWidget):
    """Side panel that edits the parameters of the currently selected block.

    Shows the block's type, an editable name field, one :class:`Parameter`
    row per parameter and, for expandable blocks, a button that adds extra
    parameters via :class:`ParamCreator`.
    """
    # Emitted as (block, new_name) after a successful rename.
    BlockRenamed = QtCore.Signal(object, str)
    # NOTE(review): declared but never emitted in this class — presumably
    # relayed by an owner widget; confirm before removing.
    DeleteRequest = QtCore.Signal(object, object)
    # Emitted with the block whose node view must be redrawn.
    NodeRefreshRequest = QtCore.Signal(object)

    def __init__(self, parent=None):
        super(ParamEditor, self).__init__()
        self.__bloc = None
        self.__param_layout = None
        self.__block_type_label = None
        self.__name_label = None
        self.__block_name = None
        self.__add_param_button = None
        self.__param_creator = None
        self.__params = []
        self.__initialize()
        self.__refresh()

    def setBlock(self, bloc):
        """Display *bloc* in the editor; no-op when it is already shown."""
        if self.__bloc == bloc:
            return
        self.__bloc = bloc
        self.__refresh()

    def forceRefresh(self):
        """Rebuild the whole panel even if the block did not change."""
        self.__refresh()

    def __initialize(self):
        """Build the static widget hierarchy (called once from __init__)."""
        # scroll area wrapping all contents
        main_layout = QtWidgets.QVBoxLayout()
        self.setLayout(main_layout)
        contents_widget = QtWidgets.QWidget()
        contents_layout = QtWidgets.QVBoxLayout()
        contents_layout.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
        contents_widget.setLayout(contents_layout)
        scroll_area = QtWidgets.QScrollArea()
        scroll_area.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        scroll_area.setWidgetResizable(True)
        scroll_area.setWidget(contents_widget)
        main_layout.addWidget(scroll_area)
        # grid that receives one row per parameter
        self.__param_layout = QtWidgets.QGridLayout()
        self.__param_layout.setSpacing(10)
        self.__param_layout.setVerticalSpacing(5)
        self.__block_type_label = QtWidgets.QLabel()
        self.__block_type_label.setAlignment(QtCore.Qt.AlignCenter)
        # name row: "Name" label + line edit
        name_layout = QtWidgets.QHBoxLayout()
        self.__block_name = QtWidgets.QLineEdit()
        self.__name_label = QtWidgets.QLabel("Name")
        self.__name_label.setMinimumWidth(const.ParamLabelMinimumWidth + 4)
        self.__name_label.setMaximumWidth(const.ParamLabelMaximumWidth)
        name_layout.addWidget(self.__name_label)
        name_layout.addWidget(self.__block_name)
        name_layout.setAlignment(QtCore.Qt.AlignLeft)
        name_layout.addStretch(10)
        self.__block_name.setMaximumWidth(const.ParamEditorBlockNameMaximumWidth)
        # "+" button for expandable blocks
        add_layout = QtWidgets.QHBoxLayout()
        self.__add_param_button = QtWidgets.QPushButton()
        self.__add_param_button.setObjectName("AddButton")
        self.__add_param_button.setFixedSize(18, 18)
        self.__add_param_button.setFocusPolicy(QtCore.Qt.NoFocus)
        add_layout.setAlignment(QtCore.Qt.AlignCenter)
        add_layout.addWidget(self.__add_param_button)
        contents_layout.addWidget(self.__block_type_label)
        contents_layout.addLayout(name_layout)
        contents_layout.addLayout(self.__param_layout)
        contents_layout.addLayout(add_layout)
        self.__add_param_button.hide()
        self.__param_creator = ParamCreator(self)
        self.__add_param_button.clicked.connect(self.__addParam)
        self.__block_name.editingFinished.connect(self.__renameBloc)
        self.__block_name.textEdited.connect(self.__nameCheck)

    def __nameCheck(self, txt):
        # Strip forbidden characters from the block name as it is typed.
        self.__block_name.setText(util.ReForbiddenName.sub("", txt))

    def __renameBloc(self):
        """Apply the name typed into the name field to the current block.

        Bug fix: the original referenced the non-existent attribute
        ``self.__block`` (a typo for ``self.__bloc``) and its guard clauses
        did not return, so finishing an edit with no block selected — or
        with an orphan block — raised AttributeError.
        """
        if not self.__bloc:
            # No block selected: there is no name to restore.
            self.__block_name.setText("")
            return
        parent_box = self.__bloc.parent()
        if not parent_box:
            # An orphan block cannot be renamed uniquely; restore its name.
            self.__block_name.setText(self.__bloc.name())
            return
        old_name = self.__bloc.name()
        new_name = self.__block_name.text()
        if old_name == new_name:
            return
        if not util.ValidateName(new_name):
            self.__block_name.setText(old_name)
            return
        # Let the parent box resolve clashes with sibling block names.
        uniq_name = parent_box.getUniqueName(self.__bloc, new_name)
        self.__bloc.rename(uniq_name)
        self.__block_name.setText(uniq_name)
        if old_name == uniq_name:
            return
        self.BlockRenamed.emit(self.__bloc, uniq_name)

    def __refresh(self):
        """Clear and rebuild the panel for the current block (or empty state)."""
        self.__params = []
        self.__clearLayout(self.__param_layout)
        if self.__bloc is None:
            self.__block_type_label.setText("")
            self.__block_name.setText("")
            self.__block_type_label.hide()
            self.__name_label.hide()
            self.__block_name.hide()
            self.__add_param_button.hide()
        else:
            self.__block_type_label.show()
            self.__name_label.show()
            self.__block_name.show()
            self.__block_name.setText(self.__bloc.name())
            self.__block_type_label.setText("<{}>".format(self.__bloc.__class__.__name__))
        if self.__bloc and self.__bloc.expandable():
            self.__add_param_button.show()
        else:
            self.__add_param_button.hide()
        if isinstance(self.__bloc, box.SceneContext):
            # The scene context block cannot be renamed.
            self.__block_name.setEnabled(False)
        else:
            self.__block_name.setEnabled(True)
        self.__build_params()

    def __build_params(self):
        """Populate the grid with one editor row per parameter."""
        r = 0
        if self.__bloc is None:
            return
        if self.__bloc.isBlank():
            # Blank blocks expose only an RGB colour picker.
            pm = ColorPicker(self.__bloc.param("r"), self.__bloc.param("g"), self.__bloc.param("b"))
            pm.Changed.connect(partial(self.NodeRefreshRequest.emit, self.__bloc))
            self.__params.append(pm)
            for c, pw in enumerate(pm.widgets()):
                self.__param_layout.addWidget(pw, r, c)
            r += 1
            return
        # Parameters driven by a connected input port are shown read-only.
        to_disable = set()
        for ip in self.__bloc.inputs():
            if ip.hasLinkedParam() and ip.isConnected():
                to_disable.add(ip.linkedParam().name())
        for p in self.__bloc.params(includeExtraParam=False):
            pm = Parameter(p)
            pm.ParameterEdited.connect(self.__update_all_params)
            pm.DeleteRequest.connect(self.__deleteParam)
            self.__params.append(pm)
            enable = p.name() not in to_disable
            for c, pw in enumerate(pm.widgets()):
                pw.setEnabled(enable)
                self.__param_layout.addWidget(pw, r, c)
            r += 1
        for p in self.__bloc.extraParams():
            # User-added parameters get a delete button.
            pm = Parameter(p, deletable=True)
            pm.ParameterEdited.connect(self.__update_all_params)
            pm.DeleteRequest.connect(self.__deleteParam)
            self.__params.append(pm)
            enable = p.name() not in to_disable
            for c, pw in enumerate(pm.widgets()):
                pw.setEnabled(enable)
                self.__param_layout.addWidget(pw, r, c)
            r += 1

    def __addParam(self):
        """Ask the user for a type/name pair and add an extra parameter."""
        if self.__param_creator.exec_() == QtWidgets.QDialog.Accepted:
            type_class = self.__param_creator.getType()
            name = self.__param_creator.getName()
            if type_class and name:
                self.__bloc.addExtraParam(type_class, name=name)
                self.__refresh()

    def __deleteParam(self, param):
        self.__bloc.removeParam(param)
        self.__refresh()

    def __update_all_params(self):
        # One edited value may affect linked parameters; refresh every row.
        for p in self.__params:
            p.refresh()

    def __clearLayout(self, layout):
        """Recursively remove and unparent every item held by *layout*."""
        while (True):
            item = layout.takeAt(0)
            if item:
                l = item.layout()
                w = item.widget()
                if l:
                    self.__clearLayout(l)
                if w:
                    layout.removeWidget(w)
                    w.setParent(None)
            else:
                break
|
[
"Qt.QtCore.Signal",
"functools.partial",
"Qt.QtWidgets.QLabel",
"Qt.QtWidgets.QHBoxLayout",
"Qt.QtWidgets.QLineEdit",
"Qt.QtWidgets.QVBoxLayout",
"Qt.QtWidgets.QGridLayout",
"Qt.QtWidgets.QPushButton",
"Qt.QtWidgets.QWidget",
"Qt.QtWidgets.QScrollArea",
"Qt.QtWidgets.QMenu",
"Qt.QtWidgets.QComboBox",
"re.compile"
] |
[((220, 246), 're.compile', 're.compile', (['"""^\\\\s*[=]\\\\s*"""'], {}), "('^\\\\s*[=]\\\\s*')\n", (230, 246), False, 'import re\n'), ((2342, 2357), 'Qt.QtCore.Signal', 'QtCore.Signal', ([], {}), '()\n', (2355, 2357), False, 'from Qt import QtCore\n'), ((3370, 3385), 'Qt.QtCore.Signal', 'QtCore.Signal', ([], {}), '()\n', (3383, 3385), False, 'from Qt import QtCore\n'), ((6639, 6654), 'Qt.QtCore.Signal', 'QtCore.Signal', ([], {}), '()\n', (6652, 6654), False, 'from Qt import QtCore\n'), ((11065, 11080), 'Qt.QtCore.Signal', 'QtCore.Signal', ([], {}), '()\n', (11078, 11080), False, 'from Qt import QtCore\n'), ((12393, 12414), 're.compile', 're.compile', (['"""[^0-9-]"""'], {}), "('[^0-9-]')\n", (12403, 12414), False, 'import re\n'), ((12432, 12454), 're.compile', 're.compile', (['"""[^0-9-.]"""'], {}), "('[^0-9-.]')\n", (12442, 12454), False, 'import re\n'), ((12477, 12492), 'Qt.QtCore.Signal', 'QtCore.Signal', ([], {}), '()\n', (12490, 12492), False, 'from Qt import QtCore\n'), ((12513, 12534), 'Qt.QtCore.Signal', 'QtCore.Signal', (['object'], {}), '(object)\n', (12526, 12534), False, 'from Qt import QtCore\n'), ((14585, 14611), 'Qt.QtCore.Signal', 'QtCore.Signal', (['object', 'str'], {}), '(object, str)\n', (14598, 14611), False, 'from Qt import QtCore\n'), ((14632, 14661), 'Qt.QtCore.Signal', 'QtCore.Signal', (['object', 'object'], {}), '(object, object)\n', (14645, 14661), False, 'from Qt import QtCore\n'), ((14687, 14708), 'Qt.QtCore.Signal', 'QtCore.Signal', (['object'], {}), '(object)\n', (14700, 14708), False, 'from Qt import QtCore\n'), ((531, 554), 'Qt.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (552, 554), False, 'from Qt import QtWidgets\n'), ((579, 602), 'Qt.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (600, 602), False, 'from Qt import QtWidgets\n'), ((690, 711), 'Qt.QtWidgets.QComboBox', 'QtWidgets.QComboBox', ([], {}), '()\n', (709, 711), False, 'from Qt import QtWidgets\n'), ((915, 936), 
'Qt.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', ([], {}), '()\n', (934, 936), False, 'from Qt import QtWidgets\n'), ((1055, 1078), 'Qt.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (1076, 1078), False, 'from Qt import QtWidgets\n'), ((1100, 1128), 'Qt.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Add"""'], {}), "('Add')\n", (1121, 1128), False, 'from Qt import QtWidgets\n'), ((1153, 1184), 'Qt.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['"""Cancel"""'], {}), "('Cancel')\n", (1174, 1184), False, 'from Qt import QtWidgets\n'), ((3753, 3776), 'Qt.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (3774, 3776), False, 'from Qt import QtWidgets\n'), ((3939, 3964), 'Qt.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self'], {}), '(self)\n', (3958, 3964), False, 'from Qt import QtWidgets\n'), ((4437, 4458), 'Qt.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self'], {}), '(self)\n', (4452, 4458), False, 'from Qt import QtWidgets\n'), ((7652, 7673), 'Qt.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self'], {}), '(self)\n', (7667, 7673), False, 'from Qt import QtWidgets\n'), ((11549, 11574), 'Qt.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""Color"""'], {}), "('Color')\n", (11565, 11574), False, 'from Qt import QtWidgets\n'), ((11733, 11756), 'Qt.QtWidgets.QPushButton', 'QtWidgets.QPushButton', ([], {}), '()\n', (11754, 11756), False, 'from Qt import QtWidgets\n'), ((15375, 15398), 'Qt.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (15396, 15398), False, 'from Qt import QtWidgets\n'), ((15462, 15481), 'Qt.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (15479, 15481), False, 'from Qt import QtWidgets\n'), ((15508, 15531), 'Qt.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (15529, 15531), False, 'from Qt import QtWidgets\n'), ((15685, 15708), 'Qt.QtWidgets.QScrollArea', 'QtWidgets.QScrollArea', ([], {}), '()\n', (15706, 15708), False, 'from Qt import QtWidgets\n'), ((15996, 16019), 
'Qt.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', ([], {}), '()\n', (16017, 16019), False, 'from Qt import QtWidgets\n'), ((16147, 16165), 'Qt.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (16163, 16165), False, 'from Qt import QtWidgets\n'), ((16257, 16280), 'Qt.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (16278, 16280), False, 'from Qt import QtWidgets\n'), ((16309, 16330), 'Qt.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', ([], {}), '()\n', (16328, 16330), False, 'from Qt import QtWidgets\n'), ((16359, 16383), 'Qt.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""Name"""'], {}), "('Name')\n", (16375, 16383), False, 'from Qt import QtWidgets\n'), ((16823, 16846), 'Qt.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (16844, 16846), False, 'from Qt import QtWidgets\n'), ((16881, 16904), 'Qt.QtWidgets.QPushButton', 'QtWidgets.QPushButton', ([], {}), '()\n', (16902, 16904), False, 'from Qt import QtWidgets\n'), ((634, 660), 'Qt.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""Type :"""'], {}), "('Type :')\n", (650, 660), False, 'from Qt import QtWidgets\n'), ((860, 886), 'Qt.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""Name :"""'], {}), "('Name :')\n", (876, 886), False, 'from Qt import QtWidgets\n'), ((14108, 14131), 'Qt.QtWidgets.QPushButton', 'QtWidgets.QPushButton', ([], {}), '()\n', (14129, 14131), False, 'from Qt import QtWidgets\n'), ((19855, 19905), 'functools.partial', 'partial', (['self.NodeRefreshRequest.emit', 'self.__bloc'], {}), '(self.NodeRefreshRequest.emit, self.__bloc)\n', (19862, 19905), False, 'from functools import partial\n')]
|
import pytest
from requests import codes
from tilapya.errors import TransLinkAPIError
from tilapya.gtfsrt import GTFSRT
from .conftest import remove_response_headers_func
# Apply VCR to all tests in this file.
pytestmark = pytest.mark.vcr(before_record_response=remove_response_headers_func('Set-Cookie'))
@pytest.fixture
def authed_gtfs(valid_api_key):
    """GTFSRT client authenticated with the shared valid-API-key fixture."""
    return GTFSRT(api_key=valid_api_key)
def test_download_realtime(authed_gtfs):
    """Trip-updates feed downloads with a non-empty payload."""
    assert authed_gtfs.trip_updates().content
def test_download_position(authed_gtfs):
    """Vehicle-position feed downloads with a non-empty payload."""
    assert authed_gtfs.position().content
def test_download_alerts(authed_gtfs):
    """Service-alerts feed downloads with a non-empty payload."""
    assert authed_gtfs.service_alerts().content
def test_gtfsrt_invalid_key():
    """A bad API key yields HTTP 403 wrapped in TransLinkAPIError with no body."""
    with pytest.raises(TransLinkAPIError) as info:
        GTFSRT(api_key='foobar').trip_updates()
    assert info.value.response.status_code == codes.forbidden
    # The API returns no structured error code/message for this endpoint.
    assert not info.value.code
    assert not info.value.message
|
[
"pytest.raises",
"tilapya.gtfsrt.GTFSRT"
] |
[((370, 399), 'tilapya.gtfsrt.GTFSRT', 'GTFSRT', ([], {'api_key': 'valid_api_key'}), '(api_key=valid_api_key)\n', (376, 399), False, 'from tilapya.gtfsrt import GTFSRT\n'), ((705, 737), 'pytest.raises', 'pytest.raises', (['TransLinkAPIError'], {}), '(TransLinkAPIError)\n', (718, 737), False, 'import pytest\n'), ((755, 779), 'tilapya.gtfsrt.GTFSRT', 'GTFSRT', ([], {'api_key': '"""foobar"""'}), "(api_key='foobar')\n", (761, 779), False, 'from tilapya.gtfsrt import GTFSRT\n')]
|
"""
See COPYING for license information.
"""
import unittest
import os
import time
from twisted.python import log
from swftp.utils import (
try_datetime_parse, MetricCollector, parse_key_value_config)
class MetricCollectorTest(unittest.TestCase):
    """Tests for swftp.utils.MetricCollector."""
    def setUp(self):
        self.c = MetricCollector()
    def test_init(self):
        """Constructor stores sample_size and starts with empty state."""
        c = MetricCollector(10)
        self.assertEqual(c.sample_size, 10)
        self.assertEqual(c.current, {})
        self.assertEqual(c.totals, {})
        self.assertEqual(c.samples, {})
        c = MetricCollector(20)
        self.assertEqual(c.sample_size, 20)
    def test_emit(self):
        """emit() consumes twisted log dicts; 'count' defaults to 1."""
        self.c.emit({'metric': 'some_metric'})
        self.assertEqual(self.c.current['some_metric'], 1)
        self.c.emit({'metric': 'some_metric', 'count': 10})
        self.assertEqual(self.c.current['some_metric'], 11)
    def test_add_metric(self):
        """add_metric() updates both the current window and the running totals."""
        self.c.add_metric('some_metric')
        self.assertEqual(self.c.current['some_metric'], 1)
        self.assertEqual(self.c.totals['some_metric'], 1)
        self.c.add_metric('some_metric', count=10)
        self.assertEqual(self.c.current['some_metric'], 11)
        self.assertEqual(self.c.totals['some_metric'], 11)
    def test_sample(self):
        """sample() snapshots counts, keeping only the most recent values."""
        self.c.add_metric('some_metric')
        self.c.sample()
        self.assertEqual(self.c.samples['some_metric'], [1])
        self.c.add_metric('some_metric')
        self.c.sample()
        self.assertEqual(self.c.samples['some_metric'], [1, 1])
        for i in range(15):
            self.c.add_metric('some_metric', count=i)
            self.c.sample()
        # NOTE(review): list == range(...) only holds on Python 2, where
        # range() returns a list; under Python 3 this comparison is always
        # False — confirm the targeted interpreter.
        self.assertEqual(self.c.samples['some_metric'], range(4, 15))
    def test_attach_logger(self):
        """start()/stop() register/unregister emit with the twisted log publisher."""
        self.c.start()
        self.assertIn(self.c.emit, log.theLogPublisher.observers)
        self.c.stop()
        self.assertNotIn(self.c.emit, log.theLogPublisher.observers)
class DateTimeParseTest(unittest.TestCase):
    """Tests for swftp.utils.try_datetime_parse across its supported formats."""
    def setUp(self):
        # Pin the process timezone to GMT so the epoch expectations below
        # are independent of the host's local timezone.
        os.environ['TZ'] = 'GMT'
        time.tzset()
    def test_invalid_date(self):
        """Unparseable input yields None rather than raising."""
        result = try_datetime_parse("this isn't a date!")
        self.assertIsNone(result)
    def test_RFC_1123(self):
        result = try_datetime_parse("Thu, 10 Apr 2008 13:30:00 GMT")
        self.assertEqual(result, 1207834200.0)
    def test_RFC_1123_subsecond(self):
        # Fractional seconds are accepted but do not change the result.
        result = try_datetime_parse("Thu, 10 Apr 2008 13:30:00.12345 GMT")
        self.assertEqual(result, 1207834200.0)
    def test_ISO_8601(self):
        result = try_datetime_parse("2008-04-10T13:30:00")
        self.assertEqual(result, 1207834200.0)
    def test_ISO_8601_subsecond(self):
        result = try_datetime_parse("2008-04-10T13:30:00.12345")
        self.assertEqual(result, 1207834200.0)
    def test_universal_sortable(self):
        result = try_datetime_parse("2008-04-10 13:30:00")
        self.assertEqual(result, 1207834200.0)
    def test_universal_sortable_subsecond(self):
        result = try_datetime_parse("2008-04-10 13:30:00.12345")
        self.assertEqual(result, 1207834200.0)
    def test_date_short(self):
        # Date-only input parses to midnight (GMT) of that day.
        result = try_datetime_parse("2012-04-10")
        self.assertEqual(result, 1334016000.0)
class ParseKeyValueConfigTest(unittest.TestCase):
    """Tests for swftp.utils.parse_key_value_config ("k: v, k2: v2" strings)."""
    def test_single(self):
        res = parse_key_value_config('test: 1')
        self.assertEqual(res, {'test': '1'})
    def test_multiple(self):
        res = parse_key_value_config('test: 1, test2: 2')
        self.assertEqual(res, {'test': '1', 'test2': '2'})
    def test_empty(self):
        """An empty string parses to an empty dict."""
        res = parse_key_value_config('')
        self.assertEqual(res, {})
    def test_duplicate(self):
        # The later occurrence of a duplicated key wins.
        res = parse_key_value_config('test: 1, test: 2')
        self.assertEqual(res, {'test': '2'})
    def test_whitespace(self):
        # Whitespace around keys and values is stripped.
        res = parse_key_value_config(' test : 1 , test2 : 2 ')
        self.assertEqual(res, {'test': '1', 'test2': '2'})
|
[
"swftp.utils.try_datetime_parse",
"swftp.utils.MetricCollector",
"time.tzset",
"swftp.utils.parse_key_value_config"
] |
[((293, 310), 'swftp.utils.MetricCollector', 'MetricCollector', ([], {}), '()\n', (308, 310), False, 'from swftp.utils import try_datetime_parse, MetricCollector, parse_key_value_config\n'), ((349, 368), 'swftp.utils.MetricCollector', 'MetricCollector', (['(10)'], {}), '(10)\n', (364, 368), False, 'from swftp.utils import try_datetime_parse, MetricCollector, parse_key_value_config\n'), ((545, 564), 'swftp.utils.MetricCollector', 'MetricCollector', (['(20)'], {}), '(20)\n', (560, 564), False, 'from swftp.utils import try_datetime_parse, MetricCollector, parse_key_value_config\n'), ((2011, 2023), 'time.tzset', 'time.tzset', ([], {}), '()\n', (2021, 2023), False, 'import time\n'), ((2075, 2115), 'swftp.utils.try_datetime_parse', 'try_datetime_parse', (['"""this isn\'t a date!"""'], {}), '("this isn\'t a date!")\n', (2093, 2115), False, 'from swftp.utils import try_datetime_parse, MetricCollector, parse_key_value_config\n'), ((2197, 2248), 'swftp.utils.try_datetime_parse', 'try_datetime_parse', (['"""Thu, 10 Apr 2008 13:30:00 GMT"""'], {}), "('Thu, 10 Apr 2008 13:30:00 GMT')\n", (2215, 2248), False, 'from swftp.utils import try_datetime_parse, MetricCollector, parse_key_value_config\n'), ((2353, 2410), 'swftp.utils.try_datetime_parse', 'try_datetime_parse', (['"""Thu, 10 Apr 2008 13:30:00.12345 GMT"""'], {}), "('Thu, 10 Apr 2008 13:30:00.12345 GMT')\n", (2371, 2410), False, 'from swftp.utils import try_datetime_parse, MetricCollector, parse_key_value_config\n'), ((2505, 2546), 'swftp.utils.try_datetime_parse', 'try_datetime_parse', (['"""2008-04-10T13:30:00"""'], {}), "('2008-04-10T13:30:00')\n", (2523, 2546), False, 'from swftp.utils import try_datetime_parse, MetricCollector, parse_key_value_config\n'), ((2651, 2698), 'swftp.utils.try_datetime_parse', 'try_datetime_parse', (['"""2008-04-10T13:30:00.12345"""'], {}), "('2008-04-10T13:30:00.12345')\n", (2669, 2698), False, 'from swftp.utils import try_datetime_parse, MetricCollector, parse_key_value_config\n'), ((2803, 
2844), 'swftp.utils.try_datetime_parse', 'try_datetime_parse', (['"""2008-04-10 13:30:00"""'], {}), "('2008-04-10 13:30:00')\n", (2821, 2844), False, 'from swftp.utils import try_datetime_parse, MetricCollector, parse_key_value_config\n'), ((2959, 3006), 'swftp.utils.try_datetime_parse', 'try_datetime_parse', (['"""2008-04-10 13:30:00.12345"""'], {}), "('2008-04-10 13:30:00.12345')\n", (2977, 3006), False, 'from swftp.utils import try_datetime_parse, MetricCollector, parse_key_value_config\n'), ((3103, 3135), 'swftp.utils.try_datetime_parse', 'try_datetime_parse', (['"""2012-04-10"""'], {}), "('2012-04-10')\n", (3121, 3135), False, 'from swftp.utils import try_datetime_parse, MetricCollector, parse_key_value_config\n'), ((3276, 3309), 'swftp.utils.parse_key_value_config', 'parse_key_value_config', (['"""test: 1"""'], {}), "('test: 1')\n", (3298, 3309), False, 'from swftp.utils import try_datetime_parse, MetricCollector, parse_key_value_config\n'), ((3399, 3442), 'swftp.utils.parse_key_value_config', 'parse_key_value_config', (['"""test: 1, test2: 2"""'], {}), "('test: 1, test2: 2')\n", (3421, 3442), False, 'from swftp.utils import try_datetime_parse, MetricCollector, parse_key_value_config\n'), ((3543, 3569), 'swftp.utils.parse_key_value_config', 'parse_key_value_config', (['""""""'], {}), "('')\n", (3565, 3569), False, 'from swftp.utils import try_datetime_parse, MetricCollector, parse_key_value_config\n'), ((3649, 3691), 'swftp.utils.parse_key_value_config', 'parse_key_value_config', (['"""test: 1, test: 2"""'], {}), "('test: 1, test: 2')\n", (3671, 3691), False, 'from swftp.utils import try_datetime_parse, MetricCollector, parse_key_value_config\n'), ((3783, 3846), 'swftp.utils.parse_key_value_config', 'parse_key_value_config', (['""" test : 1 , test2 : 2 """'], {}), "(' test : 1 , test2 : 2 ')\n", (3805, 3846), False, 'from swftp.utils import try_datetime_parse, MetricCollector, parse_key_value_config\n')]
|
from matplotlib.colors import Normalize
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pandas as pd
import numpy as np
from math import pi, log
from scipy.stats import rankdata
from argparse import ArgumentParser
if __name__ == "__main__":
    # --- command line: sampled problem-space file plus plot extents -------
    cli = ArgumentParser()
    cli.add_argument("fp", type=str)
    cli.add_argument(
        "bounds", type=float, nargs=4, help="lowerbound x, upperbound x, lb y, ub y"
    )
    parsed = cli.parse_args()
    filepath = parsed.fp
    dims = parsed.bounds
    # Load the pre-sampled problem space (real data or the Karpathy toy
    # problem used for validation).
    pspace = np.loadtxt(filepath)
    print(dims)
    # NOTE(review): the help text advertises (x-lo, x-hi, y-lo, y-hi) but
    # the unpack below treats the first pair as the position (y) bounds —
    # verify which convention callers rely on.
    lbp, ubp, lbb, ubb = dims
    # ******************** PLOTTING ************************************
    fig = plt.figure()
    ax = fig.gca()
    # Map the scalar field through a normalized viridis colormap.
    cmap = plt.cm.viridis
    flat = pspace.flatten()
    colors = cmap(Normalize(min(flat), max(flat))(pspace))
    plt.axis('equal')
    plt.imshow(
        colors,
        vmin=min(flat),
        vmax=max(flat),
        extent=[lbb, ubb, lbp, ubp],
        aspect="auto",
        interpolation="none",
        origin="lower",
    )
    ax.set_xlabel("burnDv")
    ax.set_ylabel("position")
    plt.colorbar()
    plt.show()
|
[
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"numpy.loadtxt"
] |
[((330, 346), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (344, 346), False, 'from argparse import ArgumentParser\n'), ((736, 756), 'numpy.loadtxt', 'np.loadtxt', (['filepath'], {}), '(filepath)\n', (746, 756), True, 'import numpy as np\n'), ((1013, 1025), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1023, 1025), True, 'import matplotlib.pyplot as plt\n'), ((1233, 1250), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (1241, 1250), True, 'import matplotlib.pyplot as plt\n'), ((1538, 1552), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1550, 1552), True, 'import matplotlib.pyplot as plt\n'), ((1558, 1568), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1566, 1568), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Float32
# Publish a constant radius value on the 'radius' topic at 1 Hz.
rospy.init_node('publisher_radius')
pub = rospy.Publisher('radius', Float32, queue_size=1)
rate = rospy.Rate(1)  # 1 Hz publishing loop
radius = 1.0
while not rospy.is_shutdown():
    try:
        pub.publish(radius)
        rate.sleep()
    except rospy.ROSInterruptException:
        # Bug fix: the original bare `except: pass` also swallowed
        # SystemExit/KeyboardInterrupt.  rate.sleep() raises
        # ROSInterruptException on shutdown; the loop condition exits then.
        pass
|
[
"rospy.is_shutdown",
"rospy.Publisher",
"rospy.init_node",
"rospy.Rate"
] |
[((70, 105), 'rospy.init_node', 'rospy.init_node', (['"""publisher_radius"""'], {}), "('publisher_radius')\n", (85, 105), False, 'import rospy\n'), ((112, 160), 'rospy.Publisher', 'rospy.Publisher', (['"""radius"""', 'Float32'], {'queue_size': '(1)'}), "('radius', Float32, queue_size=1)\n", (127, 160), False, 'import rospy\n'), ((169, 182), 'rospy.Rate', 'rospy.Rate', (['(1)'], {}), '(1)\n', (179, 182), False, 'import rospy\n'), ((207, 226), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (224, 226), False, 'import rospy\n')]
|
# coding=utf-8
from django.contrib import admin
from .admin_support.forms import NoteModelForm
from .models import Note, Tag
class TagInline(admin.TabularInline):
    """Tabular inline editor for the Note<->Tag many-to-many through table."""
    model = Note.tags.through
class NoteAdmin(admin.ModelAdmin):
    """Admin configuration for Note with inline tag editing and a custom form."""
    ordering = ["title"]
    # NOTE(review): "body" is excluded here, presumably edited through
    # NoteModelForm instead — confirm against the form definition.
    exclude = ("created", "modified", "body")
    list_display = ("name", "title", "modified", "created")
    search_fields = ["title"]
    # The "name" slug is pre-filled from the title while typing.
    prepopulated_fields = {"name": ("title",)}
    inlines = [TagInline]
    form = NoteModelForm
# Expose both models in the Django admin; notes use the customised admin class.
admin.site.register(Tag)
admin.site.register(Note, NoteAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((495, 519), 'django.contrib.admin.site.register', 'admin.site.register', (['Tag'], {}), '(Tag)\n', (514, 519), False, 'from django.contrib import admin\n'), ((520, 556), 'django.contrib.admin.site.register', 'admin.site.register', (['Note', 'NoteAdmin'], {}), '(Note, NoteAdmin)\n', (539, 556), False, 'from django.contrib import admin\n')]
|
'''
test en-zh
'''
# import sys
import pytest # type: ignore
from loguru import logger
# sys.path.insert(0, '..')
# from google_tr_async.google_tr_async import google_tr_async
from google_tr_async import google_tr_async
@pytest.mark.asyncio
async def test_0():
    """A long English passage translates to the default target without a proxy."""
    text = \
        '''There is now some uncertainty about the future of Google News in Europe after the European Union finalized its controversial new copyright legislation.
Google had previously showed how dramatically its search results could be affected, and warned that it may shut down the service in Europe …
The EU Copyright Directive is well-intentioned, requiring tech giants to license the right to reproduce copyrighted material on their own websites. However, the legislation as originally proposed would have made it impossible for Google to display brief snippets and photos from news stories in its search results without paying the news sites.
Google last month showed how its news search results would appear without photos and text excerpts, rendering the service all but useless. The company had previously said that its only option might be to shut down Google News in Europe.'''
    trtext, proxy = await google_tr_async(text, debug=True)
    # assert len(google_tr_async.dual) == 6
    assert proxy is None
    assert len(trtext) > 200
@pytest.mark.asyncio
async def test_1():
    """Chinese -> English: a short phrase translates exactly, without a proxy."""
    text = '这是测试'
    trtext, proxy = await google_tr_async(text, to_lang='en', debug=True)
    logger.debug('trtext: %s' % trtext)
    # logger.debug('google_tr_async.dual: %s' % google_tr_async.dual)
    # assert len(google_tr_async.dual) == 6
    # assert google_tr_async.dual == 6
    # assert len(trtext) > 200
    assert trtext == 'This is a test'
    assert proxy is None
|
[
"loguru.logger.debug",
"google_tr_async.google_tr_async"
] |
[((1534, 1569), 'loguru.logger.debug', 'logger.debug', (["('trtext: %s' % trtext)"], {}), "('trtext: %s' % trtext)\n", (1546, 1569), False, 'from loguru import logger\n'), ((1239, 1272), 'google_tr_async.google_tr_async', 'google_tr_async', (['text'], {'debug': '(True)'}), '(text, debug=True)\n', (1254, 1272), False, 'from google_tr_async import google_tr_async\n'), ((1482, 1529), 'google_tr_async.google_tr_async', 'google_tr_async', (['text'], {'to_lang': '"""en"""', 'debug': '(True)'}), "(text, to_lang='en', debug=True)\n", (1497, 1529), False, 'from google_tr_async import google_tr_async\n')]
|
import pandas as pd
# Load the labelled IMDB training data (tab-separated, quoting disabled).
train = pd.read_csv('alldata/labeledTrainData.tsv', header=0, delimiter='\t', quoting=3)
print(train.shape)
print(train.columns.values)
print(train.head())
print(train['review'][0])
test = pd.read_csv('alldata/testData.tsv', header=0, delimiter='\t', quoting=3)
print(test.shape)
print(test.head())
train_split = train['review'][0].split(",")
# Bug fix: the loop variable was named `str`, shadowing the builtin.
for part in train_split:
    print(part)
# data cleaning
from bs4 import BeautifulSoup
# Parse one review with BeautifulSoup to strip its HTML tags.
example1 = BeautifulSoup(train['review'][0], 'lxml')
# Compare the raw review with the tag-free text from get_text().
print(train['review'][0])
print()
print(example1.get_text())
import re
letters_only = re.sub('[^a-zA-Z]',  # The pattern to search for
                      ' ',  # The pattern to replace it with
                      example1.get_text())  # The text to search
print(letters_only)
# Normalise case, then tokenise on whitespace.
lower_case = letters_only.lower()  # Convert to lower case
words = lower_case.split()  # Split into words
# stop-word removal
from nltk.corpus import stopwords  # import the stop word list
print(stopwords.words('english')[:10])
# Build the stop-word set once: the original re-read the corpus list for
# every token, making the filter quadratic.  Output is identical.
stops = set(stopwords.words('english'))
words = [w for w in words if w not in stops]
print(words[:10])
|
[
"pandas.read_csv",
"nltk.corpus.stopwords.words",
"bs4.BeautifulSoup"
] |
[((28, 113), 'pandas.read_csv', 'pd.read_csv', (['"""alldata/labeledTrainData.tsv"""'], {'header': '(0)', 'delimiter': '"""\t"""', 'quoting': '(3)'}), "('alldata/labeledTrainData.tsv', header=0, delimiter='\\t', quoting=3\n )\n", (39, 113), True, 'import pandas as pd\n'), ((210, 282), 'pandas.read_csv', 'pd.read_csv', (['"""alldata/testData.tsv"""'], {'header': '(0)', 'delimiter': '"""\t"""', 'quoting': '(3)'}), "('alldata/testData.tsv', header=0, delimiter='\\t', quoting=3)\n", (221, 282), True, 'import pandas as pd\n'), ((517, 558), 'bs4.BeautifulSoup', 'BeautifulSoup', (["train['review'][0]", '"""lxml"""'], {}), "(train['review'][0], 'lxml')\n", (530, 558), False, 'from bs4 import BeautifulSoup\n'), ((1109, 1135), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1124, 1135), False, 'from nltk.corpus import stopwords\n'), ((1198, 1224), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1213, 1224), False, 'from nltk.corpus import stopwords\n')]
|
"""Classes to run register functions at certain timepoints and run asynchronously"""
import threading
import time
from typing import Any, Callable, Iterable, NoReturn, Union
import numpy as np
import sc3nb
from sc3nb.osc.osc_communication import Bundler, OSCCommunication, OSCMessage
class Event:
    """A scheduled callable: a timestamp plus the function and args to run.

    When ``spawn`` is true the callable is wrapped in a fresh
    ``threading.Thread`` so that long-running work does not block the
    caller executing the event.

    Parameters
    ----------
    timestamp : float
        Time at which the event should be executed.
    function : Callable[..., None]
        Callable invoked when the event fires.
    args : Iterable[Any]
        Positional arguments handed to ``function``.
    spawn : bool, optional
        Run ``function`` in its own thread, by default False.
    """

    def __init__(
        self,
        timestamp: float,
        function: Callable[..., None],
        args: Iterable[Any],
        spawn: bool = False,
    ) -> None:
        if spawn:
            # Defer the real call to a dedicated thread; executing the
            # event then merely starts that thread.
            worker = threading.Thread(target=function, args=args)
            function, args = worker.start, ()
        self.timestamp = timestamp
        self.function = function
        self.args = args

    def execute(self) -> None:
        """Invoke the stored callable with its arguments."""
        self.function(*self.args)

    # Events order themselves purely by timestamp.
    def __eq__(self, other):
        return other.timestamp == self.timestamp

    def __lt__(self, other):
        return other.timestamp > self.timestamp

    def __le__(self, other):
        return other.timestamp >= self.timestamp

    def __repr__(self):
        return "%s: %s" % (self.timestamp, self.function.__name__)
class TimedQueue:
"""Accumulates events as timestamps and functions.
Executes given functions according to the timestamps
Parameters
----------
relative_time : bool, optional
If True, use relative time, by default False
thread_sleep_time : float, optional
Sleep time in seconds for worker thread, by default 0.001
drop_time_threshold : float, optional
Threshold for execution time of events in seconds.
If this is exceeded the event will be dropped, by default 0.5
"""
    def __init__(
        self,
        relative_time: bool = False,
        thread_sleep_time: float = 0.001,
        drop_time_threshold: float = 0.5,
    ) -> None:
        # Events more than this many seconds overdue are dropped unexecuted.
        self.drop_time_thr = drop_time_threshold
        # Time origin: with relative_time, timestamps are offsets from "now".
        self.start = time.time() if relative_time else 0
        # Rows of [timestamp, index-into-event_list], kept sorted by timestamp.
        self.onset_idx = np.empty((0, 2))
        self.event_list = []
        self.close_event = threading.Event()
        self.lock = threading.Lock()
        # Background worker that polls for due events until close_event is set.
        self.thread = threading.Thread(
            target=self.__worker, args=(thread_sleep_time, self.close_event)
        )  # , daemon=True)
        self.thread.start()
    def close(self) -> None:
        """Closes event processing without waiting for pending events"""
        # Signal the worker loop to exit, then wait for its thread to finish.
        self.close_event.set()
        self.thread.join()
    def join(self) -> None:
        """Closes event processing after waiting for pending events"""
        self.complete()  # block until the event list has drained
        self.close_event.set()
        self.thread.join()
    def complete(self) -> None:
        """Blocks until all pending events have completed"""
        # Poll: the worker thread pops events from event_list as it runs them.
        while self.event_list:
            time.sleep(0.01)
def put(
self,
timestamp: float,
function: Callable[..., None],
args: Iterable[Any] = (),
spawn: bool = False,
) -> None:
"""Adds event to queue
Parameters
----------
timestamp : float
Time (POSIX) when event should be executed
function : Callable[..., None]
Function to be executed
args : Iterable[Any], optional
Arguments to be passed to function, by default ()
spawn : bool, optional
if True, create new sub-thread for function, by default False
Raises
------
TypeError
raised if function is not callable
"""
if not callable(function):
raise TypeError("function argument cannot be called")
if not isinstance(args, tuple):
args = (args,)
new_event = Event(timestamp, function, args, spawn)
with self.lock:
self.event_list.append(new_event)
evlen = len(self.event_list)
if not self.onset_idx.any():
idx = 0
else:
idx = np.searchsorted(self.onset_idx[:, 0], timestamp)
self.onset_idx = np.insert(
self.onset_idx, idx, [timestamp, evlen - 1], axis=0
)
def get(self) -> Event:
"""Get latest event from queue and remove event
Returns
-------
Event
Latest event
"""
event = self.peek()
self.pop()
return event
def peek(self) -> Event:
"""Look up latest event from queue
Returns
-------
Event
Latest event
"""
with self.lock:
return self.event_list[int(self.onset_idx[0][1])]
def empty(self) -> bool:
"""Checks if queue is empty
Returns
-------
bool
True if queue if empty
"""
with self.lock:
return bool(self.event_list)
def pop(self) -> None:
"""Removes latest event from queue"""
with self.lock:
event_idx = int(self.onset_idx[0][1])
self.onset_idx = self.onset_idx[1:]
# remove 1 from all idcs after popped event
self.onset_idx[:, 1][self.onset_idx[:, 1] > event_idx] -= 1
del self.event_list[event_idx]
def __worker(self, sleep_time: float, close_event: threading.Event) -> NoReturn:
"""Worker function to process events"""
while True:
if close_event.is_set():
break
if self.event_list:
event = self.peek()
if event.timestamp <= time.time() - self.start:
# execute only if not too old
if event.timestamp > time.time() - self.start - self.drop_time_thr:
event.execute()
self.pop()
# sleep_time = event_list[0].timestamp - (time.time() - self.start) - 0.001
time.sleep(sleep_time)
def __repr__(self):
return f"<TimedQueue {self.event_list.__repr__()}>"
def elapse(self, time_delta: float) -> None:
"""Add time delta to the current queue time.
Parameters
----------
time_delta : float
Additional time
"""
self.start += time_delta
class TimedQueueSC(TimedQueue):
    """A :class:`TimedQueue` that dispatches OSC traffic through a server.

    Parameters
    ----------
    server : OSCCommunication, optional
        OSC server used to send the queued bundlers/messages; when omitted,
        the default SC instance's server is used.
    relative_time : bool, optional
        If True, use relative time, by default False
    thread_sleep_time : float, optional
        Sleep time in seconds for worker thread, by default 0.001
    """

    def __init__(
        self,
        server: OSCCommunication = None,
        relative_time: bool = False,
        thread_sleep_time: float = 0.001,
    ):
        super().__init__(relative_time, thread_sleep_time)
        # Fall back to the default SC server when no server was supplied.
        self.server = server if server else sc3nb.SC.get_default().server

    def put_bundler(self, onset: float, bundler: Bundler) -> None:
        """Schedule a Bundler for sending.

        Parameters
        ----------
        onset : float
            Sending timetag of the Bundler
        bundler : Bundler
            Bundler that will be sent
        """
        self.put(onset, bundler.send)

    def put_msg(
        self, onset: float, msg: Union[OSCMessage, str], msg_params: Iterable[Any]
    ) -> None:
        """Schedule an OSC message for sending.

        Parameters
        ----------
        onset : float
            Sending timetag of the message
        msg : Union[OSCMessage, str]
            OSCMessage or OSC address
        msg_params : Iterable[Any]
            If msg is str, this will be the parameters of the created OSCMessage
        """
        # A plain address string is turned into a message by the server;
        # a ready-made OSCMessage is sent as-is.
        if isinstance(msg, str):
            sender, payload = self.server.msg, (msg, msg_params)
        else:
            sender, payload = self.server.send, (msg,)
        self.put(onset, sender, args=payload)
|
[
"threading.Thread",
"numpy.empty",
"numpy.searchsorted",
"sc3nb.SC.get_default",
"time.time",
"threading.Lock",
"time.sleep",
"numpy.insert",
"threading.Event"
] |
[((2418, 2434), 'numpy.empty', 'np.empty', (['(0, 2)'], {}), '((0, 2))\n', (2426, 2434), True, 'import numpy as np\n'), ((2491, 2508), 'threading.Event', 'threading.Event', ([], {}), '()\n', (2506, 2508), False, 'import threading\n'), ((2530, 2546), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (2544, 2546), False, 'import threading\n'), ((2570, 2657), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.__worker', 'args': '(thread_sleep_time, self.close_event)'}), '(target=self.__worker, args=(thread_sleep_time, self.\n close_event))\n', (2586, 2657), False, 'import threading\n'), ((959, 1003), 'threading.Thread', 'threading.Thread', ([], {'target': 'function', 'args': 'args'}), '(target=function, args=args)\n', (975, 1003), False, 'import threading\n'), ((2357, 2368), 'time.time', 'time.time', ([], {}), '()\n', (2366, 2368), False, 'import time\n'), ((3202, 3218), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (3212, 3218), False, 'import time\n'), ((4453, 4515), 'numpy.insert', 'np.insert', (['self.onset_idx', 'idx', '[timestamp, evlen - 1]'], {'axis': '(0)'}), '(self.onset_idx, idx, [timestamp, evlen - 1], axis=0)\n', (4462, 4515), True, 'import numpy as np\n'), ((6275, 6297), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (6285, 6297), False, 'import time\n'), ((4375, 4423), 'numpy.searchsorted', 'np.searchsorted', (['self.onset_idx[:, 0]', 'timestamp'], {}), '(self.onset_idx[:, 0], timestamp)\n', (4390, 4423), True, 'import numpy as np\n'), ((7299, 7321), 'sc3nb.SC.get_default', 'sc3nb.SC.get_default', ([], {}), '()\n', (7319, 7321), False, 'import sc3nb\n'), ((5936, 5947), 'time.time', 'time.time', ([], {}), '()\n', (5945, 5947), False, 'import time\n'), ((6053, 6064), 'time.time', 'time.time', ([], {}), '()\n', (6062, 6064), False, 'import time\n')]
|
import os
from ..models import DocumentType
from ..permissions import (
permission_document_properties_edit,
permission_document_type_create, permission_document_type_delete,
permission_document_type_edit, permission_document_type_view,
)
from .base import GenericDocumentViewTestCase
from .literals import (
TEST_DOCUMENT_TYPE_LABEL, TEST_DOCUMENT_TYPE_LABEL_EDITED,
TEST_DOCUMENT_TYPE_QUICK_LABEL, TEST_DOCUMENT_TYPE_QUICK_LABEL_EDITED
)
from .mixins import (
DocumentQuickLabelViewTestMixin,
DocumentTypeDeletionPoliciesViewTestMixin,
DocumentTypeFilenameGeneratorViewTestMixin,
DocumentTypeQuickLabelTestMixin, DocumentTypeQuickLabelViewTestMixin,
DocumentTypeViewTestMixin
)
class DocumentTypeDeletionPoliciesViewTestCase(
    DocumentTypeDeletionPoliciesViewTestMixin, GenericDocumentViewTestCase
):
    # Only the document type is needed; skip auto-uploading a test document.
    auto_upload_test_document = False
    # NOTE(review): despite the class name ("deletion policies"), both tests
    # below call the *filename generator* request helper and are identical to
    # DocumentTypeFilenameGeneratorViewTestCase — looks like a copy-paste;
    # confirm the mixin actually provides these requests or fix the test names.
    def test_document_type_filename_generator_get_view_no_permission(self):
        # Without any grant the view must be hidden (404, not 403).
        response = self._request_document_type_filename_generator_get_view()
        self.assertEqual(response.status_code, 404)
    def test_document_type_filename_generator_get_view_access(self):
        # Edit access on the specific document type makes the view render.
        self.grant_access(
            obj=self.test_document_type,
            permission=permission_document_type_edit
        )
        response = self._request_document_type_filename_generator_get_view()
        self.assertEqual(response.status_code, 200)
class DocumentTypeFilenameGeneratorViewTestCase(
    DocumentTypeFilenameGeneratorViewTestMixin, GenericDocumentViewTestCase
):
    """View tests for the document type filename generator setup view."""

    auto_upload_test_document = False

    def test_document_type_filename_generator_get_view_no_permission(self):
        """Without access the view is hidden behind a 404."""
        status = self._request_document_type_filename_generator_get_view().status_code
        self.assertEqual(status, 404)

    def test_document_type_filename_generator_get_view_access(self):
        """Edit access on the document type makes the view render."""
        self.grant_access(
            permission=permission_document_type_edit,
            obj=self.test_document_type
        )
        status = self._request_document_type_filename_generator_get_view().status_code
        self.assertEqual(status, 200)
class DocumentTypeViewsTestCase(
    DocumentTypeViewTestMixin, GenericDocumentViewTestCase
):
    """CRUD view tests for document types."""

    auto_upload_test_document = False

    def test_document_type_create_view_no_permission(self):
        """Creating without the permission is forbidden and creates nothing."""
        self.test_document_type.delete()
        self.assertEqual(
            self._request_test_document_type_create_view().status_code, 403
        )
        self.assertEqual(DocumentType.objects.count(), 0)

    def test_document_type_create_view_with_permission(self):
        """With the create permission the type is created with the test label."""
        self.test_document_type.delete()
        self.grant_permission(permission=permission_document_type_create)
        self.assertEqual(
            self._request_test_document_type_create_view().status_code, 302
        )
        self.assertEqual(DocumentType.objects.count(), 1)
        self.assertEqual(
            DocumentType.objects.first().label, TEST_DOCUMENT_TYPE_LABEL
        )

    def test_document_type_delete_view_no_permission(self):
        """Deleting without access yields 404 and keeps the type."""
        self.assertEqual(
            self._request_test_document_type_delete_view().status_code, 404
        )
        self.assertEqual(DocumentType.objects.count(), 1)

    def test_document_type_delete_view_with_access(self):
        """Deleting with access redirects and removes the type."""
        self.grant_access(
            permission=permission_document_type_delete,
            obj=self.test_document_type
        )
        self.assertEqual(
            self._request_test_document_type_delete_view().status_code, 302
        )
        self.assertEqual(DocumentType.objects.count(), 0)

    def test_document_type_edit_view_no_permission(self):
        """Editing without access yields 404 and leaves the label unchanged."""
        self.assertEqual(
            self._request_test_document_type_edit_view().status_code, 404
        )
        self.test_document_type.refresh_from_db()
        self.assertEqual(
            self.test_document_type.label, TEST_DOCUMENT_TYPE_LABEL
        )

    def test_document_type_edit_view_with_access(self):
        """Editing with access redirects and updates the label."""
        self.grant_access(
            permission=permission_document_type_edit,
            obj=self.test_document_type
        )
        self.assertEqual(
            self._request_test_document_type_edit_view().status_code, 302
        )
        self.test_document_type.refresh_from_db()
        self.assertEqual(
            self.test_document_type.label, TEST_DOCUMENT_TYPE_LABEL_EDITED
        )

    def test_document_type_list_view_no_permission(self):
        """The list view must not show types the user cannot access."""
        response = self._request_test_document_type_list_view()
        self.assertNotContains(
            response=response, status_code=200, text=self.test_document_type
        )

    def test_document_type_list_view_with_access(self):
        """The list view shows types the user was granted view access to."""
        self.grant_access(
            permission=permission_document_type_view,
            obj=self.test_document_type
        )
        response = self._request_test_document_type_list_view()
        self.assertContains(
            response=response, status_code=200, text=self.test_document_type
        )
class DocumentTypeQuickLabelViewsTestCase(
    DocumentTypeQuickLabelTestMixin, DocumentTypeQuickLabelViewTestMixin,
    GenericDocumentViewTestCase
):
    """CRUD view tests for document type quick labels (stored filenames)."""
    auto_upload_test_document = False
    def test_document_type_quick_label_create_no_permission(self):
        # View permission alone must not allow creating quick labels.
        self.grant_access(
            obj=self.test_document_type,
            permission=permission_document_type_view
        )
        response = self._request_quick_label_create()
        self.assertEqual(response.status_code, 404)
        self.assertEqual(self.test_document_type.filenames.count(), 0)
    def test_document_type_quick_label_create_with_access(self):
        # Edit access allows creation; the new quick label is persisted.
        self.grant_access(
            obj=self.test_document_type,
            permission=permission_document_type_edit
        )
        response = self._request_quick_label_create()
        self.assertEqual(response.status_code, 302)
        self.assertEqual(self.test_document_type.filenames.count(), 1)
    def test_document_type_quick_label_delete_no_permission(self):
        self._create_test_quick_label()
        response = self._request_quick_label_delete()
        self.assertEqual(response.status_code, 404)
        self.assertEqual(
            self.test_document_type.filenames.count(), 1
        )
    def test_document_type_quick_label_delete_with_access(self):
        self.grant_access(
            obj=self.test_document_type,
            permission=permission_document_type_edit
        )
        self._create_test_quick_label()
        response = self._request_quick_label_delete()
        self.assertEqual(response.status_code, 302)
        self.assertEqual(
            self.test_document_type.filenames.count(), 0
        )
    def test_document_type_quick_label_edit_no_permission(self):
        # Failed edit must leave the stored filename untouched.
        self._create_test_quick_label()
        response = self._request_quick_label_edit()
        self.assertEqual(response.status_code, 404)
        self.test_document_type_filename.refresh_from_db()
        self.assertEqual(
            self.test_document_type_filename.filename,
            TEST_DOCUMENT_TYPE_QUICK_LABEL
        )
    def test_document_type_quick_label_edit_with_access(self):
        self.grant_access(
            obj=self.test_document_type,
            permission=permission_document_type_edit
        )
        self._create_test_quick_label()
        response = self._request_quick_label_edit()
        self.assertEqual(response.status_code, 302)
        self.test_document_type_filename.refresh_from_db()
        self.assertEqual(
            self.test_document_type_filename.filename,
            TEST_DOCUMENT_TYPE_QUICK_LABEL_EDITED
        )
    def test_document_type_quick_label_list_no_permission(self):
        self._create_test_quick_label()
        response = self._request_quick_label_list_view()
        self.assertEqual(response.status_code, 404)
    def test_document_type_quick_label_list_with_access(self):
        # View access suffices to list existing quick labels.
        self._create_test_quick_label()
        self.grant_access(
            obj=self.test_document_type,
            permission=permission_document_type_view
        )
        response = self._request_quick_label_list_view()
        self.assertContains(
            response, status_code=200, text=self.test_document_type_filename
        )
class DocumentsQuickLabelViewTestCase(
    DocumentQuickLabelViewTestMixin, DocumentTypeQuickLabelTestMixin,
    GenericDocumentViewTestCase
):
    """Tests for applying a document type quick label to a document."""

    def test_document_quick_label_no_permission(self):
        """Without edit access the quick label view must return 404."""
        self._create_test_quick_label()
        response = self._request_document_quick_label_edit_view()
        self.assertEqual(response.status_code, 404)

    def test_document_quick_label_with_access(self):
        """With edit access the document label is replaced by the quick label."""
        self._create_test_quick_label()
        self.grant_access(
            obj=self.test_document,
            permission=permission_document_properties_edit
        )
        response = self._request_document_quick_label_edit_view()
        self.assertEqual(response.status_code, 302)
        self.test_document.refresh_from_db()
        self.assertEqual(
            self.test_document.label, self.test_document_type_filename.filename
        )

    def test_document_quick_label_preserve_extension_with_access(self):
        """With preserve_extension=True the original file extension is kept."""
        self._create_test_quick_label()
        self.grant_access(
            permission=permission_document_properties_edit, obj=self.test_document
        )
        # Only the extension matters; the base name is discarded.
        _, extension = os.path.splitext(self.test_document.label)
        response = self._request_document_quick_label_edit_view(
            extra_data={'preserve_extension': True}
        )
        self.assertEqual(response.status_code, 302)
        self.test_document.refresh_from_db()
        self.assertEqual(
            self.test_document.label, '{}{}'.format(
                self.test_document_type_filename.filename, extension
            )
        )

    def test_document_quick_label_no_preserve_extension_with_access(self):
        """With preserve_extension=False the quick label is used verbatim."""
        self._create_test_quick_label()
        self.grant_access(
            obj=self.test_document,
            permission=permission_document_properties_edit
        )
        # Removed a dead `os.path.splitext` call whose result was never used.
        response = self._request_document_quick_label_edit_view(
            extra_data={'preserve_extension': False}
        )
        self.assertEqual(response.status_code, 302)
        self.test_document.refresh_from_db()
        self.assertEqual(
            self.test_document.label, self.test_document_type_filename.filename
        )
|
[
"os.path.splitext"
] |
[((9421, 9463), 'os.path.splitext', 'os.path.splitext', (['self.test_document.label'], {}), '(self.test_document.label)\n', (9437, 9463), False, 'import os\n'), ((10140, 10182), 'os.path.splitext', 'os.path.splitext', (['self.test_document.label'], {}), '(self.test_document.label)\n', (10156, 10182), False, 'import os\n')]
|
"""
Unit and regression test for the kissim.encoding.features.sitealign.SiteAlignFeature class.
"""
from pathlib import Path
import pytest
import numpy as np
import pandas as pd
from opencadd.databases.klifs import setup_local
from kissim.io import PocketBioPython
from kissim.encoding.features import SiteAlignFeature
# NOTE(review): Path(__name__) builds the path from the *module name*, not the
# file location; this only resolves correctly when running from the repo root.
# Path(__file__) would be the usual base — confirm intended.
PATH_TEST_DATA = Path(__name__).parent / "kissim" / "tests" / "data"
# Local KLIFS session backed by the test data download; shared by all tests.
LOCAL = setup_local(PATH_TEST_DATA / "KLIFS_download")
class TestsSiteAlignFeature:
    """
    Test SiteAlignFeature class methods.

    All structure-based tests use KLIFS structure 12347 via the module-level
    LOCAL session (local test data).
    """
    @pytest.mark.parametrize(
        "structure_klifs_id, klifs_session, feature_name",
        [
            (12347, LOCAL, "hba"),
            (12347, LOCAL, "hbd"),
            (12347, LOCAL, "size"),
            (12347, LOCAL, "charge"),
            (12347, LOCAL, "aliphatic"),
            (12347, LOCAL, "aromatic"),
        ],
    )
    def test_from_pocket(self, structure_klifs_id, klifs_session, feature_name):
        """
        Test if SiteAlignFeature can be set from a Pocket object.
        Test object attributes.
        """
        pocket = PocketBioPython.from_structure_klifs_id(
            structure_klifs_id, klifs_session=klifs_session
        )
        feature = SiteAlignFeature.from_pocket(pocket, feature_name)
        assert isinstance(feature, SiteAlignFeature)
        # Test class attributes
        assert feature.name == structure_klifs_id
        for residue_id, residue_ix, residue_name, category in zip(
            feature._residue_ids, feature._residue_ixs, feature._residue_names, feature._categories
        ):
            # residue_id may be None for unresolved residues.
            if residue_id is not None:
                assert isinstance(residue_id, int)
            assert isinstance(residue_ix, int)
            assert isinstance(feature_name, str)
            assert isinstance(category, float)
    @pytest.mark.parametrize(
        "structure_klifs_id, klifs_session, feature_name",
        [(12347, LOCAL, "xxx")],
    )
    def test_from_pocket_raises(self, structure_klifs_id, klifs_session, feature_name):
        """
        Test if SiteAlignFeature raises error when passed an invalid feature name.
        """
        with pytest.raises(KeyError):
            pocket = PocketBioPython.from_structure_klifs_id(
                structure_klifs_id, klifs_session=klifs_session
            )
            SiteAlignFeature.from_pocket(pocket, feature_name)
    @pytest.mark.parametrize(
        "structure_klifs_id, klifs_session",
        [(12347, LOCAL)],
    )
    def test_values(self, structure_klifs_id, klifs_session):
        """
        Test class property: values.
        """
        pocket = PocketBioPython.from_structure_klifs_id(
            structure_klifs_id, klifs_session=klifs_session
        )
        # Use example feature type
        feature = SiteAlignFeature.from_pocket(pocket, feature_name="hba")
        assert isinstance(feature.values, list)
        for value in feature.values:
            assert isinstance(value, float)
    @pytest.mark.parametrize(
        "structure_klifs_id, klifs_session",
        [(12347, LOCAL)],
    )
    def test_details(self, structure_klifs_id, klifs_session):
        """
        Test class property: details.
        """
        pocket = PocketBioPython.from_structure_klifs_id(
            structure_klifs_id, klifs_session=klifs_session
        )
        # Use example feature type
        feature = SiteAlignFeature.from_pocket(pocket, feature_name="hba")
        assert isinstance(feature.details, pd.DataFrame)
        assert feature.details.columns.to_list() == [
            "residue.id",
            "residue.name",
            "sitealign.category",
        ]
    @pytest.mark.parametrize(
        "residue_name, feature_name, value",
        [
            ("ALA", "size", 1.0),  # Size
            ("ASN", "size", 2.0),
            ("ARG", "size", 3.0),
            ("PTR", "size", 3.0),  # Converted non-standard
            ("MSE", "size", 2.0),  # Converted non-standard
            ("XXX", "size", np.nan),  # Non-convertable non-standard
            ("ALA", "hbd", 0.0),
            ("ASN", "hbd", 1.0),
            ("ARG", "hbd", 3.0),
            ("XXX", "hbd", np.nan),
            ("ALA", "hba", 0.0),
            ("ASN", "hba", 1.0),
            ("ASP", "hba", 2.0),
            ("XXX", "hba", np.nan),
            ("ALA", "charge", 0.0),
            ("ARG", "charge", 1.0),
            ("ASP", "charge", -1.0),
            ("XXX", "charge", np.nan),
            ("ALA", "aromatic", 0.0),
            ("HIS", "aromatic", 1.0),
            ("XXX", "aromatic", np.nan),
            ("ARG", "aliphatic", 0.0),
            ("ALA", "aliphatic", 1.0),
            ("XXX", "aliphatic", np.nan),
        ],
    )
    def test_residue_to_value(self, residue_name, feature_name, value):
        """
        Test function for retrieval of residue's size and pharmacophoric features
        (i.e. number of hydrogen bond donor,
        hydrogen bond acceptors, charge features, aromatic features or aliphatic features )
        Parameters
        ----------
        residue_name : str
            Three-letter code for residue.
        feature_name : str
            Feature type name.
        value : float or None
            Feature value.
        """
        feature = SiteAlignFeature()
        # Call feature from residue function
        value_calculated = feature._residue_to_value(residue_name, feature_name)
        # NOTE(review): the truthiness check also skips 0.0 (not only None),
        # so the isinstance assertion only runs for non-zero values.
        if value_calculated:  # If not None
            assert isinstance(value_calculated, float)
        # Note: Cannot use == to compare np.nan values
        if np.isnan(value):
            assert np.isnan(value_calculated)
        else:
            assert value_calculated == value
    @pytest.mark.parametrize(
        "feature_name",
        [("XXX"), (1)],
    )
    def test_raise_invalid_feature_name(self, feature_name):
        """
        Test if KeyError is raised if user passes an incorrect SiteAlign feature string.
        """
        feature = SiteAlignFeature()
        with pytest.raises(KeyError):
            feature._raise_invalid_feature_name(feature_name)
    @pytest.mark.parametrize(
        "residue_name, residue_name_converted",
        [
            ("MSE", "MET"),
            ("ALA", None),
            ("XXX", None),
        ],
    )
    def test_convert_modified_residue(self, residue_name, residue_name_converted):
        """
        Test if modified residues are converted into standard residues correctly.
        If conversion is not possible, test if None is returned.
        """
        feature = SiteAlignFeature()
        assert feature._convert_modified_residue(residue_name) == residue_name_converted
|
[
"kissim.io.PocketBioPython.from_structure_klifs_id",
"opencadd.databases.klifs.setup_local",
"numpy.isnan",
"pytest.raises",
"pathlib.Path",
"kissim.encoding.features.SiteAlignFeature",
"kissim.encoding.features.SiteAlignFeature.from_pocket",
"pytest.mark.parametrize"
] |
[((400, 446), 'opencadd.databases.klifs.setup_local', 'setup_local', (["(PATH_TEST_DATA / 'KLIFS_download')"], {}), "(PATH_TEST_DATA / 'KLIFS_download')\n", (411, 446), False, 'from opencadd.databases.klifs import setup_local\n'), ((541, 782), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""structure_klifs_id, klifs_session, feature_name"""', "[(12347, LOCAL, 'hba'), (12347, LOCAL, 'hbd'), (12347, LOCAL, 'size'), (\n 12347, LOCAL, 'charge'), (12347, LOCAL, 'aliphatic'), (12347, LOCAL,\n 'aromatic')]"], {}), "('structure_klifs_id, klifs_session, feature_name',\n [(12347, LOCAL, 'hba'), (12347, LOCAL, 'hbd'), (12347, LOCAL, 'size'),\n (12347, LOCAL, 'charge'), (12347, LOCAL, 'aliphatic'), (12347, LOCAL,\n 'aromatic')])\n", (564, 782), False, 'import pytest\n'), ((1829, 1932), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""structure_klifs_id, klifs_session, feature_name"""', "[(12347, LOCAL, 'xxx')]"], {}), "('structure_klifs_id, klifs_session, feature_name',\n [(12347, LOCAL, 'xxx')])\n", (1852, 1932), False, 'import pytest\n'), ((2394, 2472), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""structure_klifs_id, klifs_session"""', '[(12347, LOCAL)]'], {}), "('structure_klifs_id, klifs_session', [(12347, LOCAL)])\n", (2417, 2472), False, 'import pytest\n'), ((2993, 3071), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""structure_klifs_id, klifs_session"""', '[(12347, LOCAL)]'], {}), "('structure_klifs_id, klifs_session', [(12347, LOCAL)])\n", (3016, 3071), False, 'import pytest\n'), ((3674, 4347), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""residue_name, feature_name, value"""', "[('ALA', 'size', 1.0), ('ASN', 'size', 2.0), ('ARG', 'size', 3.0), ('PTR',\n 'size', 3.0), ('MSE', 'size', 2.0), ('XXX', 'size', np.nan), ('ALA',\n 'hbd', 0.0), ('ASN', 'hbd', 1.0), ('ARG', 'hbd', 3.0), ('XXX', 'hbd',\n np.nan), ('ALA', 'hba', 0.0), ('ASN', 'hba', 1.0), ('ASP', 'hba', 2.0),\n ('XXX', 'hba', np.nan), ('ALA', 
'charge', 0.0), ('ARG', 'charge', 1.0),\n ('ASP', 'charge', -1.0), ('XXX', 'charge', np.nan), ('ALA', 'aromatic',\n 0.0), ('HIS', 'aromatic', 1.0), ('XXX', 'aromatic', np.nan), ('ARG',\n 'aliphatic', 0.0), ('ALA', 'aliphatic', 1.0), ('XXX', 'aliphatic', np.nan)]"], {}), "('residue_name, feature_name, value', [('ALA',\n 'size', 1.0), ('ASN', 'size', 2.0), ('ARG', 'size', 3.0), ('PTR',\n 'size', 3.0), ('MSE', 'size', 2.0), ('XXX', 'size', np.nan), ('ALA',\n 'hbd', 0.0), ('ASN', 'hbd', 1.0), ('ARG', 'hbd', 3.0), ('XXX', 'hbd',\n np.nan), ('ALA', 'hba', 0.0), ('ASN', 'hba', 1.0), ('ASP', 'hba', 2.0),\n ('XXX', 'hba', np.nan), ('ALA', 'charge', 0.0), ('ARG', 'charge', 1.0),\n ('ASP', 'charge', -1.0), ('XXX', 'charge', np.nan), ('ALA', 'aromatic',\n 0.0), ('HIS', 'aromatic', 1.0), ('XXX', 'aromatic', np.nan), ('ARG',\n 'aliphatic', 0.0), ('ALA', 'aliphatic', 1.0), ('XXX', 'aliphatic', np.nan)]\n )\n", (3697, 4347), False, 'import pytest\n'), ((5722, 5773), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""feature_name"""', "['XXX', 1]"], {}), "('feature_name', ['XXX', 1])\n", (5745, 5773), False, 'import pytest\n'), ((6120, 6235), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""residue_name, residue_name_converted"""', "[('MSE', 'MET'), ('ALA', None), ('XXX', None)]"], {}), "('residue_name, residue_name_converted', [('MSE',\n 'MET'), ('ALA', None), ('XXX', None)])\n", (6143, 6235), False, 'import pytest\n'), ((1096, 1189), 'kissim.io.PocketBioPython.from_structure_klifs_id', 'PocketBioPython.from_structure_klifs_id', (['structure_klifs_id'], {'klifs_session': 'klifs_session'}), '(structure_klifs_id, klifs_session=\n klifs_session)\n', (1135, 1189), False, 'from kissim.io import PocketBioPython\n'), ((1225, 1275), 'kissim.encoding.features.SiteAlignFeature.from_pocket', 'SiteAlignFeature.from_pocket', (['pocket', 'feature_name'], {}), '(pocket, feature_name)\n', (1253, 1275), False, 'from kissim.encoding.features import SiteAlignFeature\n'), ((2636, 
2729), 'kissim.io.PocketBioPython.from_structure_klifs_id', 'PocketBioPython.from_structure_klifs_id', (['structure_klifs_id'], {'klifs_session': 'klifs_session'}), '(structure_klifs_id, klifs_session=\n klifs_session)\n', (2675, 2729), False, 'from kissim.io import PocketBioPython\n'), ((2800, 2856), 'kissim.encoding.features.SiteAlignFeature.from_pocket', 'SiteAlignFeature.from_pocket', (['pocket'], {'feature_name': '"""hba"""'}), "(pocket, feature_name='hba')\n", (2828, 2856), False, 'from kissim.encoding.features import SiteAlignFeature\n'), ((3237, 3330), 'kissim.io.PocketBioPython.from_structure_klifs_id', 'PocketBioPython.from_structure_klifs_id', (['structure_klifs_id'], {'klifs_session': 'klifs_session'}), '(structure_klifs_id, klifs_session=\n klifs_session)\n', (3276, 3330), False, 'from kissim.io import PocketBioPython\n'), ((3401, 3457), 'kissim.encoding.features.SiteAlignFeature.from_pocket', 'SiteAlignFeature.from_pocket', (['pocket'], {'feature_name': '"""hba"""'}), "(pocket, feature_name='hba')\n", (3429, 3457), False, 'from kissim.encoding.features import SiteAlignFeature\n'), ((5283, 5301), 'kissim.encoding.features.SiteAlignFeature', 'SiteAlignFeature', ([], {}), '()\n', (5299, 5301), False, 'from kissim.encoding.features import SiteAlignFeature\n'), ((5594, 5609), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (5602, 5609), True, 'import numpy as np\n'), ((5994, 6012), 'kissim.encoding.features.SiteAlignFeature', 'SiteAlignFeature', ([], {}), '()\n', (6010, 6012), False, 'from kissim.encoding.features import SiteAlignFeature\n'), ((6575, 6593), 'kissim.encoding.features.SiteAlignFeature', 'SiteAlignFeature', ([], {}), '()\n', (6591, 6593), False, 'from kissim.encoding.features import SiteAlignFeature\n'), ((2160, 2183), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (2173, 2183), False, 'import pytest\n'), ((2206, 2299), 'kissim.io.PocketBioPython.from_structure_klifs_id', 
'PocketBioPython.from_structure_klifs_id', (['structure_klifs_id'], {'klifs_session': 'klifs_session'}), '(structure_klifs_id, klifs_session=\n klifs_session)\n', (2245, 2299), False, 'from kissim.io import PocketBioPython\n'), ((2337, 2387), 'kissim.encoding.features.SiteAlignFeature.from_pocket', 'SiteAlignFeature.from_pocket', (['pocket', 'feature_name'], {}), '(pocket, feature_name)\n', (2365, 2387), False, 'from kissim.encoding.features import SiteAlignFeature\n'), ((5630, 5656), 'numpy.isnan', 'np.isnan', (['value_calculated'], {}), '(value_calculated)\n', (5638, 5656), True, 'import numpy as np\n'), ((6027, 6050), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (6040, 6050), False, 'import pytest\n'), ((340, 354), 'pathlib.Path', 'Path', (['__name__'], {}), '(__name__)\n', (344, 354), False, 'from pathlib import Path\n')]
|
from zeit.vgwort.token import _order_tokens
import transaction
import unittest
import zeit.vgwort.interfaces
import zeit.vgwort.testing
import zope.component
class TokenStorageTest(zeit.vgwort.testing.EndToEndTestCase):
    """End-to-end tests that order tokens from the VG Wort test system."""
    def order(self, amount):
        # Helper: order tokens, but skip the test when the remote test system
        # is down instead of failing the build.
        ts = zope.component.getUtility(zeit.vgwort.interfaces.ITokens)
        try:
            ts.order(amount)
        except zeit.vgwort.interfaces.TechnicalError:
            self.skipTest('vgwort test system down')
    def test_order_tokens(self):
        ts = zope.component.getUtility(zeit.vgwort.interfaces.ITokens)
        self.assertEqual(0, len(ts))
        self.order(2)
        self.assertEqual(2, len(ts))
    def test_order_should_add_str(self):
        ts = zope.component.getUtility(zeit.vgwort.interfaces.ITokens)
        self.order(1)
        self.assertTrue(isinstance(ts._data[0][0], str))
        # NOTE(review): this repeats the exact same assertion; the second check
        # was presumably meant for ts._data[0][1] (the other token) — confirm.
        self.assertTrue(isinstance(ts._data[0][0], str))
class OrderTokensTest(zeit.vgwort.testing.TestCase):
    """Tests for the `_order_tokens` maintenance job."""

    def test_enough_tokens_should_not_order(self):
        """A sufficiently filled pool must not trigger a new order."""
        tokens = zope.component.getUtility(zeit.vgwort.interfaces.ITokens)
        tokens.order(20)
        self.assertEqual(len(tokens), 20)
        _order_tokens()
        self.assertEqual(len(tokens), 20)

    def test_insufficient_tokens_should_order_new(self):
        """An empty pool triggers ordering of new tokens."""
        tokens = zope.component.getUtility(zeit.vgwort.interfaces.ITokens)
        self.assertEqual(len(tokens), 0)
        _order_tokens()
        self.assertEqual(len(tokens), 1)
class TokenTransactionTest(zeit.vgwort.testing.TestCase):
    layer = zeit.vgwort.testing.XMLRPC_LAYER
    def test_error_during_publish_still_marks_token_as_claimed(self):
        tokens = zope.component.getUtility(zeit.vgwort.interfaces.ITokens)
        tokens.order(1)
        self.assertEqual(1, len(tokens))
        # Commit so the ordered token survives the abort below.
        transaction.commit()
        tokens.claim_immediately()
        # if an error occurs during publishing, the transaction will be aborted
        transaction.abort()
        # The claim must stick even though the transaction was rolled back.
        self.assertEqual(0, len(tokens))
class ObjectCopyTest(zeit.vgwort.testing.TestCase):
    def test_copying_should_removes_vgwort_properties_from_copy(self):
        # A copied document must start with a clean VG Wort state: no tokens
        # and no report status carried over from the original.
        import datetime
        import pytz
        import zeit.cms.interfaces
        import zeit.vgwort.interfaces
        content = zeit.cms.interfaces.ICMSContent(
            'http://xml.zeit.de/testcontent')
        token = zeit.vgwort.interfaces.IToken(content)
        # NOTE(review): token values look like anonymized placeholders from
        # data scrubbing; any non-None string works for this test.
        token.public_token = u'<PASSWORD>'
        token.private_token = u'<PASSWORD>'
        info = zeit.vgwort.interfaces.IReportInfo(content)
        info.reported_on = datetime.datetime.now(pytz.UTC)
        info.reported_error = u'error'
        online = zeit.cms.interfaces.ICMSContent(
            'http://xml.zeit.de/online/')
        zope.copypastemove.interfaces.IObjectCopier(content).copyTo(
            online, 'foo')
        copy = zeit.cms.interfaces.ICMSContent(
            'http://xml.zeit.de/online/foo')
        # All VG Wort properties must be reset on the copy.
        token = zeit.vgwort.interfaces.IToken(copy)
        self.assertEqual(None, token.public_token)
        self.assertEqual(None, token.private_token)
        info = zeit.vgwort.interfaces.IReportInfo(copy)
        self.assertEqual(None, info.reported_on)
        self.assertEqual(None, info.reported_error)
class SecurityObjectCopyTest(zeit.vgwort.testing.BrowserTestCase):
    def test_copying_should_work_even_with_security_on(self):
        # see #9960
        # Regression test: copying through the browser UI (security active)
        # must not raise; handleErrors=False surfaces any server error.
        self.browser.handleErrors = False
        self.assertNothingRaised(
            self.browser.open,
            'http://localhost/++skin++vivi/repository/online/@@copy?unique_id='
            'http%3A%2F%2Fxml.zeit.de%2Fonline%2F2007%2F01%2FSomalia')
class TokenServiceTest(unittest.TestCase):
    def test_should_be_initializable_without_config(self):
        """Instantiating TokenService without arguments must not raise."""
        from zeit.vgwort.token import TokenService
        TokenService()
|
[
"transaction.commit",
"zeit.vgwort.token._order_tokens",
"zeit.vgwort.token.TokenService",
"transaction.abort",
"datetime.datetime.now"
] |
[((1180, 1195), 'zeit.vgwort.token._order_tokens', '_order_tokens', ([], {}), '()\n', (1193, 1195), False, 'from zeit.vgwort.token import _order_tokens\n'), ((1421, 1436), 'zeit.vgwort.token._order_tokens', '_order_tokens', ([], {}), '()\n', (1434, 1436), False, 'from zeit.vgwort.token import _order_tokens\n'), ((1799, 1819), 'transaction.commit', 'transaction.commit', ([], {}), '()\n', (1817, 1819), False, 'import transaction\n'), ((1945, 1964), 'transaction.abort', 'transaction.abort', ([], {}), '()\n', (1962, 1964), False, 'import transaction\n'), ((2574, 2605), 'datetime.datetime.now', 'datetime.datetime.now', (['pytz.UTC'], {}), '(pytz.UTC)\n', (2595, 2605), False, 'import datetime\n'), ((3812, 3826), 'zeit.vgwort.token.TokenService', 'TokenService', ([], {}), '()\n', (3824, 3826), False, 'from zeit.vgwort.token import TokenService\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Renames the ``warranty_type`` field on ``Computer`` to ``warranty``."""
    dependencies = [
        ('computing', '0009_auto_20141128_1121'),
    ]
    operations = [
        # Pure rename; existing data is kept, only the column name changes.
        migrations.RenameField(
            model_name='computer',
            old_name='warranty_type',
            new_name='warranty',
        ),
    ]
|
[
"django.db.migrations.RenameField"
] |
[((253, 349), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""computer"""', 'old_name': '"""warranty_type"""', 'new_name': '"""warranty"""'}), "(model_name='computer', old_name='warranty_type',\n new_name='warranty')\n", (275, 349), False, 'from django.db import models, migrations\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetKeysResult',
'AwaitableGetKeysResult',
'get_keys',
'get_keys_output',
]
@pulumi.output_type
class GetKeysResult:
"""
A collection of values returned by getKeys.
"""
def __init__(__self__, id=None, key_signing_keys=None, managed_zone=None, project=None, zone_signing_keys=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if key_signing_keys and not isinstance(key_signing_keys, list):
raise TypeError("Expected argument 'key_signing_keys' to be a list")
pulumi.set(__self__, "key_signing_keys", key_signing_keys)
if managed_zone and not isinstance(managed_zone, str):
raise TypeError("Expected argument 'managed_zone' to be a str")
pulumi.set(__self__, "managed_zone", managed_zone)
if project and not isinstance(project, str):
raise TypeError("Expected argument 'project' to be a str")
pulumi.set(__self__, "project", project)
if zone_signing_keys and not isinstance(zone_signing_keys, list):
raise TypeError("Expected argument 'zone_signing_keys' to be a list")
pulumi.set(__self__, "zone_signing_keys", zone_signing_keys)
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="keySigningKeys")
def key_signing_keys(self) -> Sequence['outputs.GetKeysKeySigningKeyResult']:
"""
A list of Key-signing key (KSK) records. Structure is documented below. Additionally, the DS record is provided:
"""
return pulumi.get(self, "key_signing_keys")
@property
@pulumi.getter(name="managedZone")
def managed_zone(self) -> str:
return pulumi.get(self, "managed_zone")
@property
@pulumi.getter
def project(self) -> str:
return pulumi.get(self, "project")
@property
@pulumi.getter(name="zoneSigningKeys")
def zone_signing_keys(self) -> Sequence['outputs.GetKeysZoneSigningKeyResult']:
"""
A list of Zone-signing key (ZSK) records. Structure is documented below.
"""
return pulumi.get(self, "zone_signing_keys")
class AwaitableGetKeysResult(GetKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetKeysResult(
id=self.id,
key_signing_keys=self.key_signing_keys,
managed_zone=self.managed_zone,
project=self.project,
zone_signing_keys=self.zone_signing_keys)
def get_keys(managed_zone: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetKeysResult:
"""
Get the DNSKEY and DS records of DNSSEC-signed managed zones. For more information see the
[official documentation](https://cloud.google.com/dns/docs/dnskeys/)
and [API](https://cloud.google.com/dns/docs/reference/v1/dnsKeys).
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
foo = gcp.dns.ManagedZone("foo",
dns_name="foo.bar.",
dnssec_config=gcp.dns.ManagedZoneDnssecConfigArgs(
state="on",
non_existence="nsec3",
))
foo_dns_keys = foo.id.apply(lambda id: gcp.dns.get_keys(managed_zone=id))
pulumi.export("fooDnsDsRecord", foo_dns_keys.key_signing_keys[0].ds_record)
```
:param str managed_zone: The name or id of the Cloud DNS managed zone.
:param str project: The ID of the project in which the resource belongs. If `project` is not provided, the provider project is used.
"""
__args__ = dict()
__args__['managedZone'] = managed_zone
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:dns/getKeys:getKeys', __args__, opts=opts, typ=GetKeysResult).value
return AwaitableGetKeysResult(
id=__ret__.id,
key_signing_keys=__ret__.key_signing_keys,
managed_zone=__ret__.managed_zone,
project=__ret__.project,
zone_signing_keys=__ret__.zone_signing_keys)
@_utilities.lift_output_func(get_keys)
def get_keys_output(managed_zone: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetKeysResult]:
"""
Get the DNSKEY and DS records of DNSSEC-signed managed zones. For more information see the
[official documentation](https://cloud.google.com/dns/docs/dnskeys/)
and [API](https://cloud.google.com/dns/docs/reference/v1/dnsKeys).
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
foo = gcp.dns.ManagedZone("foo",
dns_name="foo.bar.",
dnssec_config=gcp.dns.ManagedZoneDnssecConfigArgs(
state="on",
non_existence="nsec3",
))
foo_dns_keys = foo.id.apply(lambda id: gcp.dns.get_keys(managed_zone=id))
pulumi.export("fooDnsDsRecord", foo_dns_keys.key_signing_keys[0].ds_record)
```
:param str managed_zone: The name or id of the Cloud DNS managed zone.
:param str project: The ID of the project in which the resource belongs. If `project` is not provided, the provider project is used.
"""
...
|
[
"pulumi.get",
"pulumi.getter",
"pulumi.set",
"pulumi.InvokeOptions",
"pulumi.runtime.invoke"
] |
[((1854, 1890), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""keySigningKeys"""'}), "(name='keySigningKeys')\n", (1867, 1890), False, 'import pulumi\n'), ((2190, 2223), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""managedZone"""'}), "(name='managedZone')\n", (2203, 2223), False, 'import pulumi\n'), ((2434, 2471), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""zoneSigningKeys"""'}), "(name='zoneSigningKeys')\n", (2447, 2471), False, 'import pulumi\n'), ((799, 829), 'pulumi.set', 'pulumi.set', (['__self__', '"""id"""', 'id'], {}), "(__self__, 'id', id)\n", (809, 829), False, 'import pulumi\n'), ((991, 1049), 'pulumi.set', 'pulumi.set', (['__self__', '"""key_signing_keys"""', 'key_signing_keys'], {}), "(__self__, 'key_signing_keys', key_signing_keys)\n", (1001, 1049), False, 'import pulumi\n'), ((1197, 1247), 'pulumi.set', 'pulumi.set', (['__self__', '"""managed_zone"""', 'managed_zone'], {}), "(__self__, 'managed_zone', managed_zone)\n", (1207, 1247), False, 'import pulumi\n'), ((1380, 1420), 'pulumi.set', 'pulumi.set', (['__self__', '"""project"""', 'project'], {}), "(__self__, 'project', project)\n", (1390, 1420), False, 'import pulumi\n'), ((1585, 1645), 'pulumi.set', 'pulumi.set', (['__self__', '"""zone_signing_keys"""', 'zone_signing_keys'], {}), "(__self__, 'zone_signing_keys', zone_signing_keys)\n", (1595, 1645), False, 'import pulumi\n'), ((1811, 1833), 'pulumi.get', 'pulumi.get', (['self', '"""id"""'], {}), "(self, 'id')\n", (1821, 1833), False, 'import pulumi\n'), ((2133, 2169), 'pulumi.get', 'pulumi.get', (['self', '"""key_signing_keys"""'], {}), "(self, 'key_signing_keys')\n", (2143, 2169), False, 'import pulumi\n'), ((2274, 2306), 'pulumi.get', 'pulumi.get', (['self', '"""managed_zone"""'], {}), "(self, 'managed_zone')\n", (2284, 2306), False, 'import pulumi\n'), ((2386, 2413), 'pulumi.get', 'pulumi.get', (['self', '"""project"""'], {}), "(self, 'project')\n", (2396, 2413), False, 'import pulumi\n'), ((2676, 2713), 'pulumi.get', 
'pulumi.get', (['self', '"""zone_signing_keys"""'], {}), "(self, 'zone_signing_keys')\n", (2686, 2713), False, 'import pulumi\n'), ((4337, 4359), 'pulumi.InvokeOptions', 'pulumi.InvokeOptions', ([], {}), '()\n', (4357, 4359), False, 'import pulumi\n'), ((4451, 4544), 'pulumi.runtime.invoke', 'pulumi.runtime.invoke', (['"""gcp:dns/getKeys:getKeys"""', '__args__'], {'opts': 'opts', 'typ': 'GetKeysResult'}), "('gcp:dns/getKeys:getKeys', __args__, opts=opts, typ=\n GetKeysResult)\n", (4472, 4544), False, 'import pulumi\n')]
|
import logging
from dropbox.client import DropboxClient # Dropobox official library
from ..redislist import RedisDropboxDownloadList, RedisDropboxIndexList
from .dropboxfile import DropboxFile
log = logging.getLogger('dropbox')
class DropboxDownloader:
"""
Download files from Dropbox based on a list previously built by the `DropboxCrawler` and
stored internally.
Parameters:
bearertoken_id -- the id of the `BearToken` owner of the Dropbox account.
access_token -- the access token of the `BearToken` owner of the Dropbox account.
"""
def __init__(self, bearertoken_id, access_token):
self.bearertoken_id = bearertoken_id
self.access_token = access_token
@property
def _client(self):
"""
A `dropbox.DropboxClient` for the current `bearertoken`.
It is a cached attribute so that it is a singleton.
"""
try:
cl = self._client_cached
except AttributeError:
cl = self._client_cached = DropboxClient(self.access_token)
return cl
def run(self):
print("Downloading for bearerid: ", self.bearertoken_id)
redis_dw = RedisDropboxDownloadList(self.bearertoken_id)
redis_ix = RedisDropboxIndexList(self.bearertoken_id)
for redis_entry in redis_dw.iterate():
# `redis_entry` is a `RedisDropboxEntry` instance.
# If:
# - `redis_entry.is_del()`: move the entry to the index list
# - `redis_entry.is_reset()`: move the entry to the index list
# - `redis_entry.is_add()`: download the file locally, update
# `redis_entry.remote_path` with the local file name, move the entry to the
# index list
#
# Bear in mind that:
# - entries with `redis_entry.is_add()` are only files (no dirs cause they have
# already been filtered out)
# - entries with `redis_entry.is_del()`: we don't know if they are files or dir
# but we don't care since during indexing we ask Solr to delete: name and name/*
# And a sanity check is run when creating a `RedisDropboxEntry` instance.
# TODO
print(redis_entry.operation, redis_entry.remote_path)
if redis_entry.is_add():
# Download the file. We could use client.get_file or client.get_file_and_metadata,
# but under the hood the actual call to the API is the same, cause that basic API
# call returns the file plus its metadata.
log.debug('Downloading: {}'.format(redis_entry.remote_path))
content, metadata = self._client.get_file_and_metadata(redis_entry.remote_path)
file = DropboxFile(content, metadata)
file.store_to_disk(self.bearertoken_id)
# Update `remote_path` attribute with the local name
redis_entry.local_name = file.local_name
redis_ix.buffer(redis_entry)
redis_ix.flush_buffer()
|
[
"dropbox.client.DropboxClient",
"logging.getLogger"
] |
[((203, 231), 'logging.getLogger', 'logging.getLogger', (['"""dropbox"""'], {}), "('dropbox')\n", (220, 231), False, 'import logging\n'), ((1020, 1052), 'dropbox.client.DropboxClient', 'DropboxClient', (['self.access_token'], {}), '(self.access_token)\n', (1033, 1052), False, 'from dropbox.client import DropboxClient\n')]
|
import pandas as pd
import numpy as np
import Levenshtein
import random
random.seed(12345)
d = pd.read_csv("../data/asjp19wide.csv", index_col=0)
words = d.values[~d.isnull()]
words = np.concatenate([w.split('-') for w in words])
tests = pd.DataFrame(columns=['word1', 'word2', 'LD'])
for i in range(1000):
if i % 100 == 0:
print(i)
w1, w2 = random.sample(list(words), 2)
tests.loc[i] = [w1, w2, Levenshtein.distance(w1, w2)]
tests.to_csv('levenshteinTests.csv', index=False)
|
[
"pandas.read_csv",
"Levenshtein.distance",
"random.seed",
"pandas.DataFrame"
] |
[((73, 91), 'random.seed', 'random.seed', (['(12345)'], {}), '(12345)\n', (84, 91), False, 'import random\n'), ((97, 147), 'pandas.read_csv', 'pd.read_csv', (['"""../data/asjp19wide.csv"""'], {'index_col': '(0)'}), "('../data/asjp19wide.csv', index_col=0)\n", (108, 147), True, 'import pandas as pd\n'), ((243, 289), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['word1', 'word2', 'LD']"}), "(columns=['word1', 'word2', 'LD'])\n", (255, 289), True, 'import pandas as pd\n'), ((423, 451), 'Levenshtein.distance', 'Levenshtein.distance', (['w1', 'w2'], {}), '(w1, w2)\n', (443, 451), False, 'import Levenshtein\n')]
|
"""Test for the snakemake workflow distributed with region_set_profiler"""
import json
import subprocess
import os
import pandas as pd
import numpy as np
tmpdir = "/icgc/dkfzlsdf/analysis/hs_ontogeny/temp"
# TODO: gtfanno result has weird index
gtfanno_result: pd.DataFrame = pd.read_pickle(
"/icgc/dkfzlsdf/analysis/hs_ontogeny/results/wgbs/cohort_results/analyses/hierarchy/annotation/hierarchy-dmrs/v1/hierarchy-dmrs-anno_primary-annotations.p"
)
# all_regions_annotated = pd.read_pickle('/icgc/dkfzlsdf/analysis/hs_ontogeny/results/wgbs/cohort_results/analyses/hierarchy/annotation/hierarchy-dmrs/v1/hierarchy-dmrs-anno_all-annotations.p')
# all_regions_annotated.loc[all_regions_annotated.feat_class == 'intergenic', 'feature_rank'] = 'primary'
# gtfanno_result_temp = '/home/kraemers/temp/gtfanno-temp.p'
# primary_annotations.to_pickle(gtfanno_result_temp)
# gtfanno_result = primary_annotations
gene_annos = gtfanno_result.groupby(["Chromosome", "Start", "End", "gtfanno_uid"])[
"gene_name"
].aggregate(lambda ser: ser.str.cat(sep=","))
assert (
gene_annos.index.get_level_values("gtfanno_uid") == np.arange(gene_annos.shape[0])
).all()
gene_annos.index = gene_annos.index.droplevel(3)
clustered_gene_anno_fp = tmpdir + "clustered-gene-annos.p"
gene_annos.to_pickle(clustered_gene_anno_fp)
# Code to merge DMRs which are closer than merging_distance bp
# This should be moved elsewhere
# merging could also be achieved with pyranges:
# 1. slop all intervals with merging_distance on both sides
# 2. Cluster all intervals
# 3. Use the clustered intervals to find groups of intervals within the clustered intervals and compute the group annotations
merging_distance = 500
gtfanno_result = gtfanno_result.query('feat_class == "Promoter"')
distance_to_next_region = (
gtfanno_result.Start.iloc[1:].values - gtfanno_result.End.iloc[0:-1].values
)
# we iterate over the regions
# whenever the distance to the next region is > merging_distance, we begin a new cluster of regions
# In vectorized form:
region_cluster_ids = np.concatenate(
[[1], 1 + np.cumsum(distance_to_next_region > merging_distance)], axis=0
)
# Compress to gene anno series for the merged DMRs
gene_annos = gtfanno_result.groupby(region_cluster_ids)["gene_name"].apply(
lambda ser: ser.str.cat(sep=",")
)
gene_annos.to_pickle(clustered_gene_anno_fp)
gtfanno_result["gene_name"].to_pickle(clustered_gene_anno_fp)
config = {
"tasks": {
"cluster_ids": {
"no-basos/beta-value_zscores/metric-euclidean/linkage-ward/enrichments/min-gap_0.25": (
"min-gap_0.25",
"/icgc/dkfzlsdf/analysis/hs_ontogeny/results/wgbs/cohort_results/analyses/hierarchy/clustering/full-hierarchy/method-selection/no-basos/beta-value_zscores/metric-euclidean/linkage-ward/cutree-all.p",
),
# 'no-basos/beta-value_zscores/metric-euclidean/linkage-ward/enrichments/min-gap_0.12': ('min-gap_0.12',
# '/icgc/dkfzlsdf/analysis/hs_ontogeny/results/wgbs/cohort_results/analyses/hierarchy/clustering/full-hierarchy/method-selection/no-basos/beta-value_zscores/metric-euclidean/linkage-ward/cutree-all.p')
},
"metadata_tables": {
"codex": "/icgc/dkfzlsdf/analysis/hs_ontogeny/databases/enrichment_databases/lola_chipseq_2018-04-12/mm10/codex/regions/codex_annotations.csv",
"msigdb_canonical_pathways": "/icgc/dkfzlsdf/analysis/hs_ontogeny/databases/region_set_profiler_databases/msigdb_gmts/canonical-pathways.gmt",
},
"gene_annotations": {"promoters_500-bp-clusters": clustered_gene_anno_fp},
},
"output_dir": "/icgc/dkfzlsdf/analysis/hs_ontogeny/temp/rsp-tests",
"tmpdir": tmpdir,
"chromosomes": [
"1",
"10",
"11",
"12",
"13",
"14",
"15",
"16",
"17",
"18",
"19",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
],
}
config_fp = os.path.expanduser("~/temp/rsp-config.json")
with open(config_fp, "w") as fout:
json.dump(config, fout)
subprocess.run(
f"""
snakemake \
--snakefile {os.path.expanduser('~/projects/region_set_profiler/src/region_set_profiler/region_set_profiler.smk')} \
--configfile {config_fp} \
--cores 24 \
--keep-going \
--forcerun /icgc/dkfzlsdf/analysis/hs_ontogeny/temp/rsp-tests/no-basos/beta-value_zscores/metric-euclidean/linkage-ward/enrichments/min-gap_0.25/msigdb_canonical_pathways:promoters_500-bp-clusters/msigdb_canonical_pathways:promoters_500-bp-clusters.done
""",
shell=True,
executable="/bin/bash",
)
# --dryrun \
|
[
"json.dump",
"numpy.cumsum",
"numpy.arange",
"pandas.read_pickle",
"os.path.expanduser"
] |
[((279, 460), 'pandas.read_pickle', 'pd.read_pickle', (['"""/icgc/dkfzlsdf/analysis/hs_ontogeny/results/wgbs/cohort_results/analyses/hierarchy/annotation/hierarchy-dmrs/v1/hierarchy-dmrs-anno_primary-annotations.p"""'], {}), "(\n '/icgc/dkfzlsdf/analysis/hs_ontogeny/results/wgbs/cohort_results/analyses/hierarchy/annotation/hierarchy-dmrs/v1/hierarchy-dmrs-anno_primary-annotations.p'\n )\n", (293, 460), True, 'import pandas as pd\n'), ((4090, 4134), 'os.path.expanduser', 'os.path.expanduser', (['"""~/temp/rsp-config.json"""'], {}), "('~/temp/rsp-config.json')\n", (4108, 4134), False, 'import os\n'), ((4174, 4197), 'json.dump', 'json.dump', (['config', 'fout'], {}), '(config, fout)\n', (4183, 4197), False, 'import json\n'), ((1122, 1152), 'numpy.arange', 'np.arange', (['gene_annos.shape[0]'], {}), '(gene_annos.shape[0])\n', (1131, 1152), True, 'import numpy as np\n'), ((2074, 2127), 'numpy.cumsum', 'np.cumsum', (['(distance_to_next_region > merging_distance)'], {}), '(distance_to_next_region > merging_distance)\n', (2083, 2127), True, 'import numpy as np\n'), ((4256, 4366), 'os.path.expanduser', 'os.path.expanduser', (['"""~/projects/region_set_profiler/src/region_set_profiler/region_set_profiler.smk"""'], {}), "(\n '~/projects/region_set_profiler/src/region_set_profiler/region_set_profiler.smk'\n )\n", (4274, 4366), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import functools
from django.forms.utils import ErrorList
from .readonly import read_only_mode, ReadOnlyError
from .signals import send_post_commit, send_post_rollback, send_pre_commit
def full_clean_if_not_read_only(full_clean):
"""Decorator for preventing form submissions while in read-only mode."""
def wrapper(self):
full_clean(self)
if read_only_mode:
if '__all__' not in self._errors:
self._errors['__all__'] = ErrorList()
self._errors.get('__all__').insert(0, ReadOnlyError.message)
if hasattr(self, 'cleaned_data'):
delattr(self, 'cleaned_data')
return wrapper
def wrap(before=None, after=None, condition=lambda *args, **kwargs: True):
"""
A helper for creating decorators.
Runs a "before" function before the decorated function, and an "after"
function afterwards. The condition check is performed once before
the decorated function.
"""
def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
yes = condition(*args, **kwargs)
if yes and before:
before()
result = func(*args, **kwargs)
if yes and after:
after()
return result
return wrapped
return decorator
def wrap_before(before, condition=lambda *args, **kwargs: True):
"""
A helper for creating decorators.
Runs a "before" function before the decorated function. The condition
check is performed before the decorated function is called.
"""
def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if condition(*args, **kwargs):
before()
return func(*args, **kwargs)
return wrapped
return decorator
def wrap_after(after, condition=lambda *args, **kwargs: True):
"""
A helper for creating decorators.
Runs an "after" function after the decorated function. The condition
check is performed after the decorated function is called.
"""
def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
result = func(*args, **kwargs)
if condition(*args, **kwargs):
after()
return result
return wrapped
return decorator
commit = wrap(
before=send_pre_commit,
after=send_post_commit,
)
rollback = wrap_after(
after=send_post_rollback,
)
|
[
"django.forms.utils.ErrorList",
"functools.wraps"
] |
[((1036, 1057), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1051, 1057), False, 'import functools\n'), ((1659, 1680), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1674, 1680), False, 'import functools\n'), ((2163, 2184), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (2178, 2184), False, 'import functools\n'), ((499, 510), 'django.forms.utils.ErrorList', 'ErrorList', ([], {}), '()\n', (508, 510), False, 'from django.forms.utils import ErrorList\n')]
|
import sys
sys.path.append("../../")
import unittest
import paddle
import numpy as np
from paddleslim import UnstructuredPruner
from paddle.vision.models import mobilenet_v1
class TestUnstructuredPruner(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestUnstructuredPruner, self).__init__(*args, **kwargs)
paddle.disable_static()
self._gen_model()
def _gen_model(self):
self.net = mobilenet_v1(num_classes=10, pretrained=False)
self.pruner = UnstructuredPruner(
self.net, mode='ratio', ratio=0.98, threshold=0.0)
def test_prune(self):
ori_density = UnstructuredPruner.total_sparse(self.net)
ori_threshold = self.pruner.threshold
self.pruner.step()
self.net(
paddle.to_tensor(
np.random.uniform(0, 1, [16, 3, 32, 32]), dtype='float32'))
cur_density = UnstructuredPruner.total_sparse(self.net)
cur_threshold = self.pruner.threshold
print("Original threshold: {}".format(ori_threshold))
print("Current threshold: {}".format(cur_threshold))
print("Original density: {}".format(ori_density))
print("Current density: {}".format(cur_density))
self.assertLessEqual(ori_threshold, cur_threshold)
self.assertLessEqual(cur_density, ori_density)
self.pruner.update_params()
self.assertEqual(cur_density, UnstructuredPruner.total_sparse(self.net))
def test_summarize_weights(self):
max_value = -float("inf")
threshold = self.pruner.summarize_weights(self.net, 1.0)
for name, sub_layer in self.net.named_sublayers():
if not self.pruner._should_prune_layer(sub_layer):
continue
for param in sub_layer.parameters(include_sublayers=False):
max_value = max(
max_value,
np.max(np.abs(np.array(param.value().get_tensor()))))
print("The returned threshold is {}.".format(threshold))
print("The max_value is {}.".format(max_value))
self.assertEqual(max_value, threshold)
if __name__ == "__main__":
unittest.main()
|
[
"sys.path.append",
"unittest.main",
"numpy.random.uniform",
"paddleslim.UnstructuredPruner",
"paddle.disable_static",
"paddleslim.UnstructuredPruner.total_sparse",
"paddle.vision.models.mobilenet_v1"
] |
[((11, 36), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (26, 36), False, 'import sys\n'), ((2156, 2171), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2169, 2171), False, 'import unittest\n'), ((344, 367), 'paddle.disable_static', 'paddle.disable_static', ([], {}), '()\n', (365, 367), False, 'import paddle\n'), ((440, 486), 'paddle.vision.models.mobilenet_v1', 'mobilenet_v1', ([], {'num_classes': '(10)', 'pretrained': '(False)'}), '(num_classes=10, pretrained=False)\n', (452, 486), False, 'from paddle.vision.models import mobilenet_v1\n'), ((509, 578), 'paddleslim.UnstructuredPruner', 'UnstructuredPruner', (['self.net'], {'mode': '"""ratio"""', 'ratio': '(0.98)', 'threshold': '(0.0)'}), "(self.net, mode='ratio', ratio=0.98, threshold=0.0)\n", (527, 578), False, 'from paddleslim import UnstructuredPruner\n'), ((641, 682), 'paddleslim.UnstructuredPruner.total_sparse', 'UnstructuredPruner.total_sparse', (['self.net'], {}), '(self.net)\n', (672, 682), False, 'from paddleslim import UnstructuredPruner\n'), ((902, 943), 'paddleslim.UnstructuredPruner.total_sparse', 'UnstructuredPruner.total_sparse', (['self.net'], {}), '(self.net)\n', (933, 943), False, 'from paddleslim import UnstructuredPruner\n'), ((1417, 1458), 'paddleslim.UnstructuredPruner.total_sparse', 'UnstructuredPruner.total_sparse', (['self.net'], {}), '(self.net)\n', (1448, 1458), False, 'from paddleslim import UnstructuredPruner\n'), ((820, 860), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '[16, 3, 32, 32]'], {}), '(0, 1, [16, 3, 32, 32])\n', (837, 860), True, 'import numpy as np\n')]
|
# 人脸追踪例程
#
# 这个例程展示了如何使用关键特征来追踪一个已经使用Haar Cascade检测出来的人脸。
# 程序第一阶段先使用 Haar Cascade 找出人脸.然后使用关键特征来学习,最后不停的找这个人脸。
# 关键特征点可以用来追踪任何栋。
#
#翻译:01Studio
import sensor, time, image
# Reset sensor
sensor.reset()
sensor.set_contrast(3)
sensor.set_gainceiling(16)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((320, 240)) #在VGA(640*480)下开个小窗口,相当于数码缩放。
sensor.set_pixformat(sensor.GRAYSCALE)
# 延时以便摄像头稳定工作
sensor.skip_frames(time = 2000)
# 加载 Haar Cascade 模型
# 默认使用25个步骤,减少步骤会加快速度但会影响识别成功率.
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)
# 特征kpts1
kpts1 = None
# 找到人脸!
while (kpts1 == None):
img = sensor.snapshot()
img.draw_string(0, 0, "Looking for a face...")
# Find faces
objects = img.find_features(face_cascade, threshold=0.5, scale=1.25)
if objects:
# 将 ROI(x,y,w,h)往各个方向扩展 31 像素
face = (objects[0][0]-31, objects[0][1]-31,objects[0][2]+31*2, objects[0][3]+31*2)
# 使用扩展后的 ROI 区域(人脸)学习关键点
kpts1 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, roi=face)
# 用矩形框展示人脸
img.draw_rectangle(objects[0])
# 打印关键点
print(kpts1)
img.draw_keypoints(kpts1, size=24)
img = sensor.snapshot()
time.sleep(2000) #暂停以便观察特征
# FPS clock
clock = time.clock()
while (True):
clock.tick()
img = sensor.snapshot()
# 从图像中提取关键点
kpts2 = img.find_keypoints(threshold=10, scale_factor=1.1, max_keypoints=100, normalized=True)
if (kpts2):
# 跟关键点kpts1匹配
c=image.match_descriptor(kpts1, kpts2, threshold=85)
match = c[6] # C[6] 为 matches值,这个值越大表示匹配程度越高.
if (match>5): #设置当大于5的时候为匹配成功,并画图标示。打印相关信息。
img.draw_rectangle(c[2:6])
img.draw_cross(c[0], c[1], size=10)
print(kpts2, "matched:%d dt:%d"%(match, c[7]))
# Draw FPS
img.draw_string(0, 0, "FPS:%.2f"%(clock.fps()))
|
[
"sensor.set_framesize",
"sensor.set_windowing",
"sensor.set_gainceiling",
"sensor.skip_frames",
"image.match_descriptor",
"sensor.set_contrast",
"sensor.reset",
"sensor.snapshot",
"time.clock",
"time.sleep",
"image.HaarCascade",
"sensor.set_pixformat"
] |
[((189, 203), 'sensor.reset', 'sensor.reset', ([], {}), '()\n', (201, 203), False, 'import sensor, time, image\n'), ((204, 226), 'sensor.set_contrast', 'sensor.set_contrast', (['(3)'], {}), '(3)\n', (223, 226), False, 'import sensor, time, image\n'), ((227, 253), 'sensor.set_gainceiling', 'sensor.set_gainceiling', (['(16)'], {}), '(16)\n', (249, 253), False, 'import sensor, time, image\n'), ((254, 286), 'sensor.set_framesize', 'sensor.set_framesize', (['sensor.VGA'], {}), '(sensor.VGA)\n', (274, 286), False, 'import sensor, time, image\n'), ((287, 319), 'sensor.set_windowing', 'sensor.set_windowing', (['(320, 240)'], {}), '((320, 240))\n', (307, 319), False, 'import sensor, time, image\n'), ((350, 388), 'sensor.set_pixformat', 'sensor.set_pixformat', (['sensor.GRAYSCALE'], {}), '(sensor.GRAYSCALE)\n', (370, 388), False, 'import sensor, time, image\n'), ((404, 433), 'sensor.skip_frames', 'sensor.skip_frames', ([], {'time': '(2000)'}), '(time=2000)\n', (422, 433), False, 'import sensor, time, image\n'), ((505, 548), 'image.HaarCascade', 'image.HaarCascade', (['"""frontalface"""'], {'stages': '(25)'}), "('frontalface', stages=25)\n", (522, 548), False, 'import sensor, time, image\n'), ((1189, 1206), 'sensor.snapshot', 'sensor.snapshot', ([], {}), '()\n', (1204, 1206), False, 'import sensor, time, image\n'), ((1207, 1223), 'time.sleep', 'time.sleep', (['(2000)'], {}), '(2000)\n', (1217, 1223), False, 'import sensor, time, image\n'), ((1255, 1267), 'time.clock', 'time.clock', ([], {}), '()\n', (1265, 1267), False, 'import sensor, time, image\n'), ((635, 652), 'sensor.snapshot', 'sensor.snapshot', ([], {}), '()\n', (650, 652), False, 'import sensor, time, image\n'), ((1310, 1327), 'sensor.snapshot', 'sensor.snapshot', ([], {}), '()\n', (1325, 1327), False, 'import sensor, time, image\n'), ((1492, 1542), 'image.match_descriptor', 'image.match_descriptor', (['kpts1', 'kpts2'], {'threshold': '(85)'}), '(kpts1, kpts2, threshold=85)\n', (1514, 1542), False, 'import sensor, 
time, image\n')]
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 <NAME> All rights reserved.
#
"""
"""
#end_pymotw_header
import warnings
import logging
logging.basicConfig(level=logging.INFO)
def send_warnings_to_log(message, category, filename, lineno, file=None):
logging.warning(
'%s:%s: %s:%s' %
(filename, lineno, category.__name__, message))
return
old_showwarning = warnings.showwarning
warnings.showwarning = send_warnings_to_log
warnings.warn('message')
|
[
"logging.warning",
"warnings.warn",
"logging.basicConfig"
] |
[((153, 192), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (172, 192), False, 'import logging\n'), ((467, 491), 'warnings.warn', 'warnings.warn', (['"""message"""'], {}), "('message')\n", (480, 491), False, 'import warnings\n'), ((272, 357), 'logging.warning', 'logging.warning', (["('%s:%s: %s:%s' % (filename, lineno, category.__name__, message))"], {}), "('%s:%s: %s:%s' % (filename, lineno, category.__name__, message)\n )\n", (287, 357), False, 'import logging\n')]
|
#!/usr/bin/env python3
"""Setup script. Used by easy_install and pip."""
import os
from setuptools import setup, find_packages
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
SRC_PATH = os.path.join(BASE_PATH, "src")
PACKAGES = find_packages(where=SRC_PATH)
NAME = 'HartreeParticleDSL'
AUTHOR = ("<NAME> <<EMAIL>>")
AUTHOR_EMAIL = '<EMAIL>'
URL = 'https://github.com/NYI'
DOWNLOAD_URL = 'https://github.com/NYI'
DESCRIPTION = ('HartreeParticleDSL - A Generic Particle DSL supporting a variety of backends')
LONG_DESCRIPTION = '''\
TBD
'''
LICENSE = ' TBD '
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Software Development',
'Topic :: Utilities',
'Operating System :: POSIX',
'Operating System :: Unix']
VERSION = '0.0.1a'
if __name__ == '__main__':
def get_files(directory, install_path, valid_suffixes):
'''Utility routine that creates a list of 2-tuples, each consisting of
the target installation directory and a list of files
(specified relative to the project root directory).
:param str directory: the directory containing the required files.
:param str install_path: the location where the files will be placed.
:param valid_suffixes: the suffixes of the required files.
:type valid_suffixes: [str]
:returns: a list of 2-tuples, each consisting of the target \
installation directory and a list of files (specified relative \
to the project root directory).
:rtype: [(str, [str])]
'''
examples = []
for dirpath, _, filenames in os.walk(directory):
if ("__" not in dirpath) and filenames:
rel_path = os.path.relpath(dirpath, directory)
files = []
for filename in filenames:
if any([filename.endswith(suffix) for
suffix in valid_suffixes]):
files.append(
os.path.join(os.path.basename(install_path),
rel_path, filename))
if files:
examples.append((os.path.join(install_path, rel_path),
files))
return examples
# We have all of the example, tutorial and wrapper libraries files
# listed in MANIFEST.in but unless we specify them in the data_files
# argument of setup() they don't seem to get installed.
# Since the data_files argument doesn't accept wildcards we have to
# explicitly list every file we want.
# INSTALL_PATH controls where the files will be installed.
# VALID_SUFFIXES controls the type of files to include.
# Example programs, installed under share/HartreeParticleDSL/examples.
EGS_DIR = os.path.join(BASE_PATH, "examples")
INSTALL_PATH = os.path.join("share", "HartreeParticleDSL", "examples")
VALID_SUFFIXES = ["90", "py", "md", ".c", ".cl", "Makefile", ".mk"]
EXAMPLES = get_files(EGS_DIR, INSTALL_PATH, VALID_SUFFIXES)
# Wrapper/support libraries, installed under share/HartreeParticleDSL/lib.
LIBS_DIR = os.path.join(BASE_PATH, "lib")
INSTALL_PATH = os.path.join("share", "HartreeParticleDSL", "lib")
VALID_SUFFIXES = ["90", "sh", "py", "md", "Makefile", ".mk",
                  ".jinja", "doxyfile"]
LIBS = get_files(LIBS_DIR, INSTALL_PATH, VALID_SUFFIXES)
# NOTE(review): EXAMPLES is computed above but only LIBS is passed to
# data_files below — confirm whether the examples are meant to be installed.
setup(
    name=NAME,
    version=VERSION,
    author=AUTHOR,
    author_email=(AUTHOR_EMAIL),
    license=LICENSE,
    url=URL,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    classifiers=CLASSIFIERS,
    packages=PACKAGES,
    package_dir={"": "src"},
    install_requires=['pyparsing', 'fparser==0.0.12', 'configparser',
                      'six'],
    extras_require={
        'dag': ["graphviz"],
        'doc': ["sphinx", "sphinxcontrib.bibtex < 2.0.0",
                "sphinx_rtd_theme", "autoapi"],
        'psydata': ["Jinja2"],
        'test': ["pep8", "pylint", "pytest-cov", "pytest-pep8",
                 "pytest-pylint", "pytest-flakes", "pytest-pep257"],
        },
    include_package_data=True,
    # scripts=['bin/psyclone', 'bin/genkernelstub', 'bin/psyad'],
    data_files=LIBS
    )
|
[
"os.path.abspath",
"setuptools.setup",
"os.path.basename",
"os.walk",
"os.path.relpath",
"os.path.join",
"setuptools.find_packages"
] |
[((195, 225), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""src"""'], {}), "(BASE_PATH, 'src')\n", (207, 225), False, 'import os\n'), ((237, 266), 'setuptools.find_packages', 'find_packages', ([], {'where': 'SRC_PATH'}), '(where=SRC_PATH)\n', (250, 266), False, 'from setuptools import setup, find_packages\n'), ((157, 182), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (172, 182), False, 'import os\n'), ((2958, 2993), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""examples"""'], {}), "(BASE_PATH, 'examples')\n", (2970, 2993), False, 'import os\n'), ((3013, 3068), 'os.path.join', 'os.path.join', (['"""share"""', '"""HartreeParticleDSL"""', '"""examples"""'], {}), "('share', 'HartreeParticleDSL', 'examples')\n", (3025, 3068), False, 'import os\n'), ((3221, 3251), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""lib"""'], {}), "(BASE_PATH, 'lib')\n", (3233, 3251), False, 'import os\n'), ((3271, 3321), 'os.path.join', 'os.path.join', (['"""share"""', '"""HartreeParticleDSL"""', '"""lib"""'], {}), "('share', 'HartreeParticleDSL', 'lib')\n", (3283, 3321), False, 'import os\n'), ((3496, 4129), 'setuptools.setup', 'setup', ([], {'name': 'NAME', 'version': 'VERSION', 'author': 'AUTHOR', 'author_email': 'AUTHOR_EMAIL', 'license': 'LICENSE', 'url': 'URL', 'description': 'DESCRIPTION', 'long_description': 'LONG_DESCRIPTION', 'classifiers': 'CLASSIFIERS', 'packages': 'PACKAGES', 'package_dir': "{'': 'src'}", 'install_requires': "['pyparsing', 'fparser==0.0.12', 'configparser', 'six']", 'extras_require': "{'dag': ['graphviz'], 'doc': ['sphinx', 'sphinxcontrib.bibtex < 2.0.0',\n 'sphinx_rtd_theme', 'autoapi'], 'psydata': ['Jinja2'], 'test': ['pep8',\n 'pylint', 'pytest-cov', 'pytest-pep8', 'pytest-pylint', 'pytest-flakes',\n 'pytest-pep257']}", 'include_package_data': '(True)', 'data_files': 'LIBS'}), "(name=NAME, version=VERSION, author=AUTHOR, author_email=AUTHOR_EMAIL,\n license=LICENSE, url=URL, description=DESCRIPTION, 
long_description=\n LONG_DESCRIPTION, classifiers=CLASSIFIERS, packages=PACKAGES,\n package_dir={'': 'src'}, install_requires=['pyparsing',\n 'fparser==0.0.12', 'configparser', 'six'], extras_require={'dag': [\n 'graphviz'], 'doc': ['sphinx', 'sphinxcontrib.bibtex < 2.0.0',\n 'sphinx_rtd_theme', 'autoapi'], 'psydata': ['Jinja2'], 'test': ['pep8',\n 'pylint', 'pytest-cov', 'pytest-pep8', 'pytest-pylint', 'pytest-flakes',\n 'pytest-pep257']}, include_package_data=True, data_files=LIBS)\n", (3501, 4129), False, 'from setuptools import setup, find_packages\n'), ((1839, 1857), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (1846, 1857), False, 'import os\n'), ((1938, 1973), 'os.path.relpath', 'os.path.relpath', (['dirpath', 'directory'], {}), '(dirpath, directory)\n', (1953, 1973), False, 'import os\n'), ((2394, 2430), 'os.path.join', 'os.path.join', (['install_path', 'rel_path'], {}), '(install_path, rel_path)\n', (2406, 2430), False, 'import os\n'), ((2237, 2267), 'os.path.basename', 'os.path.basename', (['install_path'], {}), '(install_path)\n', (2253, 2267), False, 'import os\n')]
|
# BSD 2-CLAUSE LICENSE
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# original author: <NAME>, <NAME>, <NAME>, <NAME>
"""Functions to generate derived time features useful
in forecasting, such as growth, seasonality, holidays.
"""
import inspect
import math
import warnings
from datetime import datetime
import fbprophet.hdays as fbholidays
import holidays
import numpy as np
import pandas as pd
from scipy.special import expit
from greykite.common import constants as cst
def convert_date_to_continuous_time(dt):
    """Converts a date to continuous time, where each year is one unit.

    For example, noon on July 2nd of a non-leap year maps to
    ``year + 182.5 / 365``.

    Parameters
    ----------
    dt : datetime object
        The date to convert.

    Returns
    -------
    conti_date : `float`
        The date represented in (fractional) years.
    """
    # Number of days in this calendar year (365 or 366).
    days_in_year = (datetime(dt.year + 1, 1, 1) - datetime(dt.year, 1, 1)).days
    timetuple = dt.timetuple()
    # Days elapsed since Jan 1st 00:00, including the fractional day.
    elapsed_days = (timetuple.tm_yday - 1
                    + dt.hour / 24
                    + dt.minute / (24 * 60)
                    + dt.second / (24 * 3600))
    return dt.year + elapsed_days / float(days_in_year)
def get_default_origin_for_time_vars(df, time_col):
    """Returns the default time origin: the first timestamp in the data.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Training data containing the timestamp column.
    time_col : `str`
        Name of the time column in ``df``.

    Returns
    -------
    dt_continuous_time : `float`
        The first timestamp expressed in continuous time (fractional years),
        used as the origin when creating continuous time variables.
    """
    first_timestamp = pd.to_datetime(df[time_col][0])
    return convert_date_to_continuous_time(first_timestamp)
def build_time_features_df(dt, conti_year_origin):
    """This function gets a datetime-like vector and creates new columns containing temporal
    features useful for time series analysis and forecasting e.g. year, week of year, etc.
    Parameters
    ----------
    dt : array-like (1-dimensional)
        A vector of datetime-like values
    conti_year_origin : float
        The origin used for creating continuous time.
    Returns
    -------
    time_features_df : `pandas.DataFrame`
        Dataframe with the following time features.
        * "datetime": `datetime.datetime` object, a combination of date and a time
        * "date": `datetime.date` object, date with the format (year, month, day)
        * "year": integer, year of the date e.g. 2018
        * "year_length": integer, number of days in the year e.g. 365 or 366
        * "quarter": integer, quarter of the date, 1, 2, 3, 4
        * "quarter_start": `pandas.DatetimeIndex`, date of beginning of the current quarter
        * "quarter_length": integer, number of days in the quarter, 90/91 for Q1, 91 for Q2, 92 for Q3 and Q4
        * "month": integer, month of the year, January=1, February=2, ..., December=12
        * "month_length": integer, number of days in the month, 28/ 29/ 30/ 31
        * "woy": integer, ISO 8601 week of the year where a week starts from Monday, 1, 2, ..., 53
        * "doy": integer, ordinal day of the year, 1, 2, ..., year_length
        * "doq": integer, ordinal day of the quarter, 1, 2, ..., quarter_length
        * "dom": integer, ordinal day of the month, 1, 2, ..., month_length
        * "dow": integer, day of the week, Monday=1, Tuesday=2, ..., Sunday=7
        * "str_dow": string, day of the week as a string e.g. "1-Mon", "2-Tue", ..., "7-Sun"
        * "str_doy": string, day of the year e.g. "2020-03-20" for March 20, 2020
        * "hour": integer, discrete hours of the datetime, 0, 1, ..., 23
        * "minute": integer, minutes of the datetime, 0, 1, ..., 59
        * "second": integer, seconds of the datetime, 0, 1, ..., 3599
        * "year_month": string, (year, month) e.g. "2020-03" for March 2020
        * "year_woy": string, (year, week of year) e.g. "2020_42" for 42nd week of 2020
        * "month_dom": string, (month, day of month) e.g. "02/20" for February 20th
        * "year_woy_dow": string, (year, week of year, day of week) e.g. "2020_03_6" for Saturday of 3rd week in 2020
        * "woy_dow": string, (week of year, day of week) e.g. "03_6" for Saturday of 3rd week
        * "dow_hr": string, (day of week, hour) e.g. "4_09" for 9am on Thursday
        * "dow_hr_min": string, (day of week, hour, minute) e.g. "4_09_10" for 9:10am on Thursday
        * "tod": float, time of day, continuous, 0.0 to 24.0
        * "tow": float, time of week, continuous, 0.0 to 7.0
        * "tom": float, standardized time of month, continuous, 0.0 to 1.0
        * "toq": float, time of quarter, continuous, 0.0 to 1.0
        * "toy": float, standardized time of year, continuous, 0.0 to 1.0
        * "conti_year": float, year in continuous time, eg 2018.5 means middle of the year 2018
        * "is_weekend": boolean, weekend indicator, True for weekend, else False
        * "dow_grouped": string, Monday-Thursday=1234-MTuWTh, Friday=5-Fri, Saturday=6-Sat, Sunday=7-Sun
        * "ct1": float, linear growth based on conti_year_origin, -infinity to infinity
        * "ct2": float, signed quadratic growth, -infinity to infinity
        * "ct3": float, signed cubic growth, -infinity to infinity
        * "ct_sqrt": float, signed square root growth, -infinity to infinity
        * "ct_root3": float, signed cubic root growth, -infinity to infinity
    """
    # Normalize input to a DatetimeIndex so vectorized accessors below work.
    dt = pd.DatetimeIndex(dt)
    if len(dt) == 0:
        raise ValueError("Length of dt cannot be zero.")
    # basic time features
    date = dt.date
    year = dt.year
    # 365.0 + bool gives 365.0 (non-leap) or 366.0 (leap) per element.
    year_length = (365.0 + dt.is_leap_year)
    quarter = dt.quarter
    month = dt.month
    month_length = dt.days_in_month
    # finds first day of quarter
    # Built from strings "YYYY-M-01" where M = 1, 4, 7, 10 (3*quarter - 2).
    quarter_start = pd.DatetimeIndex(
        dt.year.map(str) + "-" + (3 * quarter - 2).map(int).map(str) + "-01")
    next_quarter_start = dt + pd.tseries.offsets.QuarterBegin(startingMonth=1)
    quarter_length = (next_quarter_start - quarter_start).days
    # finds offset from first day of quarter (rounds down to nearest day)
    doq = ((dt - quarter_start) / pd.to_timedelta("1D") + 1).astype(int)
    # week of year, "woy", follows ISO 8601:
    # - Week 01 is the week with the year's first Thursday in it.
    # - A week begins with Monday and ends with Sunday.
    # So the week number of the week that overlaps both years, is 1, 52, or 53,
    # depending on whether it has more days in the previous year or new year.
    # - e.g. Jan 1st, 2018 is Monday. woy of first 8 days = [1, 1, 1, 1, 1, 1, 1, 2]
    # - e.g. Jan 1st, 2019 is Tuesday. woy of first 8 days = [1, 1, 1, 1, 1, 1, 2, 2]
    # - e.g. Jan 1st, 2020 is Wednesday. woy of first 8 days = [1, 1, 1, 1, 1, 2, 2, 2]
    # - e.g. Jan 1st, 2015 is Thursday. woy of first 8 days = [1, 1, 1, 1, 2, 2, 2, 2]
    # - e.g. Jan 1st, 2021 is Friday. woy of first 8 days = [53, 53, 53, 1, 1, 1, 1, 1]
    # - e.g. Jan 1st, 2022 is Saturday. woy of first 8 days = [52, 52, 1, 1, 1, 1, 1, 1]
    # - e.g. Jan 1st, 2023 is Sunday. woy of first 8 days = [52, 1, 1, 1, 1, 1, 1, 1]
    woy = dt.strftime("%V").astype(int)
    doy = dt.dayofyear
    dom = dt.day
    dow = dt.strftime("%u").astype(int)
    str_dow = dt.strftime("%u-%a")  # e.g. 1-Mon, 2-Tue, ..., 7-Sun
    hour = dt.hour
    minute = dt.minute
    second = dt.second
    # grouped time feature
    str_doy = dt.strftime("%Y-%m-%d")  # e.g. 2020-03-20 for March 20, 2020
    year_month = dt.strftime("%Y-%m")  # e.g. 2020-03 for March 2020
    month_dom = dt.strftime("%m/%d")  # e.g. 02/20 for February 20th
    year_woy = dt.strftime("%Y_%V")  # e.g. 2020_42 for 42nd week of 2020
    year_woy_dow = dt.strftime("%Y_%V_%u")  # e.g. 2020_03_6 for Saturday of 3rd week in 2020
    # NOTE(review): uses "%W" (weeks start at the first Monday of the year)
    # while year_woy_dow above uses ISO "%V" — confirm the mismatch is intentional.
    woy_dow = dt.strftime("%W_%u")  # e.g. 03_6 for Saturday of 3rd week
    dow_hr = dt.strftime("%u_%H")  # e.g. 4_09 for 9am on Thursday
    dow_hr_min = dt.strftime("%u_%H_%M")  # e.g. 4_09_10 for 9:10am on Thursday
    # derived time features
    tod = hour + (minute / 60.0) + (second / 3600.0)
    tow = dow - 1 + (tod / 24.0)
    tom = (dom - 1 + (tod / 24.0)) / month_length
    toq = (doq - 1 + (tod / 24.0)) / quarter_length
    # time of year, continuous, 0.0 to 1.0. e.g. Jan 1, 12 am = 0/365, Jan 2, 12 am = 1/365, ...
    # To handle leap years, Feb 28 = 58/365 - 59/365, Feb 29 = 59/365, Mar 1 = 59/365 - 60/365
    # offset term is nonzero only in leap years
    # doy_offset reduces doy by 1 from from Mar 1st (doy > 60)
    doy_offset = (year_length == 366) * 1.0 * (doy > 60)
    # tod_offset sets tod to 0 on Feb 29th (doy == 60)
    tod_offset = 1 - (year_length == 366) * 1.0 * (doy == 60)
    toy = (doy - 1 - doy_offset + (tod / 24.0) * tod_offset) / 365.0
    # year of date in continuous time, eg 2018.5 means middle of year 2018
    # this is useful for modeling features that do not care about leap year e.g. environmental variables
    conti_year = year + (doy - 1 + (tod / 24.0)) / year_length
    is_weekend = pd.Series(dow).apply(lambda x: x in [6, 7]).values  # weekend indicator
    # categorical var with levels (Mon-Thu, Fri, Sat, Sun), could help when training data are sparse.
    dow_grouped = pd.Series(str_dow).apply(lambda x: "1234-MTuWTh" if (x in ["1-Mon", "2-Tue", "3-Wed", "4-Thu"]) else x).values
    # growth terms
    ct1 = conti_year - conti_year_origin
    ct2 = signed_pow(ct1, 2)
    ct3 = signed_pow(ct1, 3)
    ct_sqrt = signed_pow(ct1, 1/2)
    ct_root3 = signed_pow(ct1, 1/3)
    # All keys must be added to constants.
    features_dict = {
        "datetime": dt,
        "date": date,
        "year": year,
        "year_length": year_length,
        "quarter": quarter,
        "quarter_start": quarter_start,
        "quarter_length": quarter_length,
        "month": month,
        "month_length": month_length,
        "woy": woy,
        "doy": doy,
        "doq": doq,
        "dom": dom,
        "dow": dow,
        "str_dow": str_dow,
        "str_doy": str_doy,
        "hour": hour,
        "minute": minute,
        "second": second,
        "year_month": year_month,
        "year_woy": year_woy,
        "month_dom": month_dom,
        "year_woy_dow": year_woy_dow,
        "woy_dow": woy_dow,
        "dow_hr": dow_hr,
        "dow_hr_min": dow_hr_min,
        "tod": tod,
        "tow": tow,
        "tom": tom,
        "toq": toq,
        "toy": toy,
        "conti_year": conti_year,
        "is_weekend": is_weekend,
        "dow_grouped": dow_grouped,
        "ct1": ct1,
        "ct2": ct2,
        "ct3": ct3,
        "ct_sqrt": ct_sqrt,
        "ct_root3": ct_root3,
    }
    df = pd.DataFrame(features_dict)
    return df
def add_time_features_df(df, time_col, conti_year_origin):
    """Augments a data frame with the time features from
    `build_time_features_df`.

    :param df: the input data frame
    :param time_col: the name of the time column of interest
    :param conti_year_origin: the origin of time for the continuous time variable
    :return: the same data frame (df) augmented with new columns
    """
    base_df = df.reset_index(drop=True)
    features = build_time_features_df(
        dt=base_df[time_col],
        conti_year_origin=conti_year_origin)
    features = features.reset_index(drop=True)
    # Column-wise concatenation; both frames share the fresh 0..n-1 index.
    return pd.concat([base_df, features], axis=1)
def get_holidays(countries, year_start, year_end):
    """This function extracts a holiday data frame for the period of interest
    [year_start to year_end] for the given countries.
    This is done using the holidays libraries in pypi:fbprophet and pypi:holidays
    Implementation resembles that of `~fbprophet.make_holidays.make_holidays_df`
    Parameters
    ----------
    countries : `list` [`str`]
        countries for which we need holidays
    year_start : `int`
        first year of interest, inclusive
    year_end : `int`
        last year of interest, inclusive
    Returns
    -------
    holiday_df_dict : `dict` [`str`, `pandas.DataFrame`]
        - key: country name
        - value: data frame with holidays for that country
        Each data frame has two columns: EVENT_DF_DATE_COL, EVENT_DF_LABEL_COL
    """
    country_holiday_dict = {}
    # Both endpoints are inclusive.
    year_list = list(range(year_start, year_end + 1))
    for country in countries:
        try:
            # Fetch the holidays from fbprophet holiday set
            # Suppress the following warning for India:
            # "We only support Diwali and Holi holidays from 2010 to 2025"
            if country in ["India", "IN"]:
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    country_holidays = getattr(fbholidays, country)(years=year_list)
            else:
                country_holidays = getattr(fbholidays, country)(years=year_list)
        except AttributeError:
            # Country not in fbprophet's set.
            # Fetch the holidays from pypi:holidays set
            try:
                country_holidays = getattr(holidays, country)(years=year_list)
            except AttributeError:
                # Country is in neither library.
                raise AttributeError(f"Holidays in {country} are not currently supported!")
        # country_holidays is used as a {date: holiday_name} mapping below.
        country_df = pd.DataFrame({
            cst.EVENT_DF_DATE_COL: list(country_holidays.keys()),
            cst.EVENT_DF_LABEL_COL: list(country_holidays.values())})
        country_df[cst.EVENT_DF_DATE_COL] = pd.to_datetime(country_df[cst.EVENT_DF_DATE_COL])
        country_holiday_dict[country] = country_df
    return country_holiday_dict
def get_available_holiday_lookup_countries(countries=None):
    """Returns the sorted list of country names for which holiday lookup
    is supported (by either pypi:fbprophet or pypi:holidays).

    :param countries: List[str]
        only look for available countries in this set
    :return: List[str]
        list of available countries for modeling holidays
    """
    def _country_classes(module):
        # Country classes defined directly in ``module`` (one class per country).
        return {name for name, obj in inspect.getmembers(module, inspect.isclass)
                if obj.__module__ == module.__name__}
    found_countries = _country_classes(fbholidays) | _country_classes(holidays)
    if countries is not None:
        found_countries = found_countries.intersection(set(countries))
    found_countries.discard("HolidayBase")  # edge case, remove if found
    return sorted(found_countries)
def get_available_holidays_in_countries(
        countries,
        year_start,
        year_end):
    """Returns a dictionary mapping each country to the sorted list of its
    distinct holiday names between the years specified (both inclusive).

    :param countries: List[str]
        countries for which we need holidays
    :param year_start: int
        first year of interest
    :param year_end: int
        last year of interest
    :return: Dict[str, List[str]]
        key: country name
        value: list of holidays in that country between [year_start, year_end]
    """
    holiday_df_dict = get_holidays(countries, year_start, year_end)
    country_holiday_list = {}
    for country, holiday_df in holiday_df_dict.items():
        unique_names = set(holiday_df[cst.EVENT_DF_LABEL_COL].values)
        country_holiday_list[country] = list(sorted(unique_names))
    return country_holiday_list
def get_available_holidays_across_countries(
        countries,
        year_start,
        year_end):
    """Returns the sorted list of holiday names that occur in any of the
    given countries between the years specified (both inclusive).

    :param countries: List[str]
        countries for which we need holidays
    :param year_start: int
        first year of interest
    :param year_end: int
        last year of interest
    :return: List[str]
        names of holidays in any of the countries between [year_start, year_end]
    """
    per_country = get_available_holidays_in_countries(
        countries=countries,
        year_start=year_start,
        year_end=year_end)
    unique_holidays = set()
    for holiday_list in per_country.values():
        unique_holidays.update(holiday_list)
    return list(sorted(unique_holidays))
def add_daily_events(
        df,
        event_df_dict,
        date_col=cst.EVENT_DF_DATE_COL,
        regular_day_label=cst.EVENT_DEFAULT):
    """Left-joins each events table in ``event_df_dict`` onto ``df`` by date.

    For every key ``label`` of ``event_df_dict``, a new column named
    ``f"{cst.EVENT_PREFIX}_{label}"`` is added: on dates listed in that events
    table it holds the event's label, on all other dates it holds
    ``regular_day_label``.

    Notes
    -----
    ``df[date_col]`` is converted to datetime in place on the input frame.
    Each events frame is copied before its columns are renamed, so the frames
    inside ``event_df_dict`` are left unmodified.

    Parameters
    ----------
    df : `pandas.DataFrame`
        The data frame which has a date column.
    event_df_dict : `dict` [`str`, `pandas.DataFrame`]
        Maps an event-set name to a two-column frame: the first column holds
        dates (at the same frequency as ``df[date_col]``, parsable by
        `pandas.to_datetime`), the second holds the event label for each date.
    date_col : `str`
        Column name in ``df`` that contains the dates for joining against
        the events in ``event_df_dict``.
    regular_day_label : `str`
        The label used for regular days which are not "events".

    Returns
    -------
    df_daily_events : `pandas.DataFrame`
        An augmented version of ``df`` with one new label column per key
        of ``event_df_dict``.
    """
    df[date_col] = pd.to_datetime(df[date_col])
    for event_name, events in event_df_dict.items():
        events = events.copy()
        label_column = f"{cst.EVENT_PREFIX}_{event_name}"
        events.columns = [date_col, label_column]
        events[date_col] = pd.to_datetime(events[date_col])
        df = df.merge(events, on=date_col, how="left")
        # Dates absent from the events table get the regular-day label.
        df[label_column] = df[label_column].fillna(regular_day_label)
    return df
def add_event_window(
        df,
        time_col,
        label_col,
        time_delta="1D",
        pre_num=1,
        post_num=1,
        events_name=""):
    """Generates shifted copies of an events data frame before and after
    each event.

    For example, if the event data frame contains the row
    '2019-12-25, Christmas' and ``pre_num``/``post_num`` are at least 1,
    the output contains frames with '2019-12-24, Christmas' and
    '2019-12-26, Christmas'.

    :param df: pd.DataFrame
        the events data frame with two columns 'time_col' and 'label_col'
    :param time_col: str
        The column with the timestamp of the events.
        This can be daily but does not have to
    :param label_col: str
        the column with labels for the events
    :param time_delta: str
        the shift unit, parsable by `pandas.to_timedelta` e.g. "1D" is one day
    :param pre_num: int
        the number of shifted copies generated before each event
    :param post_num: int
        the number of shifted copies generated after each event
    :return: dict[key: pd.Dataframe]
        One frame per shift, ``pre_num + post_num`` in total. Keys are
        "{events_name}_minus_1", ..., then "{events_name}_plus_1", ...
    """
    shift = pd.to_timedelta(time_delta)
    shifted_dfs = {}
    # "minus" frames are shifted earlier in time, "plus" frames later.
    for direction, sign, count in (("minus", -1, pre_num), ("plus", 1, post_num)):
        for k in range(1, count + 1):
            shifted = pd.DataFrame()
            shifted[time_col] = df[time_col] + sign * k * shift
            shifted[label_col] = df[label_col]
            shifted_dfs[f"{events_name}_{direction}_{k:.0f}"] = shifted
    return shifted_dfs
def get_evenly_spaced_changepoints_values(
        df,
        continuous_time_col="ct1",
        n_changepoints=2):
    """Partitions interval into n_changepoints + 1 segments,
    placing a changepoint at left endpoint of each segment.
    The left most segment doesn't get a changepoint.
    Changepoints should be determined from training data.

    :param df: pd.DataFrame
        training dataset. contains continuous_time_col
    :param continuous_time_col: str
        name of continuous time column (e.g. conti_year, ct1)
    :param n_changepoints: int
        number of changepoints requested
    :return: np.array
        values of df[continuous_time_col] at the changepoints
    :raises ValueError: if n_changepoints is not positive
    """
    if not n_changepoints > 0:
        raise ValueError("n_changepoints must be > 0")
    n = df.shape[0]
    n_steps = n_changepoints + 1
    step_size = n / n_steps
    # Cast to int: np.floor returns a float array, and indexing a Series with
    # floats is rejected by modern pandas. This also matches the sibling
    # get_evenly_spaced_changepoints_dates, which already casts to int.
    indices = np.floor(np.arange(start=1, stop=n_steps) * step_size).astype(int)
    return df[continuous_time_col][indices].values
def get_evenly_spaced_changepoints_dates(
        df,
        time_col,
        n_changepoints):
    """Partitions the training period into ``n_changepoints + 1`` equal
    segments and returns the dates at the left endpoint of each segment,
    including the very first row of ``df``.

    Changepoints should be determined from training data.

    :param df: pd.DataFrame
        training dataset. contains time_col
    :param time_col: str
        name of time column
    :param n_changepoints: int
        number of changepoints requested
    :return: pd.Series
        values of df[time_col] at the changepoints
    """
    if not n_changepoints >= 0:
        raise ValueError("n_changepoints must be >= 0")
    segment_size = df.shape[0] / (n_changepoints + 1)
    offsets = np.floor(
        np.arange(start=1, stop=n_changepoints + 1) * segment_size).astype(int)
    # Row 0 is always included in addition to the segment boundaries.
    positions = np.concatenate([[0], offsets])
    return df.loc[df.index[positions], time_col]
def get_custom_changepoints_values(
        df,
        changepoint_dates,
        time_col=cst.TIME_COL,
        continuous_time_col="ct1"):
    """Returns the values of ``continuous_time_col`` at the requested
    changepoint dates.

    Each requested date is mapped to the first timestamp in the dataset on
    or after that date; dates past the end of the data are dropped.

    :param df: pd.DataFrame
        training dataset. contains continuous_time_col and time_col
    :param changepoint_dates: Iterable[Union[int, float, str, datetime]]
        Changepoint dates, interpreted by pd.to_datetime.
    :param time_col: str
        The column name in `df` representing time for the time series data
        The time column can be anything that can be parsed by pandas DatetimeIndex
    :param continuous_time_col: str
        name of continuous time column (e.g. conti_year, ct1)
    :return: np.array
        values of df[continuous_time_col] at the changepoints,
        or None if no changepoint date falls within the data range
    """
    timestamps = pd.to_datetime(df[time_col])
    requested = pd.to_datetime(changepoint_dates)
    # First timestamp >= each requested date; requests past the data are dropped.
    matched = [timestamps[timestamps >= date].min()
               for date in requested if (timestamps >= date).any()]
    mask = timestamps.isin(matched)
    values = df[mask][continuous_time_col].values
    return values if values.shape[0] > 0 else None
def get_changepoint_string(changepoint_dates):
"""Gets proper formatted strings for changepoint dates.
The default format is "_%Y_%m_%d_%H". When necessary, it appends "_%M" or "_%M_%S".
Parameters
----------
changepoint_dates : `list`
List of changepoint dates, parsable by `pandas.to_datetime`.
Returns
-------
date_strings : `list[`str`]`
List of string formatted changepoint dates.
"""
changepoint_dates = list(pd.to_datetime(changepoint_dates))
time_format = "_%Y_%m_%d_%H"
if any([stamp.second != 0 for stamp in changepoint_dates]):
time_format += "_%M_%S"
elif any([stamp.minute != 0 for stamp in changepoint_dates]):
time_format += "_%M"
date_strings = [date.strftime(time_format) for date in changepoint_dates]
return date_strings
def get_changepoint_features(
        df,
        changepoint_values,
        continuous_time_col="ct1",
        growth_func=None,
        changepoint_dates=None):
    """Builds one growth feature per changepoint, with the time origin shifted
    to that changepoint.

    For changepoint value ``c`` and continuous time ``t``, the feature is
    ``growth_func(t - c) * I(t >= c)``: zero before the changepoint, and growth
    measured from the changepoint afterwards. When ``growth_func(0) == 0`` the
    combined effect of all changepoints is continuous in time; with the
    identity ``growth_func`` these columns form a piecewise linear basis whose
    fitted coefficients are slope changes at each changepoint.

    :param df: pd.Dataframe
        The dataset to make predictions. Contains column continuous_time_col.
    :param changepoint_values: array-like
        List of changepoint values (on same scale as df[continuous_time_col]).
        Should be determined from training data
    :param continuous_time_col: Optional[str]
        Name of continuous time column in df; growth_func is applied to this
        column to generate the growth term. If None, uses "ct1", linear growth
    :param growth_func: Optional[callable]
        Growth function for defining changepoints (scalar -> scalar).
        If None, the identity function is used
    :param changepoint_dates: Optional[list]
        List of change point dates, parsable by `pandas.to_datetime`.
        When given, the dates become suffixes of the feature column names
    :return: pd.DataFrame, shape (df.shape[0], len(changepoint_values))
        Changepoint features, 0-indexed
    """
    if continuous_time_col is None:
        continuous_time_col = "ct1"
    if growth_func is None:
        def growth_func(x):
            return x
    if changepoint_dates is None:
        time_postfixes = [""] * len(changepoint_values)
    else:
        time_postfixes = get_changepoint_string(changepoint_dates)
    base_time = np.array(df[continuous_time_col])
    changepoint_df = pd.DataFrame()
    for idx, cp_value in enumerate(changepoint_values):
        shifted = base_time - cp_value  # t - c_idx
        # growth as a function of time since the changepoint (clipped at 0)
        growth = np.array([growth_func(max(t, 0)) for t in shifted])
        active = shifted >= 0  # feature takes effect starting at c_idx
        feature = pd.Series(
            growth * active,
            name=f"{cst.CHANGEPOINT_COL_PREFIX}{idx}{time_postfixes[idx]}")
        changepoint_df = pd.concat([changepoint_df, feature], axis=1)
    return changepoint_df
def get_changepoint_values_from_config(
        changepoints_dict,
        time_features_df,
        time_col=cst.TIME_COL):
    """Applies the changepoint method specified in `changepoints_dict` to return the changepoint values
    :param changepoints_dict: Optional[Dict[str, any]]
        Specifies the changepoint configuration.
        "method": str
            The method to locate changepoints. Valid options:
                "uniform". Places n_changepoints evenly spaced changepoints to allow growth to change.
                "custom". Places changepoints at the specified dates.
            Additional keys to provide parameters for each particular method are described below.
        "continuous_time_col": Optional[str]
            Column to apply `growth_func` to, to generate changepoint features
            Typically, this should match the growth term in the model
        "growth_func": Optional[func]
            Growth function (scalar -> scalar). Changepoint features are created
            by applying `growth_func` to "continuous_time_col" with offsets.
            If None, uses identity function to use `continuous_time_col` directly
            as growth term
        If changepoints_dict["method"] == "uniform", this other key is required:
            "n_changepoints": int
                number of changepoints to evenly space across training period
        If changepoints_dict["method"] == "custom", this other key is required:
            "dates": Iterable[Union[int, float, str, datetime]]
                Changepoint dates. Must be parsable by pd.to_datetime.
                Changepoints are set at the closest time on or after these dates
                in the dataset.
    :param time_features_df: pd.Dataframe
        training dataset. contains column "continuous_time_col"
    :param time_col: str
        The column name in `time_features_df` representing time for the time series data
        The time column can be anything that can be parsed by pandas DatetimeIndex
        Used only in the "custom" method.
    :return: np.array
        values of df[continuous_time_col] at the changepoints
    """
    # A None config means no changepoints: returns None.
    changepoint_values = None
    if changepoints_dict is not None:
        valid_changepoint_methods = ["uniform", "custom"]
        changepoint_method = changepoints_dict.get("method")
        continuous_time_col = changepoints_dict.get("continuous_time_col")
        if changepoint_method is None:
            raise Exception("changepoint method must be specified")
        if changepoint_method not in valid_changepoint_methods:
            raise NotImplementedError(
                f"changepoint method {changepoint_method} not recognized. "
                f"Must be one of {valid_changepoint_methods}")
        if changepoint_method == "uniform":
            # n_changepoints == 0 is allowed and leaves changepoint_values as None.
            if changepoints_dict["n_changepoints"] > 0:
                # Only pass overrides that were provided, so the callee's defaults apply otherwise.
                params = {"continuous_time_col": continuous_time_col} if continuous_time_col is not None else {}
                changepoint_values = get_evenly_spaced_changepoints_values(
                    df=time_features_df,
                    n_changepoints=changepoints_dict["n_changepoints"],
                    **params)
        elif changepoint_method == "custom":
            # Only pass overrides that were provided, so the callee's defaults apply otherwise.
            params = {}
            if time_col is not None:
                params["time_col"] = time_col
            if continuous_time_col is not None:
                params["continuous_time_col"] = continuous_time_col
            changepoint_values = get_custom_changepoints_values(
                df=time_features_df,
                changepoint_dates=changepoints_dict["dates"],
                **params)
    return changepoint_values
def get_changepoint_features_and_values_from_config(
        df,
        time_col,
        changepoints_dict=None,
        origin_for_time_vars=None):
    """Extracts changepoints from changepoint configuration and input data
    :param df: pd.DataFrame
        Training data. A data frame which includes the timestamp and value columns
    :param time_col: str
        The column name in `df` representing time for the time series data
        The time column can be anything that can be parsed by pandas DatetimeIndex
    :param changepoints_dict: Optional[Dict[str, any]]
        Specifies the changepoint configuration.
        "method": str
            The method to locate changepoints. Valid options:
                "uniform". Places n_changepoints evenly spaced changepoints to allow growth to change.
                "custom". Places changepoints at the specified dates.
            Additional keys to provide parameters for each particular method are described below.
        "continuous_time_col": Optional[str]
            Column to apply `growth_func` to, to generate changepoint features
            Typically, this should match the growth term in the model
        "growth_func": Optional[func]
            Growth function (scalar -> scalar). Changepoint features are created
            by applying `growth_func` to "continuous_time_col" with offsets.
            If None, uses identity function to use `continuous_time_col` directly
            as growth term
        If changepoints_dict["method"] == "uniform", this other key is required:
            "n_changepoints": int
                number of changepoints to evenly space across training period
        If changepoints_dict["method"] == "custom", this other key is required:
            "dates": Iterable[Union[int, float, str, datetime]]
                Changepoint dates. Must be parsable by pd.to_datetime.
                Changepoints are set at the closest time on or after these dates
                in the dataset.
    :param origin_for_time_vars: Optional[float]
        The time origin used to create continuous variables for time
    :return: Dict[str, any]
        Dictionary with the requested changepoints and associated information
            changepoint_df: pd.DataFrame, shape (df.shape[0], len(changepoints))
                Changepoint features for modeling the training data
            changepoint_values: array-like
                List of changepoint values (on same scale as df[continuous_time_col])
                Can be used to generate changepoints for prediction.
            continuous_time_col: Optional[str]
                Name of continuous time column in df
                growth_func is applied to this column to generate growth term.
                If None, uses "ct1", linear growth
                Can be used to generate changepoints for prediction.
            growth_func: Optional[callable]
                Growth function for defining changepoints (scalar -> scalar).
                If None, uses identity function to use continuous_time_col directly
                as growth term.
                Can be used to generate changepoints for prediction.
            changepoint_cols: List[str]
                Names of the changepoint columns for modeling
    """
    # Step 1: extract changepoint values (on the continuous-time scale).
    if changepoints_dict is None:
        # No changepoint configuration: no values, no growth column/function.
        changepoint_values = None
        continuous_time_col = None
        growth_func = None
    else:
        if origin_for_time_vars is None:
            # Derive the time origin from the training data when not given.
            origin_for_time_vars = get_default_origin_for_time_vars(df, time_col)
        time_features_df = build_time_features_df(
            df[time_col],
            conti_year_origin=origin_for_time_vars)
        changepoint_values = get_changepoint_values_from_config(
            changepoints_dict=changepoints_dict,
            time_features_df=time_features_df,
            time_col="datetime")  # "datetime" column is generated by `build_time_features_df`
        continuous_time_col = changepoints_dict.get("continuous_time_col")
        growth_func = changepoints_dict.get("growth_func")
    # Step 2: build the changepoint feature matrix and column names.
    if changepoint_values is None:
        changepoint_df = None
        changepoint_cols = []
    else:
        # NOTE: `changepoint_values is not None` implies `changepoints_dict is not None`
        # (see Step 1), so this first branch is effectively unreachable; kept for safety.
        if changepoints_dict is None:
            changepoint_dates = None
        elif changepoints_dict["method"] == "custom":
            changepoint_dates = list(pd.to_datetime(changepoints_dict["dates"]))
        elif changepoints_dict["method"] == "uniform":
            changepoint_dates = get_evenly_spaced_changepoints_dates(
                df=df,
                time_col=time_col,
                n_changepoints=changepoints_dict["n_changepoints"]
            ).tolist()[1:]  # drops the first point: the growth term already covers it
        else:
            changepoint_dates = None
        # `time_features_df` exists here because changepoint_values is only
        # non-None when the else-branch of Step 1 ran.
        changepoint_df = get_changepoint_features(
            df=time_features_df,
            changepoint_values=changepoint_values,
            continuous_time_col=continuous_time_col,
            growth_func=growth_func,
            changepoint_dates=changepoint_dates)
        changepoint_cols = list(changepoint_df.columns)
    return {
        "changepoint_df": changepoint_df,
        "changepoint_values": changepoint_values,
        "continuous_time_col": continuous_time_col,
        "growth_func": growth_func,
        "changepoint_cols": changepoint_cols
    }
def get_changepoint_dates_from_changepoints_dict(
        changepoints_dict,
        df=None,
        time_col=None):
    """Extracts the list of changepoint dates encoded by ``changepoints_dict``.

    Parameters
    ----------
    changepoints_dict : `dict` or `None`
        Changepoint configuration compatible with
        `~greykite.algo.forecast.silverkite.forecast_silverkite.SilverkiteForecast.forecast`.
    df : `pandas.DataFrame` or `None`, default `None`
        Data to place the changepoints on (required for the "uniform" method).
    time_col : `str` or `None`, default `None`
        Name of the time column in ``df`` (required for the "uniform" method).

    Returns
    -------
    changepoint_dates : `list` or `None`
        The changepoint dates; `None` if the configuration is absent or invalid.
    """
    # Guard clauses: missing config or unrecognized method yields no dates.
    if changepoints_dict is None:
        return None
    method = changepoints_dict.get("method")
    if method not in ("auto", "uniform", "custom"):
        return None
    if method == "auto":
        # "auto" means the changepoints have not been detected yet.
        raise ValueError("The method of ``changepoints_dict`` can not be 'auto'. "
                         "Please specify or detect change points first.")
    if method == "custom":
        # "dates" is an Iterable; normalize to a plain list.
        return list(changepoints_dict["dates"])
    # method == "uniform": requires data to space the changepoints over.
    if df is None or time_col is None:
        raise ValueError("When the method of ``changepoints_dict`` is 'uniform', ``df`` and "
                         "``time_col`` must be provided.")
    spaced = get_evenly_spaced_changepoints_dates(
        df=df,
        time_col=time_col,
        n_changepoints=changepoints_dict["n_changepoints"]
    )
    # `spaced` is a pandas.Series; drop the first entry (start of the series)
    # and return the rest as a list.
    return spaced.tolist()[1:]
def add_event_window_multi(
        event_df_dict,
        time_col,
        label_col,
        time_delta="1D",
        pre_num=1,
        post_num=1,
        pre_post_num_dict=None):
    """Adds shifted copies of every event in each events data frame.

    For every events data frame in ``event_df_dict``, creates data frames of
    events shifted before and after the originals. E.g. with the row
    '2019-12-25, Christmas' and ``pre_num`` / ``post_num`` >= 1, the outputs
    include '2019-12-24, Christmas' and '2019-12-26, Christmas'.

    Parameters
    ----------
    event_df_dict : `dict` [`str`, `pandas.DataFrame`]
        Events data frames, each with columns ``time_col`` and ``label_col``.
    time_col : `str`
        Column holding the event timestamps (daily or otherwise).
    label_col : `str`
        Column holding the event labels.
    time_delta : `str`, default "1D"
        Size of each shift, e.g. "1D" for one day.
    pre_num : `int`, default 1
        Number of shifted events added before each event.
    post_num : `int`, default 1
        Number of shifted events added after each event.
    pre_post_num_dict : `dict` [`str`, (`int`, `int`)] or None, default None
        Per-key overrides of ``(pre_num, post_num)``. Keys not listed here
        use the defaults above.

    Returns
    -------
    df : `dict` [`str`, `pandas.DataFrame`]
        One data frame per shift, e.g. pre_num=2, post_num=3 contributes
        2 + 3 = 5 data frames per events key.
    """
    overrides = {} if pre_post_num_dict is None else pre_post_num_dict
    shifted_df_dict = {}
    for key, event_df in event_df_dict.items():
        # Per-key override wins over the global pre_num/post_num defaults.
        if key in overrides:
            n_pre = overrides[key][0]
            n_post = overrides[key][1]
        else:
            n_pre = pre_num
            n_post = post_num
        shifted_df_dict.update(add_event_window(
            df=event_df,
            time_col=time_col,
            label_col=label_col,
            time_delta=time_delta,
            pre_num=n_pre,
            post_num=n_post,
            events_name=key))
    return shifted_df_dict
def get_fourier_col_name(k, col_name, function_name="sin", seas_name=None):
    """Builds the column name of one Fourier term, matching ``fourier_series_fcn``.

    :param k: int
        Fourier term order
    :param col_name: str
        name of the dataframe column the Fourier series is built from
    :param function_name: str
        "sin" or "cos"
    :param seas_name: str or None
        optional seasonality suffix appended to the name
    :return: str
        the Fourier term's column name
    """
    # "." is not allowed in patsy formula terms, hence the flat
    # "<fn><k>_<col>" scheme instead of dotted names.
    base = f"{function_name}{k:.0f}_{col_name}"
    return base if seas_name is None else f"{base}_{seas_name}"
def fourier_series_fcn(col_name, period=1.0, order=1, seas_name=None):
    """Returns a function that builds a Fourier-series design matrix.

    :param col_name: str
        name of the (continuous) dataframe column to expand
    :param period: float
        period of the Fourier series
    :param order: int
        number of sine/cosine harmonics
    :param seas_name: Optional[str]
        suffix appended to generated column names; lets multiple series on
        the same column (with different periods) coexist
    :return: callable
        ``f(df) -> {"df": pd.DataFrame, "cols": list}`` where ``df`` holds the
        sin/cos terms and ``cols`` lists their names in order.
    """
    # Column-name suffix is fixed at construction time.
    suffix = "" if seas_name is None else f"_{seas_name}"

    def fs_func(df):
        if col_name not in df.columns:
            raise ValueError("The data frame does not have the column: " + col_name)
        x = np.array(df[col_name])
        base_freq = 2 * math.pi / period
        out_df = pd.DataFrame()
        out_cols = []
        for k in range(1, order + 1):
            # Same naming scheme as `get_fourier_col_name`; patsy forbids "."
            # in formula terms, hence the flat names.
            sin_col = f"sin{k:.0f}_{col_name}{suffix}"
            cos_col = f"cos{k:.0f}_{col_name}{suffix}"
            phase = base_freq * k * x
            out_df[sin_col] = np.sin(phase)
            out_df[cos_col] = np.cos(phase)
            out_cols.append(sin_col)
            out_cols.append(cos_col)
        return {"df": out_df, "cols": out_cols}
    return fs_func
def fourier_series_multi_fcn(
        col_names,
        periods=None,
        orders=None,
        seas_names=None):
    """Generates a func which adds multiple fourier series with multiple periods.

    Parameters
    ----------
    col_names : `list` [`str`]
        the column names which are to be used to generate Fourier series.
        Each column can have its own period and order.
    periods: `list` [`float`] or None
        the periods corresponding to each column given in col_names
    orders : `list` [`int`] or None
        the orders for each of the Fourier series
    seas_names : `list` [`str`] or None
        Appended to the Fourier series name.
        If not provided (None) col_names will be used directly.

    Returns
    -------
    callable
        ``f(df) -> {"df": pd.DataFrame, "cols": list}`` concatenating the
        Fourier features of every configured column.

    Raises
    ------
    ValueError
        If ``periods``, ``orders``, or ``seas_names`` (when provided) do not
        match the length of ``col_names``.
    """
    k = len(col_names)
    if periods is None:
        periods = [1.0] * k
    if orders is None:
        orders = [1] * k
    if len(periods) != len(orders):
        raise ValueError("periods and orders must have the same length.")
    # Fix: also validate against `col_names`. Previously a shorter `periods`/
    # `orders` passed silently and only failed later, as an IndexError, when
    # the returned function was called.
    if len(periods) != k:
        raise ValueError("periods and orders must have the same length as col_names.")
    if seas_names is not None and len(seas_names) != k:
        raise ValueError("seas_names, if provided, must have the same length as col_names.")

    def fs_multi_func(df):
        """Builds all configured Fourier features for ``df``."""
        out_df = None
        out_cols = []
        for i in range(k):
            seas_name = seas_names[i] if seas_names is not None else None
            # One single-column Fourier builder per configured column.
            func0 = fourier_series_fcn(
                col_name=col_names[i],
                period=periods[i],
                order=orders[i],
                seas_name=seas_name)
            res = func0(df)
            # Concatenate feature columns; `pd.concat([None, x])` drops the None.
            out_df = pd.concat([out_df, res["df"]], axis=1)
            out_cols = out_cols + res["cols"]
        return {"df": out_df, "cols": out_cols}
    return fs_multi_func
def signed_pow(x, y):
    """Sign-preserving power: raises ``|x|`` to ``y``, then restores the sign of ``x``.

    Taking the magnitude first guarantees the mapping is non-decreasing in
    ``x`` for any real ``y``, which is useful e.g. in statistical modeling.

    :param x: the base number which can be any real number
    :param y: the power which can be any real number
    :return: ``sign(x) * abs(x) ** y``
    """
    magnitude = np.power(np.abs(x), y)
    return np.sign(x) * magnitude
def signed_pow_fcn(y):
    """Returns a one-argument function computing ``signed_pow(x, y)`` for fixed ``y``."""
    def _signed_pow_y(x):
        return signed_pow(x, y)
    return _signed_pow_y


# Common sign-preserving transforms: square root and square.
signed_sqrt = signed_pow_fcn(1 / 2)
signed_sq = signed_pow_fcn(2)
def logistic(x, growth_rate=1.0, capacity=1.0, floor=0.0, inflection_point=0.0):
    """Evaluates a shifted/scaled logistic curve at ``x``.

    Computes ``floor + capacity * expit(growth_rate * (x - inflection_point))``,
    i.e. a sigmoid rising from ``floor`` towards ``floor + capacity``.

    :param x: value to evaluate the logistic function
    :type x: float
    :param growth_rate: growth rate
    :type growth_rate: float
    :param capacity: max value (carrying capacity)
    :type capacity: float
    :param floor: min value (lower bound)
    :type floor: float
    :param inflection_point: the x value of the inflection point
    :type inflection_point: float
    :return: value of the logistic function at x
    :rtype: float
    """
    scaled = growth_rate * (x - inflection_point)
    return floor + capacity * expit(scaled)
def get_logistic_func(growth_rate=1.0, capacity=1.0, floor=0.0, inflection_point=0.0):
    """Returns a one-argument logistic function with the given parameters baked in.

    The returned function computes
    ``f(t) = floor + capacity / (1 + exp(-growth_rate * (t - inflection_point)))``.

    :param growth_rate: growth rate
    :type growth_rate: float
    :param capacity: max value (carrying capacity)
    :type capacity: float
    :param floor: min value (lower bound)
    :type floor: float
    :param inflection_point: the t value of the inflection point
    :type inflection_point: float
    :return: the logistic function with specified parameters
    :rtype: callable
    """
    def _logistic_at(t):
        return logistic(t, growth_rate, capacity, floor, inflection_point)
    return _logistic_at
|
[
"numpy.abs",
"pandas.DatetimeIndex",
"numpy.sin",
"numpy.arange",
"inspect.getmembers",
"pandas.DataFrame",
"warnings.simplefilter",
"inspect.isclass",
"warnings.catch_warnings",
"pandas.concat",
"datetime.datetime",
"scipy.special.expit",
"pandas.to_timedelta",
"pandas.to_datetime",
"pandas.Series",
"numpy.cos",
"numpy.array",
"pandas.tseries.offsets.QuarterBegin",
"numpy.sign"
] |
[((2738, 2769), 'pandas.to_datetime', 'pd.to_datetime', (['df[time_col][0]'], {}), '(df[time_col][0])\n', (2752, 2769), True, 'import pandas as pd\n'), ((6696, 6716), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['dt'], {}), '(dt)\n', (6712, 6716), True, 'import pandas as pd\n'), ((11951, 11978), 'pandas.DataFrame', 'pd.DataFrame', (['features_dict'], {}), '(features_dict)\n', (11963, 11978), True, 'import pandas as pd\n'), ((12559, 12591), 'pandas.concat', 'pd.concat', (['[df, time_df]'], {'axis': '(1)'}), '([df, time_df], axis=1)\n', (12568, 12591), True, 'import pandas as pd\n'), ((18829, 18857), 'pandas.to_datetime', 'pd.to_datetime', (['df[date_col]'], {}), '(df[date_col])\n', (18843, 18857), True, 'import pandas as pd\n'), ((21067, 21094), 'pandas.to_timedelta', 'pd.to_timedelta', (['time_delta'], {}), '(time_delta)\n', (21082, 21094), True, 'import pandas as pd\n'), ((24501, 24529), 'pandas.to_datetime', 'pd.to_datetime', (['df[time_col]'], {}), '(df[time_col])\n', (24515, 24529), True, 'import pandas as pd\n'), ((24554, 24587), 'pandas.to_datetime', 'pd.to_datetime', (['changepoint_dates'], {}), '(changepoint_dates)\n', (24568, 24587), True, 'import pandas as pd\n'), ((28436, 28450), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (28448, 28450), True, 'import pandas as pd\n'), ((7166, 7214), 'pandas.tseries.offsets.QuarterBegin', 'pd.tseries.offsets.QuarterBegin', ([], {'startingMonth': '(1)'}), '(startingMonth=1)\n', (7197, 7214), True, 'import pandas as pd\n'), ((14607, 14656), 'pandas.to_datetime', 'pd.to_datetime', (['country_df[cst.EVENT_DF_DATE_COL]'], {}), '(country_df[cst.EVENT_DF_DATE_COL])\n', (14621, 14656), True, 'import pandas as pd\n'), ((19067, 19101), 'pandas.to_datetime', 'pd.to_datetime', (['event_df[date_col]'], {}), '(event_df[date_col])\n', (19081, 19101), True, 'import pandas as pd\n'), ((21140, 21154), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (21152, 21154), True, 'import pandas as pd\n'), ((21374, 21388), 
'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (21386, 21388), True, 'import pandas as pd\n'), ((25506, 25539), 'pandas.to_datetime', 'pd.to_datetime', (['changepoint_dates'], {}), '(changepoint_dates)\n', (25520, 25539), True, 'import pandas as pd\n'), ((28912, 28990), 'pandas.Series', 'pd.Series', (['new_col'], {'name': 'f"""{cst.CHANGEPOINT_COL_PREFIX}{i}{time_postfixes[i]}"""'}), "(new_col, name=f'{cst.CHANGEPOINT_COL_PREFIX}{i}{time_postfixes[i]}')\n", (28921, 28990), True, 'import pandas as pd\n'), ((29016, 29068), 'pandas.concat', 'pd.concat', (['[changepoint_df, new_changepoint]'], {'axis': '(1)'}), '([changepoint_df, new_changepoint], axis=1)\n', (29025, 29068), True, 'import pandas as pd\n'), ((44484, 44498), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (44496, 44498), True, 'import pandas as pd\n'), ((44683, 44694), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (44691, 44694), True, 'import numpy as np\n'), ((47575, 47585), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (47582, 47585), True, 'import numpy as np\n'), ((15095, 15125), 'inspect.getmembers', 'inspect.getmembers', (['fbholidays'], {}), '(fbholidays)\n', (15113, 15125), False, 'import inspect\n'), ((15258, 15286), 'inspect.getmembers', 'inspect.getmembers', (['holidays'], {}), '(holidays)\n', (15276, 15286), False, 'import inspect\n'), ((22480, 22512), 'numpy.arange', 'np.arange', ([], {'start': '(1)', 'stop': 'n_steps'}), '(start=1, stop=n_steps)\n', (22489, 22512), True, 'import numpy as np\n'), ((23334, 23377), 'numpy.arange', 'np.arange', ([], {'start': '(1)', 'stop': '(n_changepoints + 1)'}), '(start=1, stop=n_changepoints + 1)\n', (23343, 23377), True, 'import numpy as np\n'), ((28531, 28564), 'numpy.array', 'np.array', (['df[continuous_time_col]'], {}), '(df[continuous_time_col])\n', (28539, 28564), True, 'import numpy as np\n'), ((45275, 45284), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (45281, 45284), True, 'import numpy as np\n'), ((45320, 45329), 'numpy.cos', 
'np.cos', (['u'], {}), '(u)\n', (45326, 45329), True, 'import numpy as np\n'), ((46971, 47005), 'pandas.concat', 'pd.concat', (['[out_df, fs_df]'], {'axis': '(1)'}), '([out_df, fs_df], axis=1)\n', (46980, 47005), True, 'import pandas as pd\n'), ((47597, 47606), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (47603, 47606), True, 'import numpy as np\n'), ((48433, 48476), 'scipy.special.expit', 'expit', (['(growth_rate * (x - inflection_point))'], {}), '(growth_rate * (x - inflection_point))\n', (48438, 48476), False, 'from scipy.special import expit\n'), ((2000, 2025), 'datetime.datetime', 'datetime', (['dt.year', '(12)', '(31)'], {}), '(dt.year, 12, 31)\n', (2008, 2025), False, 'from datetime import datetime\n'), ((10327, 10341), 'pandas.Series', 'pd.Series', (['dow'], {}), '(dow)\n', (10336, 10341), True, 'import pandas as pd\n'), ((10519, 10537), 'pandas.Series', 'pd.Series', (['str_dow'], {}), '(str_dow)\n', (10528, 10537), True, 'import pandas as pd\n'), ((15137, 15157), 'inspect.isclass', 'inspect.isclass', (['obj'], {}), '(obj)\n', (15152, 15157), False, 'import inspect\n'), ((15298, 15318), 'inspect.isclass', 'inspect.isclass', (['obj'], {}), '(obj)\n', (15313, 15318), False, 'import inspect\n'), ((7386, 7407), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""1D"""'], {}), "('1D')\n", (7401, 7407), True, 'import pandas as pd\n'), ((13817, 13842), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (13840, 13842), False, 'import warnings\n'), ((13864, 13895), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (13885, 13895), False, 'import warnings\n'), ((37123, 37165), 'pandas.to_datetime', 'pd.to_datetime', (["changepoints_dict['dates']"], {}), "(changepoints_dict['dates'])\n", (37137, 37165), True, 'import pandas as pd\n')]
|
# coding: utf-8
# In[2]:
#start of code
#importing packages
import numpy as np
import scipy.signal as sp
import matplotlib.pyplot as plt
# In[3]:
def time_domain_output(f, H, t_start, t_end):
    """Simulates the LTI system ``H`` driven by the input samples ``f``.

    The time grid is rebuilt here with 10 samples per unit time, so ``f``
    must contain exactly ``10 * (t_end - t_start)`` samples (the same grid
    the top-level script builds).

    :param f: array-like input signal sampled on the grid described above
    :param H: scipy.signal.lti system to simulate
    :param t_start: start time of the simulation
    :param t_end: end time of the simulation
    :return: np.ndarray, the system output y(t) (zero initial conditions)
    """
    t = np.linspace(t_start, t_end, 10 * (t_end - t_start))
    # sp.lsim returns (time, output, state trajectory); only the output is
    # needed, so the unused values (previously bound to t2/svec) are discarded.
    _, y, _ = sp.lsim(H, f, t)
    return y
# In[4]:
# Forced oscillator d2x/dt2 + 2.25*x = f(t), i.e. H(s) = 1/(s^2 + 2.25);
# its natural (resonance) frequency is sqrt(2.25) = 1.5 rad/s.
t_start = 0
t_end = 100
# 10 samples per unit time -- matches the grid rebuilt inside time_domain_output.
t = np.linspace(t_start, t_end, 10 * (t_end - t_start))
# Input 1: cosine at the resonance frequency 1.5 with fast decay 0.5.
f1 = np.cos(1.5 * t) * np.exp(-0.5 * t)
# d2y + 2.25y = x
H = sp.lti([1], [1, 0, 2.25])
# In[5]:
y1 = time_domain_output(f1, H, t_start, t_end)
# In[14]:
plt.plot(t, y1)
plt.xlabel(r"t ---------->", size=15)
plt.ylabel(r"x ---------->", size=15)
plt.title(r"System with decay = 0.5", size=20)
plt.show()
# In[16]:
# Input 2: same frequency but a much slower decay (0.05), so the forcing
# keeps driving the system for longer.
f2 = np.cos(1.5 * t) * np.exp(-0.05 * t)
# In[17]:
y2 = time_domain_output(f2, H, t_start, t_end)
# In[18]:
plt.plot(t, y2)
plt.xlabel(r"t ---------->", size=15)
plt.ylabel(r"x ---------->", size=15)
plt.title(r"System with decay = 0.05", size=20)
plt.show()
def input(freq, damp_fac):
    """Damped cosine input cos(freq*t) * exp(-damp_fac*t) on t in [0, 100] (1000 samples).

    NOTE(review): this function shadows the built-in ``input``; renaming it
    (e.g. to ``damped_cosine``) would be cleaner, but the frequency-sweep
    loop below calls it under this name, so the rename must be coordinated.

    :param freq: angular frequency of the cosine (rad per unit time)
    :param damp_fac: exponential decay rate
    :return: np.ndarray of 1000 samples
    """
    t = np.linspace(0, 100, 1000)
    return np.cos(freq * t) * np.exp(-damp_fac * t)
# In[25]:
# Sweep the forcing frequency around resonance (1.4 ... 1.6 rad/s) with slow
# decay 0.05 and overlay the time-domain responses.
n = 5
t = np.linspace(0, 100, 1000)
freq_range = np.linspace(1.4, 1.6, n)
for freq in freq_range:
    # `input` is the local damped-cosine generator defined above (it shadows
    # the built-in of the same name).
    plt.plot(t, time_domain_output(input(freq, 0.05), H, 0, 100))
plt.xlabel("t -------->", size=15)
plt.ylabel("x -------->", size=15)
plt.title(r"System response with Different Frequencies", size=15)
plt.legend(["Freq = ${:.2f}$".format(f) for f in freq_range])
plt.show()
# In[62]:
# Bode magnitude and phase of H; the red marker highlights the magnitude
# peak at the resonance frequency 1.5 rad/s.
w, S, phi = H.bode()
plt.semilogx(w, S)
plt.plot(1.5, 28, "ro", label=r"Resonance Frequency")
plt.title(r"Magnitude Bode plot with resonance freq = 1.5", size=14)
plt.xlabel(r"Freq in rad/s log(w) -------->", size=15)
plt.ylabel("Mag in dB -------->", size=15)
plt.legend()
plt.show()
plt.semilogx(w, phi)
# plt.plot(1.5,28,"ro",label=r"Resonance Frequency")
plt.title(r"Phase Bode plot with resonance freq = 1.5", size=14)
plt.xlabel(r"Freq in rad/s log(w) -------->", size=15)
plt.ylabel("Phase in degrees -------->", size=15)
plt.show()
# In[11]:
# Coupled second-order system:
# eqn1 -- dx2 + x-y = 0
# Eqn2 --dy2 + 2(y-x) = 0
# In[52]:
# Decoupling: from eqn1, y = x'' + x; substituting into eqn2 gives
# x'''' + 3x'' = 0, whose Laplace transforms (with impulse initial
# conditions) yield the X(s) and Y(s) below.
# form eqn1 y = dx2 + x
# eq2 -- dx4+3dx2=0
xs = sp.lti([1, 0, 2], [1, 0, 3, 0])
ys = sp.lti([2], [1, 0, 3, 0])
# In[53]:
t = np.linspace(0, 20, 200)
# In[54]:
# Impulse responses of both transfer functions (None = zero initial state).
t1, x = sp.impulse(xs, None, t)
t2, y = sp.impulse(ys, None, t)
# In[72]:
plt.plot(t1, x, label=r"x(t)")
plt.plot(t2, y, label=r"y(t)")
plt.legend()
plt.xlabel("t ---------------->", size=15)
plt.title("Coupled Equation Response", size=15)
plt.show()
# In[77]:
# Two-pole circuit transfer function H(s) = 1e12 / (s^2 + 1e8*s + 1e12):
# unity DC gain; the widely separated poles make it act as a low-pass filter.
H_circ1 = sp.lti(np.poly1d([10**12]), np.poly1d([1, 10**8, 10**12]))
w1, S1, phi1 = H_circ1.bode()
plt.semilogx(w1, S1)
plt.xlabel("Frequency in rad/s", size=15)
plt.ylabel("Magnitude in dB", size=15)
plt.title("Magnitude plot", size=15)
plt.grid(True)
plt.show()
plt.semilogx(w1, phi1)
plt.xlabel("Frequency in rad/s", size=15)
plt.ylabel("Phase in degrees", size=15)
plt.title("Phase plot", size=15)
plt.grid(True)
plt.show()
# In[79]:
# Steady-state study: a 10 ms window with a two-tone input (1e3 and 1e6 rad/s);
# the filter passes the low tone and attenuates the high one.
t_steady = np.linspace(0, 10**-2, 10**5)
in_steady = np.cos(10**3 * t_steady) - np.cos(10**6 * t_steady)
# In[80]:
t1, y_steady, svec1 = sp.lsim(H_circ1, in_steady, t_steady)
# In[91]:
plt.plot(t1, y_steady)
plt.title("Steady state Response")
plt.ylabel(r"$V_{o}(t) --->$", size=15)
plt.xlabel(r"$t --->$", size=15)
plt.show()
# In[93]:
# Transient study: zoom into the first 35 microseconds of the same drive.
t_trans = np.linspace(0, 35 * 10**-6, 30 * 10**2 + 1)
in_trans = np.cos(10**3 * t_trans) - np.cos(10**6 * t_trans)
# In[94]:
t2, y_trans, svec2 = sp.lsim(H_circ1, in_trans, t_trans)
# In[95]:
plt.plot(t2, y_trans)
plt.title("Transient Response")
plt.ylabel(r"$V_{o}(t) --->$", size=15)
plt.xlabel(r"$t --->$", size=15)
plt.show()
|
[
"matplotlib.pyplot.title",
"numpy.poly1d",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"scipy.signal.impulse",
"matplotlib.pyplot.legend",
"scipy.signal.lsim",
"matplotlib.pyplot.grid",
"numpy.exp",
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.semilogx",
"scipy.signal.lti"
] |
[((340, 391), 'numpy.linspace', 'np.linspace', (['t_start', 't_end', '(10 * (t_end - t_start))'], {}), '(t_start, t_end, 10 * (t_end - t_start))\n', (351, 391), True, 'import numpy as np\n'), ((441, 466), 'scipy.signal.lti', 'sp.lti', (['[1]', '[1, 0, 2.25]'], {}), '([1], [1, 0, 2.25])\n', (447, 466), True, 'import scipy.signal as sp\n'), ((535, 550), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'y1'], {}), '(t, y1)\n', (543, 550), True, 'import matplotlib.pyplot as plt\n'), ((550, 596), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t ---------->"""'], {'size': '(15)'}), "('t ---------->', size=15)\n", (560, 596), True, 'import matplotlib.pyplot as plt\n'), ((597, 644), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x ---------->"""'], {'size': '(15)'}), "('x ---------->', size=15)\n", (607, 644), True, 'import matplotlib.pyplot as plt\n'), ((645, 690), 'matplotlib.pyplot.title', 'plt.title', (['"""System with decay = 0.5"""'], {'size': '(20)'}), "('System with decay = 0.5', size=20)\n", (654, 690), True, 'import matplotlib.pyplot as plt\n'), ((691, 701), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (699, 701), True, 'import matplotlib.pyplot as plt\n'), ((825, 840), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'y2'], {}), '(t, y2)\n', (833, 840), True, 'import matplotlib.pyplot as plt\n'), ((840, 886), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t ---------->"""'], {'size': '(15)'}), "('t ---------->', size=15)\n", (850, 886), True, 'import matplotlib.pyplot as plt\n'), ((887, 934), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x ---------->"""'], {'size': '(15)'}), "('x ---------->', size=15)\n", (897, 934), True, 'import matplotlib.pyplot as plt\n'), ((935, 981), 'matplotlib.pyplot.title', 'plt.title', (['"""System with decay = 0.05"""'], {'size': '(20)'}), "('System with decay = 0.05', size=20)\n", (944, 981), True, 'import matplotlib.pyplot as plt\n'), ((982, 992), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (990, 992), True, 
'import matplotlib.pyplot as plt\n'), ((1136, 1161), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(1000)'], {}), '(0, 100, 1000)\n', (1147, 1161), True, 'import numpy as np\n'), ((1172, 1196), 'numpy.linspace', 'np.linspace', (['(1.4)', '(1.6)', 'n'], {}), '(1.4, 1.6, n)\n', (1183, 1196), True, 'import numpy as np\n'), ((1280, 1320), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t -------->"""'], {'size': '(15)'}), "('t -------->', size=15)\n", (1290, 1320), True, 'import matplotlib.pyplot as plt\n'), ((1320, 1360), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x -------->"""'], {'size': '(15)'}), "('x -------->', size=15)\n", (1330, 1360), True, 'import matplotlib.pyplot as plt\n'), ((1361, 1425), 'matplotlib.pyplot.title', 'plt.title', (['"""System response with Different Frequencies"""'], {'size': '(15)'}), "('System response with Different Frequencies', size=15)\n", (1370, 1425), True, 'import matplotlib.pyplot as plt\n'), ((1488, 1498), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1496, 1498), True, 'import matplotlib.pyplot as plt\n'), ((1530, 1548), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['w', 'S'], {}), '(w, S)\n', (1542, 1548), True, 'import matplotlib.pyplot as plt\n'), ((1548, 1600), 'matplotlib.pyplot.plot', 'plt.plot', (['(1.5)', '(28)', '"""ro"""'], {'label': '"""Resonance Frequency"""'}), "(1.5, 28, 'ro', label='Resonance Frequency')\n", (1556, 1600), True, 'import matplotlib.pyplot as plt\n'), ((1599, 1666), 'matplotlib.pyplot.title', 'plt.title', (['"""Magnitude Bode plot with resonance freq = 1.5"""'], {'size': '(14)'}), "('Magnitude Bode plot with resonance freq = 1.5', size=14)\n", (1608, 1666), True, 'import matplotlib.pyplot as plt\n'), ((1667, 1726), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Freq in rad/s log(w) -------->"""'], {'size': '(15)'}), "('Freq in rad/s log(w) -------->', size=15)\n", (1677, 1726), True, 'import matplotlib.pyplot as plt\n'), ((1727, 1775), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""Mag in dB -------->"""'], {'size': '(15)'}), "('Mag in dB -------->', size=15)\n", (1737, 1775), True, 'import matplotlib.pyplot as plt\n'), ((1776, 1788), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1786, 1788), True, 'import matplotlib.pyplot as plt\n'), ((1789, 1799), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1797, 1799), True, 'import matplotlib.pyplot as plt\n'), ((1800, 1820), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['w', 'phi'], {}), '(w, phi)\n', (1812, 1820), True, 'import matplotlib.pyplot as plt\n'), ((1872, 1935), 'matplotlib.pyplot.title', 'plt.title', (['"""Phase Bode plot with resonance freq = 1.5"""'], {'size': '(14)'}), "('Phase Bode plot with resonance freq = 1.5', size=14)\n", (1881, 1935), True, 'import matplotlib.pyplot as plt\n'), ((1936, 1995), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Freq in rad/s log(w) -------->"""'], {'size': '(15)'}), "('Freq in rad/s log(w) -------->', size=15)\n", (1946, 1995), True, 'import matplotlib.pyplot as plt\n'), ((1996, 2051), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Phase in degrees -------->"""'], {'size': '(15)'}), "('Phase in degrees -------->', size=15)\n", (2006, 2051), True, 'import matplotlib.pyplot as plt\n'), ((2052, 2062), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2060, 2062), True, 'import matplotlib.pyplot as plt\n'), ((2186, 2217), 'scipy.signal.lti', 'sp.lti', (['[1, 0, 2]', '[1, 0, 3, 0]'], {}), '([1, 0, 2], [1, 0, 3, 0])\n', (2192, 2217), True, 'import scipy.signal as sp\n'), ((2217, 2242), 'scipy.signal.lti', 'sp.lti', (['[2]', '[1, 0, 3, 0]'], {}), '([2], [1, 0, 3, 0])\n', (2223, 2242), True, 'import scipy.signal as sp\n'), ((2257, 2280), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(200)'], {}), '(0, 20, 200)\n', (2268, 2280), True, 'import numpy as np\n'), ((2300, 2323), 'scipy.signal.impulse', 'sp.impulse', (['xs', 'None', 't'], {}), '(xs, None, t)\n', (2310, 2323), True, 'import scipy.signal as 
sp\n'), ((2329, 2352), 'scipy.signal.impulse', 'sp.impulse', (['ys', 'None', 't'], {}), '(ys, None, t)\n', (2339, 2352), True, 'import scipy.signal as sp\n'), ((2365, 2394), 'matplotlib.pyplot.plot', 'plt.plot', (['t1', 'x'], {'label': '"""x(t)"""'}), "(t1, x, label='x(t)')\n", (2373, 2394), True, 'import matplotlib.pyplot as plt\n'), ((2394, 2423), 'matplotlib.pyplot.plot', 'plt.plot', (['t2', 'y'], {'label': '"""y(t)"""'}), "(t2, y, label='y(t)')\n", (2402, 2423), True, 'import matplotlib.pyplot as plt\n'), ((2423, 2435), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2433, 2435), True, 'import matplotlib.pyplot as plt\n'), ((2436, 2495), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t ---------------->"""'], {'size': '(15)'}), "('t ---------------->', size=15)\n", (2446, 2495), True, 'import matplotlib.pyplot as plt\n'), ((2495, 2542), 'matplotlib.pyplot.title', 'plt.title', (['"""Coupled Equation Response"""'], {'size': '(15)'}), "('Coupled Equation Response', size=15)\n", (2504, 2542), True, 'import matplotlib.pyplot as plt\n'), ((2542, 2552), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2550, 2552), True, 'import matplotlib.pyplot as plt\n'), ((2659, 2679), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['w1', 'S1'], {}), '(w1, S1)\n', (2671, 2679), True, 'import matplotlib.pyplot as plt\n'), ((2679, 2720), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency in rad/s"""'], {'size': '(15)'}), "('Frequency in rad/s', size=15)\n", (2689, 2720), True, 'import matplotlib.pyplot as plt\n'), ((2720, 2758), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Magnitude in dB"""'], {'size': '(15)'}), "('Magnitude in dB', size=15)\n", (2730, 2758), True, 'import matplotlib.pyplot as plt\n'), ((2758, 2794), 'matplotlib.pyplot.title', 'plt.title', (['"""Magnitude plot"""'], {'size': '(15)'}), "('Magnitude plot', size=15)\n", (2767, 2794), True, 'import matplotlib.pyplot as plt\n'), ((2794, 2808), 'matplotlib.pyplot.grid', 'plt.grid', 
(['(True)'], {}), '(True)\n', (2802, 2808), True, 'import matplotlib.pyplot as plt\n'), ((2809, 2819), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2817, 2819), True, 'import matplotlib.pyplot as plt\n'), ((2820, 2842), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['w1', 'phi1'], {}), '(w1, phi1)\n', (2832, 2842), True, 'import matplotlib.pyplot as plt\n'), ((2842, 2883), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency in rad/s"""'], {'size': '(15)'}), "('Frequency in rad/s', size=15)\n", (2852, 2883), True, 'import matplotlib.pyplot as plt\n'), ((2883, 2922), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Phase in degrees"""'], {'size': '(15)'}), "('Phase in degrees', size=15)\n", (2893, 2922), True, 'import matplotlib.pyplot as plt\n'), ((2922, 2954), 'matplotlib.pyplot.title', 'plt.title', (['"""Phase plot"""'], {'size': '(15)'}), "('Phase plot', size=15)\n", (2931, 2954), True, 'import matplotlib.pyplot as plt\n'), ((2954, 2968), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2962, 2968), True, 'import matplotlib.pyplot as plt\n'), ((2969, 2979), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2977, 2979), True, 'import matplotlib.pyplot as plt\n'), ((3005, 3038), 'numpy.linspace', 'np.linspace', (['(0)', '(10 ** -2)', '(10 ** 5)'], {}), '(0, 10 ** -2, 10 ** 5)\n', (3016, 3038), True, 'import numpy as np\n'), ((3129, 3166), 'scipy.signal.lsim', 'sp.lsim', (['H_circ1', 'in_steady', 't_steady'], {}), '(H_circ1, in_steady, t_steady)\n', (3136, 3166), True, 'import scipy.signal as sp\n'), ((3179, 3201), 'matplotlib.pyplot.plot', 'plt.plot', (['t1', 'y_steady'], {}), '(t1, y_steady)\n', (3187, 3201), True, 'import matplotlib.pyplot as plt\n'), ((3201, 3235), 'matplotlib.pyplot.title', 'plt.title', (['"""Steady state Response"""'], {}), "('Steady state Response')\n", (3210, 3235), True, 'import matplotlib.pyplot as plt\n'), ((3236, 3276), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$V_{o}(t) --->$"""'], 
{'size': '(15)'}), "('$V_{o}(t) --->$', size=15)\n", (3246, 3276), True, 'import matplotlib.pyplot as plt\n'), ((3277, 3309), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t --->$"""'], {'size': '(15)'}), "('$t --->$', size=15)\n", (3287, 3309), True, 'import matplotlib.pyplot as plt\n'), ((3310, 3320), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3318, 3320), True, 'import matplotlib.pyplot as plt\n'), ((3345, 3392), 'numpy.linspace', 'np.linspace', (['(0)', '(35 * 10 ** -6)', '(30 * 10 ** 2 + 1)'], {}), '(0, 35 * 10 ** -6, 30 * 10 ** 2 + 1)\n', (3356, 3392), True, 'import numpy as np\n'), ((3475, 3510), 'scipy.signal.lsim', 'sp.lsim', (['H_circ1', 'in_trans', 't_trans'], {}), '(H_circ1, in_trans, t_trans)\n', (3482, 3510), True, 'import scipy.signal as sp\n'), ((3523, 3544), 'matplotlib.pyplot.plot', 'plt.plot', (['t2', 'y_trans'], {}), '(t2, y_trans)\n', (3531, 3544), True, 'import matplotlib.pyplot as plt\n'), ((3544, 3575), 'matplotlib.pyplot.title', 'plt.title', (['"""Transient Response"""'], {}), "('Transient Response')\n", (3553, 3575), True, 'import matplotlib.pyplot as plt\n'), ((3576, 3616), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$V_{o}(t) --->$"""'], {'size': '(15)'}), "('$V_{o}(t) --->$', size=15)\n", (3586, 3616), True, 'import matplotlib.pyplot as plt\n'), ((3617, 3649), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t --->$"""'], {'size': '(15)'}), "('$t --->$', size=15)\n", (3627, 3649), True, 'import matplotlib.pyplot as plt\n'), ((3650, 3660), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3658, 3660), True, 'import matplotlib.pyplot as plt\n'), ((206, 257), 'numpy.linspace', 'np.linspace', (['t_start', 't_end', '(10 * (t_end - t_start))'], {}), '(t_start, t_end, 10 * (t_end - t_start))\n', (217, 257), True, 'import numpy as np\n'), ((266, 282), 'scipy.signal.lsim', 'sp.lsim', (['H', 'f', 't'], {}), '(H, f, t)\n', (273, 282), True, 'import scipy.signal as sp\n'), ((391, 406), 'numpy.cos', 'np.cos', (['(1.5 * 
t)'], {}), '(1.5 * t)\n', (397, 406), True, 'import numpy as np\n'), ((407, 423), 'numpy.exp', 'np.exp', (['(-0.5 * t)'], {}), '(-0.5 * t)\n', (413, 423), True, 'import numpy as np\n'), ((721, 736), 'numpy.cos', 'np.cos', (['(1.5 * t)'], {}), '(1.5 * t)\n', (727, 736), True, 'import numpy as np\n'), ((737, 754), 'numpy.exp', 'np.exp', (['(-0.05 * t)'], {}), '(-0.05 * t)\n', (743, 754), True, 'import numpy as np\n'), ((1041, 1066), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(1000)'], {}), '(0, 100, 1000)\n', (1052, 1066), True, 'import numpy as np\n'), ((2584, 2605), 'numpy.poly1d', 'np.poly1d', (['[10 ** 12]'], {}), '([10 ** 12])\n', (2593, 2605), True, 'import numpy as np\n'), ((2604, 2637), 'numpy.poly1d', 'np.poly1d', (['[1, 10 ** 8, 10 ** 12]'], {}), '([1, 10 ** 8, 10 ** 12])\n', (2613, 2637), True, 'import numpy as np\n'), ((3045, 3071), 'numpy.cos', 'np.cos', (['(10 ** 3 * t_steady)'], {}), '(10 ** 3 * t_steady)\n', (3051, 3071), True, 'import numpy as np\n'), ((3072, 3098), 'numpy.cos', 'np.cos', (['(10 ** 6 * t_steady)'], {}), '(10 ** 6 * t_steady)\n', (3078, 3098), True, 'import numpy as np\n'), ((3392, 3417), 'numpy.cos', 'np.cos', (['(10 ** 3 * t_trans)'], {}), '(10 ** 3 * t_trans)\n', (3398, 3417), True, 'import numpy as np\n'), ((3418, 3443), 'numpy.cos', 'np.cos', (['(10 ** 6 * t_trans)'], {}), '(10 ** 6 * t_trans)\n', (3424, 3443), True, 'import numpy as np\n'), ((1077, 1093), 'numpy.cos', 'np.cos', (['(freq * t)'], {}), '(freq * t)\n', (1083, 1093), True, 'import numpy as np\n'), ((1094, 1115), 'numpy.exp', 'np.exp', (['(-damp_fac * t)'], {}), '(-damp_fac * t)\n', (1100, 1115), True, 'import numpy as np\n')]
|
import re
def sort_by_double_camel(chars: str) -> str:
"""ダブルキャメルケースで分割ソートする
Args:
chars(str): ソート対象の文字列
Returns:
(list[str]): 昇順でソートされたダブルキャメルケース文字列
"""
double_camels = sorted(re.findall("[A-Z][a-z]*[A-Z]", chars), key=str.lower)
return "".join(double_camels)
def main():
print(sort_by_double_camel(input()))
if __name__ == "__main__":
main()
|
[
"re.findall"
] |
[((215, 252), 're.findall', 're.findall', (['"""[A-Z][a-z]*[A-Z]"""', 'chars'], {}), "('[A-Z][a-z]*[A-Z]', chars)\n", (225, 252), False, 'import re\n')]
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2020 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from collections import deque
from typing import Any, Deque, Dict, Generator
TargetGenerator = Generator[Any, Any, Any]
class Task(object):
taskid = 0
def __init__(self, target: TargetGenerator):
Task.taskid += 1
self.tid = Task.taskid
self.target = target
self.sendval = None
def run(self) -> Any:
return self.target.send(self.sendval)
class SystemCall(object):
def handle(self):
pass
class Scheduler(object):
def __init__(self):
self.ready: Deque[Task] = deque()
self.taskmap: Dict[int, Task] = {}
def new(self, target: TargetGenerator) -> int:
new_task = Task(target)
self.taskmap[new_task.tid] = new_task
self.schedule(new_task)
return new_task.tid
def schedule(self, task: Task) -> None:
self.ready.append(task)
def exit(self, task: Task) -> None:
print(f"Task {task.tid} terminated")
del self.taskmap[task.tid]
def mainloop(self) -> None:
while self.taskmap:
task = self.ready.popleft()
try:
result = task.run()
if isinstance(result, SystemCall):
result.task = task
result.sched = self
result.handle()
continue
except StopIteration:
self.exit(task)
continue
self.schedule(task)
class GetTid(SystemCall):
def handle(self):
self.task.sendval = self.task.tid
self.sched.schedule(self.task)
class NewTask(SystemCall):
def __init__(self, target: TargetGenerator):
self.target = target
def handle(self):
tid = self.sched.new(self.target)
self.task.sendval = tid
self.sched.schedule(self.task)
class KillTask(SystemCall):
def __init__(self, tid):
self.tid = tid
def handle(self):
task = self.sched.taskmap.get(self.tid)
if task:
task.target.close()
self.task.sendval = True
else:
self.task.sendval = False
self.sched.schedule(self.task)
if __name__ == '__main__':
def foo():
mytid = yield GetTid()
while True:
print(f"I'm foo, {mytid}")
yield
def main():
child = yield NewTask(foo())
for _ in range(5):
yield
yield KillTask(child)
print(f"main done")
sched = Scheduler()
sched.new(main())
sched.mainloop()
|
[
"collections.deque"
] |
[((1934, 1941), 'collections.deque', 'deque', ([], {}), '()\n', (1939, 1941), False, 'from collections import deque\n')]
|
'''Python script to generate CAC'''
'''Authors - <NAME>
'''
import numpy as np
import pandas as pd
from datetime import datetime
import collections
from .helpers import *
class CAC:
def __init__(self, fin_perf, oper_metrics, oth_metrics):
print("INIT CAC")
self.fin_perf = pd.DataFrame(fin_perf)
self.oper_metrics = pd.DataFrame(oper_metrics)
self.oth_metrics = pd.DataFrame(oth_metrics)
def run(self):
self.clean_inputs()
print(self.fin_perf)
print(self.oper_metrics)
print(self.oth_metrics)
self.ttm_cac()
self.yoy_growth()
self.clean_outputs()
json = {
"CAC & CAC TTM": self.cac_ttm.to_dict(orient='records'),
"CAC YoY Growth": self.cac_yoy.to_dict(orient='records'),
}
return json
def clean_inputs(self):
self.fin_perf = self.fin_perf.copy()
self.fin_perf.set_index("Financial Performance", inplace=True)
self.fin_perf.apply(filter_to_dec_list)
self.oper_metrics = self.oper_metrics.copy()
self.oper_metrics.set_index("Operating Metrics", inplace=True)
self.oper_metrics.apply(filter_to_dec_list)
self.oth_metrics.set_index("Other Metrics", inplace=True)
self.oth_metrics.apply(filter_to_dec_list)
def clean_outputs(self):
self.cac_ttm = self.cac_ttm.astype(object)
self.cac_ttm.apply(nan_to_blank_list)
self.cac_ttm = self.cac_ttm.apply(numbers_with_commas_list)
self.cac_ttm = self.cac_ttm.drop(self.cac_ttm.columns[0], axis=1)
self.cac_ttm.reset_index(inplace=True)
self.cac_yoy = self.cac_yoy.astype(object)
self.cac_yoy.apply(nan_to_blank_list)
cac_yoy_copy = self.cac_yoy.copy()
self.cac_yoy = self.cac_yoy.apply(numbers_with_commas_list)
self.cac_yoy.loc['YoY growth'] = cac_yoy_copy.loc['YoY growth'].apply(dec_to_percents)
self.cac_yoy.loc['YoY growth*'] = cac_yoy_copy.loc['YoY growth*'].apply(dec_to_percents)
self.cac_yoy = self.cac_yoy.drop(self.cac_yoy.columns[0], axis=1)
self.cac_yoy.reset_index(inplace=True)
print("CAC & CAC TTM")
print(self.cac_ttm)
print("CAC YoY Growth")
print(self.cac_yoy)
def ttm_cac(self):
index = ["S&M", "Total Expense", "# of New Customers", "CAC", "TTM CAC"]
self.cac_ttm = pd.DataFrame(index=np.arange(len(index)), columns=self.fin_perf.columns)
self.cac_ttm.set_index(pd.Series(index, name=""), inplace=True)
self.cac_ttm.loc['S&M'] = -self.fin_perf.loc['S&M']*1000
self.cac_ttm.loc['Total Expense'] = self.cac_ttm.loc['S&M']
self.cac_ttm.loc['# of New Customers'] = self.oper_metrics.loc['A']
self.cac_ttm.loc['CAC'] = self.cac_ttm.loc['Total Expense'].div(self.cac_ttm.loc['# of New Customers'].replace({0:np.nan}))
self.cac_ttm.loc['TTM CAC'][:12] = ["N/A"]*12
for i in range(12, self.cac_ttm.shape[1]):
self.cac_ttm.loc['TTM CAC'][i] = self.cac_ttm.loc['Total Expense'].iloc[i-11:i+1].sum()/self.cac_ttm.loc['# of New Customers'].iloc[i-11:i+1].sum()
def yoy_growth(self):
index = ["TTM CAC", "YoY growth", "Avg ARR Per Customer", "YoY growth*"]
self.cac_yoy = pd.DataFrame(index=np.arange(len(index)), columns=self.fin_perf.columns)
self.cac_yoy.set_index(pd.Series(index, name=""), inplace=True)
self.cac_yoy.loc['TTM CAC'] = self.cac_ttm.loc['TTM CAC']
self.cac_yoy.loc['YoY growth'].iloc[:min(self.cac_yoy.shape[1], 24)] = [float("NaN")]*min(self.cac_yoy.shape[1], 24)
self.cac_yoy.loc['YoY growth*'].iloc[:min(self.cac_yoy.shape[1], 24)] = [float("NaN")]*min(self.cac_yoy.shape[1], 24)
self.cac_yoy.loc['Avg ARR Per Customer'] = self.oth_metrics.loc['Avg ARR per Customer']
if self.cac_yoy.shape[1] >= 24:
self.cac_yoy.loc['YoY growth'].iloc[24:] = list(self.cac_yoy.loc['TTM CAC'].iloc[24:].array/self.cac_yoy.loc['TTM CAC'].iloc[12:-12].array-1)
self.cac_yoy.loc['YoY growth*'].iloc[24:] = list(self.cac_yoy.loc['Avg ARR Per Customer'].iloc[24:].array/self.cac_yoy.loc['Avg ARR Per Customer'].iloc[12:-12].array-1)
|
[
"pandas.DataFrame",
"pandas.Series"
] |
[((297, 319), 'pandas.DataFrame', 'pd.DataFrame', (['fin_perf'], {}), '(fin_perf)\n', (309, 319), True, 'import pandas as pd\n'), ((348, 374), 'pandas.DataFrame', 'pd.DataFrame', (['oper_metrics'], {}), '(oper_metrics)\n', (360, 374), True, 'import pandas as pd\n'), ((402, 427), 'pandas.DataFrame', 'pd.DataFrame', (['oth_metrics'], {}), '(oth_metrics)\n', (414, 427), True, 'import pandas as pd\n'), ((2512, 2537), 'pandas.Series', 'pd.Series', (['index'], {'name': '""""""'}), "(index, name='')\n", (2521, 2537), True, 'import pandas as pd\n'), ((3394, 3419), 'pandas.Series', 'pd.Series', (['index'], {'name': '""""""'}), "(index, name='')\n", (3403, 3419), True, 'import pandas as pd\n')]
|
import pytest
from unittest.mock import (
call,
mock_open,
patch,
)
from subnet import ip_network, IPv4Network, IPv4Address
from wireguard import (
Config,
ServerConfig,
Peer,
Server,
)
from wireguard.utils import IPAddressSet
def test_basic_server():
subnet = '192.168.0.0/24'
address = '192.168.0.1'
server = Server(
'test-server',
subnet,
address=address,
)
config = ServerConfig(server)
wg_config = config.local_config
config_lines = wg_config.split('\n')
# Ensure that [Interface] is first in the config, allowing for blank lines before
for line in config_lines:
if line:
assert line == '[Interface]'
break
# Check that these are on a line alone in the config output
assert f'Address = {address}/24' in config_lines
assert '# test-server' not in config_lines # Should only be present in Peer section on remote
assert '[Peer]' not in config_lines # We haven't configured any peers, so this shouldn't exist
# Check that these don't appear anywhere at all because of how basic this config is
for option in ['DNS', 'PreUp', 'PostUp', 'PreDown', 'PostDown', 'SaveConfig', 'MTU', 'Table', 'AllowedIPs', 'Endpoint', 'PersistentKeepalive', 'PresharedKey', 'PublicKey']:
assert f'{option} =' not in wg_config
def test_basic_peer():
address = '192.168.0.2'
peer = Peer(
'test-peer',
address=address,
)
config = Config(peer)
wg_config = config.local_config
config_lines = wg_config.split('\n')
# Ensure that [Interface] is first in the config, allowing for blank lines before
for line in config_lines:
if line:
assert line == '[Interface]'
break
assert f'Address = {address}/32' in config_lines
assert '# test-peer' not in config_lines # Should only be present in Peer section on remote
assert '[Peer]' not in config_lines # We haven't configured any peers, so this shouldn't exist
# Check that these don't appear anywhere at all because of how basic this config is
for option in ['DNS', 'PreUp', 'PostUp', 'PreDown', 'PostDown', 'SaveConfig', 'MTU', 'Table', 'AllowedIPs', 'Endpoint', 'PersistentKeepalive', 'PresharedKey', 'PublicKey']:
assert f'{option} =' not in wg_config
def test_inadmissible_non_peer():
class NonPeer():
attrib1 = IPAddressSet()
attrib2 = 'something'
with pytest.raises(ValueError) as exc:
config = Config(NonPeer())
assert 'provide a valid Peer' in str(exc.value)
def test_admissible_non_peer():
class NonPeer():
allowed_ips = IPAddressSet()
public_key = 'something'
config = Config(NonPeer())
for line in config.local_config.split('\n'):
if line:
assert line == '[Interface]'
assert '[Peer]' in config.remote_config
assert 'PublicKey = something' in config.remote_config
def test_write_server_config_no_params():
subnet = '192.168.0.0/24'
address = '192.168.0.1'
server = Server(
'test-server',
subnet,
address=address,
)
with patch('builtins.open', mock_open()) as mo:
server.config().write()
mo.assert_has_calls([
call('/etc/wireguard/wg0.conf', mode='w', encoding='utf-8'),
call('/etc/wireguard/wg0-peers.conf', mode='w', encoding='utf-8'),
], any_order=True)
@pytest.mark.parametrize(
('interface', 'path', 'full_path', 'peers_full_path'),
[
(None, None, '/etc/wireguard/wg0.conf', '/etc/wireguard/wg0-peers.conf',), # Default options
('wg3', None, '/etc/wireguard/wg3.conf', '/etc/wireguard/wg3-peers.conf',),
(None, '/opt/my-wg-dir', '/opt/my-wg-dir/wg0.conf', '/opt/my-wg-dir/wg0-peers.conf',),
('wg1', '/opt/my-other-wg-dir', '/opt/my-other-wg-dir/wg1.conf', '/opt/my-other-wg-dir/wg1-peers.conf',),
])
def test_write_server_config(interface, path, full_path, peers_full_path):
subnet = '192.168.0.0/24'
address = '192.168.0.1'
server = Server(
'test-server',
subnet,
address=address,
interface=interface
)
config = server.config()
assert config.full_path(path) == full_path
assert config.peers_full_path(path) == peers_full_path
with patch('builtins.open', mock_open()) as mo:
config.write(path)
mo.assert_has_calls([
call(full_path, mode='w', encoding='utf-8'),
call(peers_full_path, mode='w', encoding='utf-8'),
], any_order=True)
def test_write_peer_config_no_params():
address = '192.168.0.1'
peer = Peer(
'test-peer',
address=address,
)
with patch('builtins.open', mock_open()) as mo:
peer.config().write()
mo.assert_has_calls([
call('/etc/wireguard/wg0.conf', mode='w', encoding='utf-8'),
], any_order=True)
@pytest.mark.parametrize(
('interface', 'path', 'full_path',),
[
(None, None, '/etc/wireguard/wg0.conf',), # Default options
('wg3', None, '/etc/wireguard/wg3.conf',),
(None, '/opt/my-wg-dir', '/opt/my-wg-dir/wg0.conf',),
('wg1', '/opt/my-other-wg-dir', '/opt/my-other-wg-dir/wg1.conf',),
])
def test_write_peer_config(interface, path, full_path):
address = '192.168.0.2'
peer = Peer(
'test-peer',
address=address,
interface=interface,
)
config = Config(peer)
assert config.full_path(path) == full_path
with patch('builtins.open', mock_open()) as mo:
peer.config().write(path)
mo.assert_has_calls([
call(full_path, mode='w', encoding='utf-8'),
], any_order=True)
|
[
"wireguard.Server",
"wireguard.Peer",
"wireguard.Config",
"pytest.raises",
"unittest.mock.mock_open",
"wireguard.ServerConfig",
"pytest.mark.parametrize",
"unittest.mock.call",
"wireguard.utils.IPAddressSet"
] |
[((3466, 3910), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('interface', 'path', 'full_path', 'peers_full_path')", "[(None, None, '/etc/wireguard/wg0.conf', '/etc/wireguard/wg0-peers.conf'),\n ('wg3', None, '/etc/wireguard/wg3.conf',\n '/etc/wireguard/wg3-peers.conf'), (None, '/opt/my-wg-dir',\n '/opt/my-wg-dir/wg0.conf', '/opt/my-wg-dir/wg0-peers.conf'), ('wg1',\n '/opt/my-other-wg-dir', '/opt/my-other-wg-dir/wg1.conf',\n '/opt/my-other-wg-dir/wg1-peers.conf')]"], {}), "(('interface', 'path', 'full_path',\n 'peers_full_path'), [(None, None, '/etc/wireguard/wg0.conf',\n '/etc/wireguard/wg0-peers.conf'), ('wg3', None,\n '/etc/wireguard/wg3.conf', '/etc/wireguard/wg3-peers.conf'), (None,\n '/opt/my-wg-dir', '/opt/my-wg-dir/wg0.conf',\n '/opt/my-wg-dir/wg0-peers.conf'), ('wg1', '/opt/my-other-wg-dir',\n '/opt/my-other-wg-dir/wg1.conf', '/opt/my-other-wg-dir/wg1-peers.conf')])\n", (3489, 3910), False, 'import pytest\n'), ((4963, 5239), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('interface', 'path', 'full_path')", "[(None, None, '/etc/wireguard/wg0.conf'), ('wg3', None,\n '/etc/wireguard/wg3.conf'), (None, '/opt/my-wg-dir',\n '/opt/my-wg-dir/wg0.conf'), ('wg1', '/opt/my-other-wg-dir',\n '/opt/my-other-wg-dir/wg1.conf')]"], {}), "(('interface', 'path', 'full_path'), [(None, None,\n '/etc/wireguard/wg0.conf'), ('wg3', None, '/etc/wireguard/wg3.conf'), (\n None, '/opt/my-wg-dir', '/opt/my-wg-dir/wg0.conf'), ('wg1',\n '/opt/my-other-wg-dir', '/opt/my-other-wg-dir/wg1.conf')])\n", (4986, 5239), False, 'import pytest\n'), ((357, 403), 'wireguard.Server', 'Server', (['"""test-server"""', 'subnet'], {'address': 'address'}), "('test-server', subnet, address=address)\n", (363, 403), False, 'from wireguard import Config, ServerConfig, Peer, Server\n'), ((449, 469), 'wireguard.ServerConfig', 'ServerConfig', (['server'], {}), '(server)\n', (461, 469), False, 'from wireguard import Config, ServerConfig, Peer, Server\n'), ((1434, 1468), 'wireguard.Peer', 
'Peer', (['"""test-peer"""'], {'address': 'address'}), "('test-peer', address=address)\n", (1438, 1468), False, 'from wireguard import Config, ServerConfig, Peer, Server\n'), ((1506, 1518), 'wireguard.Config', 'Config', (['peer'], {}), '(peer)\n', (1512, 1518), False, 'from wireguard import Config, ServerConfig, Peer, Server\n'), ((3090, 3136), 'wireguard.Server', 'Server', (['"""test-server"""', 'subnet'], {'address': 'address'}), "('test-server', subnet, address=address)\n", (3096, 3136), False, 'from wireguard import Config, ServerConfig, Peer, Server\n'), ((4105, 4172), 'wireguard.Server', 'Server', (['"""test-server"""', 'subnet'], {'address': 'address', 'interface': 'interface'}), "('test-server', subnet, address=address, interface=interface)\n", (4111, 4172), False, 'from wireguard import Config, ServerConfig, Peer, Server\n'), ((4688, 4722), 'wireguard.Peer', 'Peer', (['"""test-peer"""'], {'address': 'address'}), "('test-peer', address=address)\n", (4692, 4722), False, 'from wireguard import Config, ServerConfig, Peer, Server\n'), ((5395, 5450), 'wireguard.Peer', 'Peer', (['"""test-peer"""'], {'address': 'address', 'interface': 'interface'}), "('test-peer', address=address, interface=interface)\n", (5399, 5450), False, 'from wireguard import Config, ServerConfig, Peer, Server\n'), ((5496, 5508), 'wireguard.Config', 'Config', (['peer'], {}), '(peer)\n', (5502, 5508), False, 'from wireguard import Config, ServerConfig, Peer, Server\n'), ((2428, 2442), 'wireguard.utils.IPAddressSet', 'IPAddressSet', ([], {}), '()\n', (2440, 2442), False, 'from wireguard.utils import IPAddressSet\n'), ((2483, 2508), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2496, 2508), False, 'import pytest\n'), ((2682, 2696), 'wireguard.utils.IPAddressSet', 'IPAddressSet', ([], {}), '()\n', (2694, 2696), False, 'from wireguard.utils import IPAddressSet\n'), ((3201, 3212), 'unittest.mock.mock_open', 'mock_open', ([], {}), '()\n', (3210, 3212), False, 'from 
unittest.mock import call, mock_open, patch\n'), ((4380, 4391), 'unittest.mock.mock_open', 'mock_open', ([], {}), '()\n', (4389, 4391), False, 'from unittest.mock import call, mock_open, patch\n'), ((4779, 4790), 'unittest.mock.mock_open', 'mock_open', ([], {}), '()\n', (4788, 4790), False, 'from unittest.mock import call, mock_open, patch\n'), ((5590, 5601), 'unittest.mock.mock_open', 'mock_open', ([], {}), '()\n', (5599, 5601), False, 'from unittest.mock import call, mock_open, patch\n'), ((3296, 3355), 'unittest.mock.call', 'call', (['"""/etc/wireguard/wg0.conf"""'], {'mode': '"""w"""', 'encoding': '"""utf-8"""'}), "('/etc/wireguard/wg0.conf', mode='w', encoding='utf-8')\n", (3300, 3355), False, 'from unittest.mock import call, mock_open, patch\n'), ((3369, 3434), 'unittest.mock.call', 'call', (['"""/etc/wireguard/wg0-peers.conf"""'], {'mode': '"""w"""', 'encoding': '"""utf-8"""'}), "('/etc/wireguard/wg0-peers.conf', mode='w', encoding='utf-8')\n", (3373, 3434), False, 'from unittest.mock import call, mock_open, patch\n'), ((4470, 4513), 'unittest.mock.call', 'call', (['full_path'], {'mode': '"""w"""', 'encoding': '"""utf-8"""'}), "(full_path, mode='w', encoding='utf-8')\n", (4474, 4513), False, 'from unittest.mock import call, mock_open, patch\n'), ((4527, 4576), 'unittest.mock.call', 'call', (['peers_full_path'], {'mode': '"""w"""', 'encoding': '"""utf-8"""'}), "(peers_full_path, mode='w', encoding='utf-8')\n", (4531, 4576), False, 'from unittest.mock import call, mock_open, patch\n'), ((4872, 4931), 'unittest.mock.call', 'call', (['"""/etc/wireguard/wg0.conf"""'], {'mode': '"""w"""', 'encoding': '"""utf-8"""'}), "('/etc/wireguard/wg0.conf', mode='w', encoding='utf-8')\n", (4876, 4931), False, 'from unittest.mock import call, mock_open, patch\n'), ((5687, 5730), 'unittest.mock.call', 'call', (['full_path'], {'mode': '"""w"""', 'encoding': '"""utf-8"""'}), "(full_path, mode='w', encoding='utf-8')\n", (5691, 5730), False, 'from unittest.mock import call, 
mock_open, patch\n')]
|
# Copyright 2010-2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pyhole Search Plugin"""
from BeautifulSoup import BeautifulSoup
from pyhole.core import plugin
from pyhole.core import request
from pyhole.core import utils
class Search(plugin.Plugin):
"""Provide access to search engines."""
@plugin.hook_add_command("urban")
@utils.require_params
@utils.spawn
def urban(self, message, params=None, **kwargs):
"""Search Urban Dictionary (ex: .urban <query>)."""
url = "http://www.urbandictionary.com/define.php"
response = request.get(url, params={"term": params})
if response.status_code != 200:
return
soup = BeautifulSoup(response.content)
try:
meaning = soup.find("div", {"class": "meaning"}).text
example = soup.find("div", {"class": "example"}).text
except AttributeError:
message.dispatch("No results found: '%s'" % params)
meaning = utils.decode_entities(meaning)
example = utils.decode_entities(example)
message.dispatch("%s (ex: %s)" % (meaning, example))
@plugin.hook_add_command("wikipedia")
@utils.require_params
@utils.spawn
def wikipedia(self, message, params=None, **kwargs):
"""Search Wikipedia (ex: .wikipedia <query>)."""
url = "https://en.wikipedia.org/w/api.php"
response = request.get(url, params={
"action": "query",
"generator": "allpages",
"gaplimit": 4,
"gapfrom": params,
"format": "json"
})
if response.status_code != 200:
return
pages = response.json()["query"]["pages"]
for page in pages.values():
title = page["title"]
title = title.replace(" ", "_")
message.dispatch("http://en.wikipedia.org/wiki/%s" % title)
|
[
"BeautifulSoup.BeautifulSoup",
"pyhole.core.utils.decode_entities",
"pyhole.core.request.get",
"pyhole.core.plugin.hook_add_command"
] |
[((839, 871), 'pyhole.core.plugin.hook_add_command', 'plugin.hook_add_command', (['"""urban"""'], {}), "('urban')\n", (862, 871), False, 'from pyhole.core import plugin\n'), ((1662, 1698), 'pyhole.core.plugin.hook_add_command', 'plugin.hook_add_command', (['"""wikipedia"""'], {}), "('wikipedia')\n", (1685, 1698), False, 'from pyhole.core import plugin\n'), ((1105, 1146), 'pyhole.core.request.get', 'request.get', (['url'], {'params': "{'term': params}"}), "(url, params={'term': params})\n", (1116, 1146), False, 'from pyhole.core import request\n'), ((1222, 1253), 'BeautifulSoup.BeautifulSoup', 'BeautifulSoup', (['response.content'], {}), '(response.content)\n', (1235, 1253), False, 'from BeautifulSoup import BeautifulSoup\n'), ((1514, 1544), 'pyhole.core.utils.decode_entities', 'utils.decode_entities', (['meaning'], {}), '(meaning)\n', (1535, 1544), False, 'from pyhole.core import utils\n'), ((1563, 1593), 'pyhole.core.utils.decode_entities', 'utils.decode_entities', (['example'], {}), '(example)\n', (1584, 1593), False, 'from pyhole.core import utils\n'), ((1926, 2051), 'pyhole.core.request.get', 'request.get', (['url'], {'params': "{'action': 'query', 'generator': 'allpages', 'gaplimit': 4, 'gapfrom':\n params, 'format': 'json'}"}), "(url, params={'action': 'query', 'generator': 'allpages',\n 'gaplimit': 4, 'gapfrom': params, 'format': 'json'})\n", (1937, 2051), False, 'from pyhole.core import request\n')]
|
# Generated by Django 2.2 on 2019-05-16 07:59
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('modelchimp', '0048_auto_20190515_1032'),
]
operations = [
migrations.RemoveField(
model_name='experiment',
name='algorithm',
),
migrations.RemoveField(
model_name='experiment',
name='features',
),
migrations.RemoveField(
model_name='experiment',
name='platform',
),
migrations.RemoveField(
model_name='experiment',
name='platform_library',
),
]
|
[
"django.db.migrations.RemoveField"
] |
[((228, 293), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""experiment"""', 'name': '"""algorithm"""'}), "(model_name='experiment', name='algorithm')\n", (250, 293), False, 'from django.db import migrations\n'), ((338, 402), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""experiment"""', 'name': '"""features"""'}), "(model_name='experiment', name='features')\n", (360, 402), False, 'from django.db import migrations\n'), ((447, 511), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""experiment"""', 'name': '"""platform"""'}), "(model_name='experiment', name='platform')\n", (469, 511), False, 'from django.db import migrations\n'), ((556, 628), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""experiment"""', 'name': '"""platform_library"""'}), "(model_name='experiment', name='platform_library')\n", (578, 628), False, 'from django.db import migrations\n')]
|
# --------------------------------------------------------------------------
#<pycode(py_expr)>
try:
import types
import ctypes
# Callback for IDC func callback (On Windows, we use stdcall)
# typedef error_t idaapi idc_func_t(idc_value_t *argv,idc_value_t *r);
try:
_IDCFUNC_CB_T = ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p)
except:
_IDCFUNC_CB_T = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p)
# A trampoline function that is called from idcfunc_t that will
# call the Python callback with the argv and r properly serialized to python
call_idc_func__ = ctypes.CFUNCTYPE(ctypes.c_long, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)(_ida_expr.py_get_call_idc_func())
except:
def call_idc_func__(*args):
warning("IDC extensions need ctypes library in order to work")
return 0
_IDCFUNC_CB_T = None
# --------------------------------------------------------------------------
EXTFUN_BASE = 0x0001
"""requires open database"""
EXTFUN_NORET = 0x0002
"""does not return. the interpreter may clean up its state before calling it."""
EXTFUN_SAFE = 0x0004
"""thread safe function. may be called"""
# --------------------------------------------------------------------------
class _IdcFunction(object):
"""
Internal class that calls pyw_call_idc_func() with a context
"""
def __init__(self, ctxptr):
self.ctxptr = ctxptr
# Take a reference to the ctypes callback
# (note: this will create a circular reference)
self.cb = _IDCFUNC_CB_T(self)
fp_ptr = property(lambda self: ctypes.cast(self.cb, ctypes.c_void_p).value)
def __call__(self, args, res):
return call_idc_func__(self.ctxptr, args, res)
# --------------------------------------------------------------------------
# Dictionary to remember IDC function names along with the context pointer
# retrieved by using the internal pyw_register_idc_func()
__IDC_FUNC_CTXS = {}
def del_idc_func(name):
"""
Unregisters the specified IDC function
@param name: IDC function name to unregister
@return: Boolean
"""
global __IDC_FUNC_CTXS
# Get the context
f = __IDC_FUNC_CTXS.get(name, None)
if f is None:
return False # Not registered
# Break circular reference
del f.cb
# Delete the name from the dictionary
del __IDC_FUNC_CTXS[name]
# Delete the context and unregister the function
return _ida_expr.pyw_unregister_idc_func(f.ctxptr)
# --------------------------------------------------------------------------
def add_idc_func(name, fp, args, defvals=None, flags=0):
"""
Extends the IDC language by exposing a new IDC function that is backed up by a Python function
@param name: IDC function name to expose
@param fp: Python callable that will receive the arguments and return a tuple.
@param args: Arguments. A tuple of idaapi.VT_XXX constants
@param flags: IDC function flags. A combination of EXTFUN_XXX constants
@return: Boolean
"""
global __IDC_FUNC_CTXS
# Get the context
f = __IDC_FUNC_CTXS.get(name, None)
# Registering a function that is already registered?
if f is not None:
# Unregister it first
del_idc_func(name)
# Convert the tupple argument info to a string
args = "".join([chr(x) for x in args])
# make sure we don't have an obvious discrepancy between
# the number of args, and the provided default values
if len(defvals) > len(args):
return False
vdefvals = idc_values_t()
if not _ida_expr.pyw_convert_defvals(vdefvals, defvals):
return False
# Create a context
ctxptr = _ida_expr.pyw_register_idc_func(name, args, fp)
if ctxptr == 0:
return False
# Bind the context with the IdcFunc object
f = _IdcFunction(ctxptr)
# Remember the Python context
__IDC_FUNC_CTXS[name] = f
# Register IDC function with a callback
return _ida_expr.py_add_idc_func(
name,
f.fp_ptr,
args,
vdefvals,
flags)
#</pycode(py_expr)>
#<pycode_BC695(py_expr)>
# Backward-compatibility layer (BC695): exposes the old IDAPython names as
# aliases for the current API. Plain assignments are straight renames; the
# small wrapper functions below exist where the argument order (or the
# parameter set) changed in the new API.
Compile=compile_idc_file
CompileEx=compile_idc_file
CompileLine=compile_idc_text
VT_STR2=VT_STR
VarCopy=copy_idcv
VarDelAttr=del_idcv_attr
VarDeref=deref_idcv
VarFirstAttr=first_idcv_attr
def VarGetAttr(obj, attr, res, may_use_getattr=False):
    """Old-API wrapper: the new get_idcv_attr() takes 'res' first."""
    return get_idcv_attr(res, obj, attr, may_use_getattr)
VarGetClassName=get_idcv_class_name
VarGetSlice=get_idcv_slice
VarInt64=idcv_int64
VarLastAttr=last_idcv_attr
VarMove=move_idcv
VarNextAttr=next_idcv_attr
VarObject=idcv_object
VarPrevAttr=prev_idcv_attr
VarPrint=print_idcv
VarRef=create_idcv_ref
VarSetAttr=set_idcv_attr
VarSetSlice=set_idcv_slice
VarString2=idcv_string
VarSwap=swap_idcvs
def calc_idc_expr(where, expr, res):
    """Old-API wrapper: the new eval_idc_expr() takes 'res' first."""
    return eval_idc_expr(res, where, expr)
def calcexpr(where, expr, res):
    """Old-API wrapper: the new eval_expr() takes 'res' first."""
    return eval_expr(res, where, expr)
def dosysfile(complain_if_no_file, fname):
    """Old-API wrapper: argument order is swapped in exec_system_script()."""
    return exec_system_script(fname, complain_if_no_file)
def execute(line):
    """Old-API wrapper: forwards to eval_idc_snippet() with a None first argument."""
    return eval_idc_snippet(None, line)
py_set_idc_func_ex=py_add_idc_func
def set_idc_func_ex(name, fp=None, args=(), flags=0):
    """Old-API wrapper: forwards to add_idc_func() with empty default values."""
    return add_idc_func(name, fp, args, (), flags)
#</pycode_BC695(py_expr)>
|
[
"ctypes.CFUNCTYPE",
"ctypes.cast",
"ctypes.WINFUNCTYPE"
] |
[((310, 376), 'ctypes.WINFUNCTYPE', 'ctypes.WINFUNCTYPE', (['ctypes.c_int', 'ctypes.c_void_p', 'ctypes.c_void_p'], {}), '(ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p)\n', (328, 376), False, 'import ctypes\n'), ((650, 737), 'ctypes.CFUNCTYPE', 'ctypes.CFUNCTYPE', (['ctypes.c_long', 'ctypes.c_void_p', 'ctypes.c_void_p', 'ctypes.c_void_p'], {}), '(ctypes.c_long, ctypes.c_void_p, ctypes.c_void_p, ctypes.\n c_void_p)\n', (666, 737), False, 'import ctypes\n'), ((413, 477), 'ctypes.CFUNCTYPE', 'ctypes.CFUNCTYPE', (['ctypes.c_int', 'ctypes.c_void_p', 'ctypes.c_void_p'], {}), '(ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p)\n', (429, 477), False, 'import ctypes\n'), ((1644, 1681), 'ctypes.cast', 'ctypes.cast', (['self.cb', 'ctypes.c_void_p'], {}), '(self.cb, ctypes.c_void_p)\n', (1655, 1681), False, 'import ctypes\n')]
|
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_artifacts.forms import ArtifactCreatorForm
from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype
from dfirtrack_main.models import System, Systemstatus, Tag, Tagcolor
class ArtifactCreatorFormTestCase(TestCase):
    """artifact creator form tests"""

    @classmethod
    def setUpTestData(cls):
        """create the fixture objects shared by every test in this case"""
        # create user
        test_user = User.objects.create_user(
            username='testuser_artifact_creator', password='<PASSWORD>'
        )
        # create systemstatus
        systemstatus_1 = Systemstatus.objects.create(systemstatus_name='systemstatus_1')
        # create systems
        System.objects.create(
            system_name='system_1',
            systemstatus=systemstatus_1,
            system_created_by_user_id=test_user,
            system_modified_by_user_id=test_user,
        )
        System.objects.create(
            system_name='system_2',
            systemstatus=systemstatus_1,
            system_created_by_user_id=test_user,
            system_modified_by_user_id=test_user,
        )
        # create tags
        tagcolor_1 = Tagcolor.objects.create(tagcolor_name='tagcolor_1')
        Tag.objects.create(
            tag_name='tag_1',
            tagcolor=tagcolor_1,
        )
        Tag.objects.create(
            tag_name='tag_2',
            tagcolor=tagcolor_1,
        )
        # create artifact attribute objects
        Artifactpriority.objects.create(artifactpriority_name='prio_1')
        Artifactstatus.objects.create(artifactstatus_name='artifactstatus_1')
        Artifacttype.objects.create(artifacttype_name='artifacttype_1')
        Artifacttype.objects.create(artifacttype_name='artifacttype_2')

    # --- fixture lookup helpers ---------------------------------------
    # The original test methods repeated the same `objects.get(...)`
    # boilerplate over and over; these private helpers remove that
    # duplication without changing any test method name or assertion.

    @staticmethod
    def _artifactpriority_id():
        """primary key of the fixture artifactpriority"""
        return Artifactpriority.objects.get(
            artifactpriority_name='prio_1'
        ).artifactpriority_id

    @staticmethod
    def _artifactstatus_id():
        """primary key of the fixture artifactstatus"""
        return Artifactstatus.objects.get(
            artifactstatus_name='artifactstatus_1'
        ).artifactstatus_id

    @staticmethod
    def _artifacttype_ids():
        """primary keys of both fixture artifacttypes"""
        return [
            Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id,
            Artifacttype.objects.get(artifacttype_name='artifacttype_2').artifacttype_id,
        ]

    @staticmethod
    def _system_ids():
        """primary keys of both fixture systems"""
        return [
            System.objects.get(system_name='system_1').system_id,
            System.objects.get(system_name='system_2').system_id,
        ]

    @staticmethod
    def _tag_ids():
        """primary keys of both fixture tags"""
        return [
            Tag.objects.get(tag_name='tag_1').tag_id,
            Tag.objects.get(tag_name='tag_2').tag_id,
        ]

    def _minimal_valid_data(self):
        """form data filling all required fields (single artifacttype / system)"""
        return {
            'artifactpriority': self._artifactpriority_id(),
            'artifactstatus': self._artifactstatus_id(),
            'artifacttype': self._artifacttype_ids()[:1],
            'system': self._system_ids()[:1],
        }

    # --- form label tests ---------------------------------------------

    def test_artifact_creator_artifactpriority_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(form.fields['artifactpriority'].label, 'Artifactpriority (*)')

    def test_artifact_creator_artifactstatus_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(form.fields['artifactstatus'].label, 'Artifactstatus (*)')

    def test_artifact_creator_artifacttype_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(
            form.fields['artifacttype'].label,
            'Artifacttypes (*) - Will also be set as artifact names',
        )

    def test_artifact_creator_system_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(form.fields['system'].label, 'Systems (*)')

    def test_artifact_creator_tag_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(form.fields['tag'].label, 'Tags')

    def test_artifact_creator_analysisresult_note_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(
            form.fields['artifact_note_analysisresult'].label, 'Analysis result'
        )

    def test_artifact_creator_external_note_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(form.fields['artifact_note_external'].label, 'External note')

    def test_artifact_creator_internal_note_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(form.fields['artifact_note_internal'].label, 'Internal note')

    def test_artifact_creator_name_choice_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(
            form.fields['alternative_artifact_name_choice'].label,
            'Use alternative artifact name',
        )

    def test_artifact_creator_name_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(
            form.fields['alternative_artifact_name'].label, 'Alternative artifact name'
        )

    def test_artifact_creator_source_path_form_label(self):
        """test form label"""
        form = ArtifactCreatorForm()
        self.assertEqual(
            form.fields['artifact_source_path'].label,
            'Artifact source path (attention: will be set for all artifacts regardless of type)',
        )

    # --- form validation tests ----------------------------------------

    def test_artifact_creator_form_empty(self):
        """test minimum form requirements / INVALID"""
        form = ArtifactCreatorForm(data={})
        self.assertFalse(form.is_valid())

    def test_artifact_creator_artifacttype_form_filled(self):
        """test minimum form requirements / INVALID"""
        # artifacttype alone does not satisfy the required fields
        form = ArtifactCreatorForm(
            data={
                'artifacttype': self._artifacttype_ids(),
            }
        )
        self.assertFalse(form.is_valid())

    def test_artifact_creator_artifactpriority_form_filled(self):
        """test minimum form requirements / INVALID"""
        # priority + types still misses status and system
        form = ArtifactCreatorForm(
            data={
                'artifactpriority': self._artifactpriority_id(),
                'artifacttype': self._artifacttype_ids(),
            }
        )
        self.assertFalse(form.is_valid())

    def test_artifact_creator_artifactstatus_form_filled(self):
        """test minimum form requirements / INVALID"""
        # priority + status + types still misses system
        form = ArtifactCreatorForm(
            data={
                'artifactpriority': self._artifactpriority_id(),
                'artifactstatus': self._artifactstatus_id(),
                'artifacttype': self._artifacttype_ids(),
            }
        )
        self.assertFalse(form.is_valid())

    def test_artifact_creator_system_form_filled(self):
        """test minimum form requirements / VALID"""
        # all required fields provided
        form = ArtifactCreatorForm(
            data={
                'artifactpriority': self._artifactpriority_id(),
                'artifactstatus': self._artifactstatus_id(),
                'artifacttype': self._artifacttype_ids(),
                'system': self._system_ids(),
            }
        )
        self.assertTrue(form.is_valid())

    def test_artifact_creator_all_fields_form_filled(self):
        """test additional form content"""
        form = ArtifactCreatorForm(
            data={
                'artifactpriority': self._artifactpriority_id(),
                'artifactstatus': self._artifactstatus_id(),
                'artifacttype': self._artifacttype_ids(),
                'system': self._system_ids(),
                'tag': self._tag_ids(),
                'artifact_note_analysisresult': 'lorem ipsum',
                'artifact_note_external': 'lorem ipsum',
                'artifact_note_internal': 'lorem ipsum',
                'artifact_source_path': 'evil.exe',
            }
        )
        self.assertTrue(form.is_valid())

    # --- alternative artifact name validation tests -------------------

    def test_artifact_creator_alternative_name_form_filled(self):
        """test custom field validation"""
        # providing the name without the choice flag is invalid
        data = self._minimal_valid_data()
        data['alternative_artifact_name'] = 'alternative name'
        form = ArtifactCreatorForm(data=data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors['alternative_artifact_name'],
            ['Either both or neither of the fields is required.'],
        )

    def test_artifact_creator_alternative_choice_form_filled(self):
        """test custom field validation"""
        # setting the choice flag without a name is invalid
        data = self._minimal_valid_data()
        data['alternative_artifact_name_choice'] = True
        form = ArtifactCreatorForm(data=data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors['alternative_artifact_name'],
            ['Either both or neither of the fields is required.'],
        )

    def test_artifact_creator_alternative_both_form_filled(self):
        """test custom field validation"""
        # flag and name together are valid
        data = self._minimal_valid_data()
        data['alternative_artifact_name_choice'] = True
        data['alternative_artifact_name'] = 'alternative name'
        form = ArtifactCreatorForm(data=data)
        self.assertTrue(form.is_valid())
|
[
"dfirtrack_main.models.Tagcolor.objects.create",
"dfirtrack_artifacts.models.Artifacttype.objects.get",
"dfirtrack_main.models.Tag.objects.create",
"dfirtrack_main.models.Systemstatus.objects.create",
"dfirtrack_artifacts.models.Artifactpriority.objects.create",
"django.contrib.auth.models.User.objects.create_user",
"dfirtrack_artifacts.models.Artifactpriority.objects.get",
"dfirtrack_artifacts.forms.ArtifactCreatorForm",
"dfirtrack_artifacts.models.Artifactstatus.objects.create",
"dfirtrack_main.models.System.objects.create",
"dfirtrack_artifacts.models.Artifacttype.objects.create",
"dfirtrack_main.models.System.objects.get",
"dfirtrack_artifacts.models.Artifactstatus.objects.get",
"dfirtrack_main.models.Tag.objects.get"
] |
[((466, 556), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""testuser_artifact_creator"""', 'password': '"""<PASSWORD>"""'}), "(username='testuser_artifact_creator', password=\n '<PASSWORD>')\n", (490, 556), False, 'from django.contrib.auth.models import User\n'), ((624, 687), 'dfirtrack_main.models.Systemstatus.objects.create', 'Systemstatus.objects.create', ([], {'systemstatus_name': '"""systemstatus_1"""'}), "(systemstatus_name='systemstatus_1')\n", (651, 687), False, 'from dfirtrack_main.models import System, Systemstatus, Tag, Tagcolor\n'), ((721, 874), 'dfirtrack_main.models.System.objects.create', 'System.objects.create', ([], {'system_name': '"""system_1"""', 'systemstatus': 'systemstatus_1', 'system_created_by_user_id': 'test_user', 'system_modified_by_user_id': 'test_user'}), "(system_name='system_1', systemstatus=systemstatus_1,\n system_created_by_user_id=test_user, system_modified_by_user_id=test_user)\n", (742, 874), False, 'from dfirtrack_main.models import System, Systemstatus, Tag, Tagcolor\n'), ((938, 1091), 'dfirtrack_main.models.System.objects.create', 'System.objects.create', ([], {'system_name': '"""system_2"""', 'systemstatus': 'systemstatus_1', 'system_created_by_user_id': 'test_user', 'system_modified_by_user_id': 'test_user'}), "(system_name='system_2', systemstatus=systemstatus_1,\n system_created_by_user_id=test_user, system_modified_by_user_id=test_user)\n", (959, 1091), False, 'from dfirtrack_main.models import System, Systemstatus, Tag, Tagcolor\n'), ((1193, 1244), 'dfirtrack_main.models.Tagcolor.objects.create', 'Tagcolor.objects.create', ([], {'tagcolor_name': '"""tagcolor_1"""'}), "(tagcolor_name='tagcolor_1')\n", (1216, 1244), False, 'from dfirtrack_main.models import System, Systemstatus, Tag, Tagcolor\n'), ((1278, 1335), 'dfirtrack_main.models.Tag.objects.create', 'Tag.objects.create', ([], {'tag_name': '"""tag_1"""', 'tagcolor': 'tagcolor_1'}), "(tag_name='tag_1', 
tagcolor=tagcolor_1)\n", (1296, 1335), False, 'from dfirtrack_main.models import System, Systemstatus, Tag, Tagcolor\n'), ((1379, 1436), 'dfirtrack_main.models.Tag.objects.create', 'Tag.objects.create', ([], {'tag_name': '"""tag_2"""', 'tagcolor': 'tagcolor_1'}), "(tag_name='tag_2', tagcolor=tagcolor_1)\n", (1397, 1436), False, 'from dfirtrack_main.models import System, Systemstatus, Tag, Tagcolor\n'), ((1505, 1568), 'dfirtrack_artifacts.models.Artifactpriority.objects.create', 'Artifactpriority.objects.create', ([], {'artifactpriority_name': '"""prio_1"""'}), "(artifactpriority_name='prio_1')\n", (1536, 1568), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((1602, 1671), 'dfirtrack_artifacts.models.Artifactstatus.objects.create', 'Artifactstatus.objects.create', ([], {'artifactstatus_name': '"""artifactstatus_1"""'}), "(artifactstatus_name='artifactstatus_1')\n", (1631, 1671), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((1705, 1768), 'dfirtrack_artifacts.models.Artifacttype.objects.create', 'Artifacttype.objects.create', ([], {'artifacttype_name': '"""artifacttype_1"""'}), "(artifacttype_name='artifacttype_1')\n", (1732, 1768), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((1777, 1840), 'dfirtrack_artifacts.models.Artifacttype.objects.create', 'Artifacttype.objects.create', ([], {'artifacttype_name': '"""artifacttype_2"""'}), "(artifacttype_name='artifacttype_2')\n", (1804, 1840), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((1974, 1995), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {}), '()\n', (1993, 1995), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((2233, 2254), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {}), '()\n', (2252, 2254), False, 'from 
dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((2486, 2507), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {}), '()\n', (2505, 2507), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((2802, 2823), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {}), '()\n', (2821, 2823), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((3031, 3052), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {}), '()\n', (3050, 3052), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((3266, 3287), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {}), '()\n', (3285, 3287), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((3553, 3574), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {}), '()\n', (3572, 3574), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((3810, 3831), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {}), '()\n', (3829, 3831), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((4065, 4086), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {}), '()\n', (4084, 4086), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((4374, 4395), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {}), '()\n', (4393, 4395), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((4666, 4687), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {}), '()\n', (4685, 4687), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((5036, 5064), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {'data': '{}'}), '(data={})\n', (5055, 5064), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((5555, 5641), 
'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {'data': "{'artifacttype': [artifacttype_1_id, artifacttype_2_id]}"}), "(data={'artifacttype': [artifacttype_1_id,\n artifacttype_2_id]})\n", (5574, 5641), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((6398, 6525), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {'data': "{'artifactpriority': artifactpriority_id, 'artifacttype': [\n artifacttype_1_id, artifacttype_2_id]}"}), "(data={'artifactpriority': artifactpriority_id,\n 'artifacttype': [artifacttype_1_id, artifacttype_2_id]})\n", (6417, 6525), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((7452, 7620), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {'data': "{'artifactpriority': artifactpriority_id, 'artifactstatus':\n artifactstatus_id, 'artifacttype': [artifacttype_1_id, artifacttype_2_id]}"}), "(data={'artifactpriority': artifactpriority_id,\n 'artifactstatus': artifactstatus_id, 'artifacttype': [artifacttype_1_id,\n artifacttype_2_id]})\n", (7471, 7620), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((8720, 8926), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {'data': "{'artifactpriority': artifactpriority_id, 'artifactstatus':\n artifactstatus_id, 'artifacttype': [artifacttype_1_id,\n artifacttype_2_id], 'system': [system_1_id, system_2_id]}"}), "(data={'artifactpriority': artifactpriority_id,\n 'artifactstatus': artifactstatus_id, 'artifacttype': [artifacttype_1_id,\n artifacttype_2_id], 'system': [system_1_id, system_2_id]})\n", (8739, 8926), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((10235, 10648), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {'data': "{'artifactpriority': artifactpriority_id, 'artifactstatus':\n artifactstatus_id, 'artifacttype': [artifacttype_1_id,\n artifacttype_2_id], 'system': 
[system_1_id, system_2_id], 'tag': [\n tag_1_id, tag_2_id], 'artifact_note_analysisresult': 'lorem ipsum',\n 'artifact_note_external': 'lorem ipsum', 'artifact_note_internal':\n 'lorem ipsum', 'artifact_source_path': 'evil.exe'}"}), "(data={'artifactpriority': artifactpriority_id,\n 'artifactstatus': artifactstatus_id, 'artifacttype': [artifacttype_1_id,\n artifacttype_2_id], 'system': [system_1_id, system_2_id], 'tag': [\n tag_1_id, tag_2_id], 'artifact_note_analysisresult': 'lorem ipsum',\n 'artifact_note_external': 'lorem ipsum', 'artifact_note_internal':\n 'lorem ipsum', 'artifact_source_path': 'evil.exe'})\n", (10254, 10648), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((11749, 11977), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {'data': "{'artifactpriority': artifactpriority_id, 'artifactstatus':\n artifactstatus_id, 'artifacttype': [artifacttype_1_id], 'system': [\n system_1_id], 'alternative_artifact_name': 'alternative name'}"}), "(data={'artifactpriority': artifactpriority_id,\n 'artifactstatus': artifactstatus_id, 'artifacttype': [artifacttype_1_id\n ], 'system': [system_1_id], 'alternative_artifact_name':\n 'alternative name'})\n", (11768, 11977), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((13083, 13300), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {'data': "{'artifactpriority': artifactpriority_id, 'artifactstatus':\n artifactstatus_id, 'artifacttype': [artifacttype_1_id], 'system': [\n system_1_id], 'alternative_artifact_name_choice': True}"}), "(data={'artifactpriority': artifactpriority_id,\n 'artifactstatus': artifactstatus_id, 'artifacttype': [artifacttype_1_id\n ], 'system': [system_1_id], 'alternative_artifact_name_choice': True})\n", (13102, 13300), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((14408, 14678), 'dfirtrack_artifacts.forms.ArtifactCreatorForm', 'ArtifactCreatorForm', ([], {'data': 
"{'artifactpriority': artifactpriority_id, 'artifactstatus':\n artifactstatus_id, 'artifacttype': [artifacttype_1_id], 'system': [\n system_1_id], 'alternative_artifact_name_choice': True,\n 'alternative_artifact_name': 'alternative name'}"}), "(data={'artifactpriority': artifactpriority_id,\n 'artifactstatus': artifactstatus_id, 'artifacttype': [artifacttype_1_id\n ], 'system': [system_1_id], 'alternative_artifact_name_choice': True,\n 'alternative_artifact_name': 'alternative name'})\n", (14427, 14678), False, 'from dfirtrack_artifacts.forms import ArtifactCreatorForm\n'), ((5293, 5353), 'dfirtrack_artifacts.models.Artifacttype.objects.get', 'Artifacttype.objects.get', ([], {'artifacttype_name': '"""artifacttype_1"""'}), "(artifacttype_name='artifacttype_1')\n", (5317, 5353), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((5420, 5480), 'dfirtrack_artifacts.models.Artifacttype.objects.get', 'Artifacttype.objects.get', ([], {'artifacttype_name': '"""artifacttype_2"""'}), "(artifacttype_name='artifacttype_2')\n", (5444, 5480), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((5984, 6044), 'dfirtrack_artifacts.models.Artifactpriority.objects.get', 'Artifactpriority.objects.get', ([], {'artifactpriority_name': '"""prio_1"""'}), "(artifactpriority_name='prio_1')\n", (6012, 6044), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((6136, 6196), 'dfirtrack_artifacts.models.Artifacttype.objects.get', 'Artifacttype.objects.get', ([], {'artifacttype_name': '"""artifacttype_1"""'}), "(artifacttype_name='artifacttype_1')\n", (6160, 6196), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((6263, 6323), 'dfirtrack_artifacts.models.Artifacttype.objects.get', 'Artifacttype.objects.get', ([], {'artifacttype_name': '"""artifacttype_2"""'}), "(artifacttype_name='artifacttype_2')\n", 
(6287, 6323), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((6882, 6942), 'dfirtrack_artifacts.models.Artifactpriority.objects.get', 'Artifactpriority.objects.get', ([], {'artifactpriority_name': '"""prio_1"""'}), "(artifactpriority_name='prio_1')\n", (6910, 6942), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((7034, 7100), 'dfirtrack_artifacts.models.Artifactstatus.objects.get', 'Artifactstatus.objects.get', ([], {'artifactstatus_name': '"""artifactstatus_1"""'}), "(artifactstatus_name='artifactstatus_1')\n", (7060, 7100), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((7190, 7250), 'dfirtrack_artifacts.models.Artifacttype.objects.get', 'Artifacttype.objects.get', ([], {'artifacttype_name': '"""artifacttype_1"""'}), "(artifacttype_name='artifacttype_1')\n", (7214, 7250), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((7317, 7377), 'dfirtrack_artifacts.models.Artifacttype.objects.get', 'Artifacttype.objects.get', ([], {'artifacttype_name': '"""artifacttype_2"""'}), "(artifacttype_name='artifacttype_2')\n", (7341, 7377), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((7979, 8039), 'dfirtrack_artifacts.models.Artifactpriority.objects.get', 'Artifactpriority.objects.get', ([], {'artifactpriority_name': '"""prio_1"""'}), "(artifactpriority_name='prio_1')\n", (8007, 8039), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((8131, 8197), 'dfirtrack_artifacts.models.Artifactstatus.objects.get', 'Artifactstatus.objects.get', ([], {'artifactstatus_name': '"""artifactstatus_1"""'}), "(artifactstatus_name='artifactstatus_1')\n", (8157, 8197), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((8287, 8347), 
'dfirtrack_artifacts.models.Artifacttype.objects.get', 'Artifacttype.objects.get', ([], {'artifacttype_name': '"""artifacttype_1"""'}), "(artifacttype_name='artifacttype_1')\n", (8311, 8347), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((8414, 8474), 'dfirtrack_artifacts.models.Artifacttype.objects.get', 'Artifacttype.objects.get', ([], {'artifacttype_name': '"""artifacttype_2"""'}), "(artifacttype_name='artifacttype_2')\n", (8438, 8474), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((8556, 8598), 'dfirtrack_main.models.System.objects.get', 'System.objects.get', ([], {'system_name': '"""system_1"""'}), "(system_name='system_1')\n", (8574, 8598), False, 'from dfirtrack_main.models import System, Systemstatus, Tag, Tagcolor\n'), ((8631, 8673), 'dfirtrack_main.models.System.objects.get', 'System.objects.get', ([], {'system_name': '"""system_2"""'}), "(system_name='system_2')\n", (8649, 8673), False, 'from dfirtrack_main.models import System, Systemstatus, Tag, Tagcolor\n'), ((9353, 9413), 'dfirtrack_artifacts.models.Artifactpriority.objects.get', 'Artifactpriority.objects.get', ([], {'artifactpriority_name': '"""prio_1"""'}), "(artifactpriority_name='prio_1')\n", (9381, 9413), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((9505, 9571), 'dfirtrack_artifacts.models.Artifactstatus.objects.get', 'Artifactstatus.objects.get', ([], {'artifactstatus_name': '"""artifactstatus_1"""'}), "(artifactstatus_name='artifactstatus_1')\n", (9531, 9571), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((9661, 9721), 'dfirtrack_artifacts.models.Artifacttype.objects.get', 'Artifacttype.objects.get', ([], {'artifacttype_name': '"""artifacttype_1"""'}), "(artifacttype_name='artifacttype_1')\n", (9685, 9721), False, 'from dfirtrack_artifacts.models import Artifactpriority, 
Artifactstatus, Artifacttype\n'), ((9788, 9848), 'dfirtrack_artifacts.models.Artifacttype.objects.get', 'Artifacttype.objects.get', ([], {'artifacttype_name': '"""artifacttype_2"""'}), "(artifacttype_name='artifacttype_2')\n", (9812, 9848), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((9930, 9972), 'dfirtrack_main.models.System.objects.get', 'System.objects.get', ([], {'system_name': '"""system_1"""'}), "(system_name='system_1')\n", (9948, 9972), False, 'from dfirtrack_main.models import System, Systemstatus, Tag, Tagcolor\n'), ((10005, 10047), 'dfirtrack_main.models.System.objects.get', 'System.objects.get', ([], {'system_name': '"""system_2"""'}), "(system_name='system_2')\n", (10023, 10047), False, 'from dfirtrack_main.models import System, Systemstatus, Tag, Tagcolor\n'), ((10098, 10131), 'dfirtrack_main.models.Tag.objects.get', 'Tag.objects.get', ([], {'tag_name': '"""tag_1"""'}), "(tag_name='tag_1')\n", (10113, 10131), False, 'from dfirtrack_main.models import System, Systemstatus, Tag, Tagcolor\n'), ((10158, 10191), 'dfirtrack_main.models.Tag.objects.get', 'Tag.objects.get', ([], {'tag_name': '"""tag_2"""'}), "(tag_name='tag_2')\n", (10173, 10191), False, 'from dfirtrack_main.models import System, Systemstatus, Tag, Tagcolor\n'), ((11207, 11267), 'dfirtrack_artifacts.models.Artifactpriority.objects.get', 'Artifactpriority.objects.get', ([], {'artifactpriority_name': '"""prio_1"""'}), "(artifactpriority_name='prio_1')\n", (11235, 11267), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((11359, 11425), 'dfirtrack_artifacts.models.Artifactstatus.objects.get', 'Artifactstatus.objects.get', ([], {'artifactstatus_name': '"""artifactstatus_1"""'}), "(artifactstatus_name='artifactstatus_1')\n", (11385, 11425), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((11518, 11578), 
'dfirtrack_artifacts.models.Artifacttype.objects.get', 'Artifacttype.objects.get', ([], {'artifacttype_name': '"""artifacttype_1"""'}), "(artifacttype_name='artifacttype_1')\n", (11542, 11578), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((11660, 11702), 'dfirtrack_main.models.System.objects.get', 'System.objects.get', ([], {'system_name': '"""system_1"""'}), "(system_name='system_1')\n", (11678, 11702), False, 'from dfirtrack_main.models import System, Systemstatus, Tag, Tagcolor\n'), ((12541, 12601), 'dfirtrack_artifacts.models.Artifactpriority.objects.get', 'Artifactpriority.objects.get', ([], {'artifactpriority_name': '"""prio_1"""'}), "(artifactpriority_name='prio_1')\n", (12569, 12601), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((12693, 12759), 'dfirtrack_artifacts.models.Artifactstatus.objects.get', 'Artifactstatus.objects.get', ([], {'artifactstatus_name': '"""artifactstatus_1"""'}), "(artifactstatus_name='artifactstatus_1')\n", (12719, 12759), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((12852, 12912), 'dfirtrack_artifacts.models.Artifacttype.objects.get', 'Artifacttype.objects.get', ([], {'artifacttype_name': '"""artifacttype_1"""'}), "(artifacttype_name='artifacttype_1')\n", (12876, 12912), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((12994, 13036), 'dfirtrack_main.models.System.objects.get', 'System.objects.get', ([], {'system_name': '"""system_1"""'}), "(system_name='system_1')\n", (13012, 13036), False, 'from dfirtrack_main.models import System, Systemstatus, Tag, Tagcolor\n'), ((13866, 13926), 'dfirtrack_artifacts.models.Artifactpriority.objects.get', 'Artifactpriority.objects.get', ([], {'artifactpriority_name': '"""prio_1"""'}), "(artifactpriority_name='prio_1')\n", (13894, 13926), False, 'from dfirtrack_artifacts.models import 
Artifactpriority, Artifactstatus, Artifacttype\n'), ((14018, 14084), 'dfirtrack_artifacts.models.Artifactstatus.objects.get', 'Artifactstatus.objects.get', ([], {'artifactstatus_name': '"""artifactstatus_1"""'}), "(artifactstatus_name='artifactstatus_1')\n", (14044, 14084), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((14177, 14237), 'dfirtrack_artifacts.models.Artifacttype.objects.get', 'Artifacttype.objects.get', ([], {'artifacttype_name': '"""artifacttype_1"""'}), "(artifacttype_name='artifacttype_1')\n", (14201, 14237), False, 'from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype\n'), ((14319, 14361), 'dfirtrack_main.models.System.objects.get', 'System.objects.get', ([], {'system_name': '"""system_1"""'}), "(system_name='system_1')\n", (14337, 14361), False, 'from dfirtrack_main.models import System, Systemstatus, Tag, Tagcolor\n')]
|
import numpy as np
import sympy
from lark import Transformer, Tree
from sortedcontainers import SortedList
from TS.State import Vector
class Rate:
    """Kinetic rate expression of a rule.

    The expression starts life as a plain string and is parsed elsewhere
    into a lark Tree; the methods below rewrite that tree (vectorisation,
    context reduction, symbolic form) and evaluate it via sympy.
    """

    def __init__(self, expression):
        # expression: str (unparsed) or lark.Tree (parsed rate expression)
        self.expression = expression

    def __eq__(self, other):
        return self.expression == other.expression

    def __repr__(self):
        return str(self)

    def __str__(self):
        # a plain string is returned as-is; a Tree is flattened back to text
        return self.expression if type(self.expression) == str else "".join(tree_to_string(self.expression))

    def __hash__(self):
        # hash the textual form so that equal expressions hash equally
        return hash(str(self))

    def vectorize(self, ordering: SortedList, definitions: dict) -> list:
        """
        Converts all occurrences of Complexes (resp. sub trees named agent)
        with its vector representation. These are directly replaced within
        the tree expression.

        Moreover, in the process parameters are replaced with their values
        (if given).

        :param ordering: given SortedList of Complexes
        :param definitions: dict of (param_name, value)
        :return: list of transformed States (just for testing)
        """
        vec = Vectorizer(ordering, definitions)
        self.expression = vec.transform(self.expression)
        return vec.visited

    def evaluate(self, state) -> float:
        """
        Evaluates all occurrences of States to a float using Evaluater.
        It is done as intersection of particular state with given state
        and sum of resulting elements.

        If the result is nan (or sympify fails), None is returned instead.

        :param state: given state
        :return: sympy expression/number, or None on failure
        """
        evaluater = Evaluater(state)
        result = evaluater.transform(self.expression)
        try:
            # evaluater.locals maps parameter names to sympy Symbols so that
            # unresolved parameters survive sympify as free symbols
            value = sympy.sympify("".join(tree_to_string(result)), locals=evaluater.locals)
            if value == sympy.nan:
                return None
            return value
        except TypeError:
            return None

    def to_symbolic(self):
        """
        Translates rate from vector representation to symbolic one
        as a sum of particular components.

        e.g. [1, 0, 1] -> (x_0 + x_2)
        """
        transformer = SymbolicAgents()
        self.expression = transformer.transform(self.expression)

    def reduce_context(self) -> 'Rate':
        """
        Reduces context of all Complexes to minimum.

        :return: new Rate with reduced context
        """
        transformer = ContextReducer()
        expression = transformer.transform(self.expression)
        return Rate(expression)

    def get_params_and_agents(self):
        """
        Extracts all agents (Complex objects) and params (strings) used in the rate expression.

        :return: set of agents and set of params
        """
        transformer = Extractor()
        transformer.transform(self.expression)
        return transformer.agents, transformer.params

    def evaluate_direct(self, values, params) -> float:
        """
        Evaluates the expression with directly given agent counts and
        parameter values.

        If the result is nan (or sympify fails), None is returned instead.

        :param values: given mapping complex -> count
        :param params: given mapping param name -> value
        :return: sympy expression/number, or None on failure
        """
        evaluater = DirectEvaluater(values, params)
        result = evaluater.transform(self.expression)
        try:
            value = sympy.sympify("".join(tree_to_string(result)))
            if value == sympy.nan:
                return None
            return value
        except TypeError:
            return None
# Transformers for Tree
class ContextReducer(Transformer):
    """Tree visitor replacing every Complex by its minimal-context form."""

    def agent(self, matches):
        reduced = matches[0].reduce_context()
        return Tree("agent", [reduced])
class SymbolicAgents(Transformer):
    """Rewrites each vector agent as a parenthesised symbolic ODE sum."""

    def agent(self, vector):
        symbolic = "({})".format(vector[0].to_ODE_string())
        return Tree("agent", [symbolic])
class Vectorizer(Transformer):
    """Replaces Complex leaves with 0/1 indicator Vectors over the given
    ordering and substitutes known parameter values."""

    def __init__(self, ordering, definitions):
        # NOTE: intentionally skips Transformer.__init__ (kept as-is)
        super(Transformer, self).__init__()
        self.ordering = ordering
        self.definitions = definitions
        self.visited = []

    def agent(self, complex):
        target = complex[0]
        indicator = np.zeros(len(self.ordering))
        for position, candidate in enumerate(self.ordering):
            if target.compatible(candidate):
                indicator[position] = 1
        vector = Vector(indicator)
        self.visited.append(vector)
        return Tree("agent", [vector])

    def rate_agent(self, matches):
        # drop the wrapping node, keep only the inner agent
        return matches[1]

    def param(self, matches):
        # substitute the parameter's value when defined, otherwise keep the node
        return self.definitions.get(str(matches[0]), Tree("param", matches))
class Evaluater(Transformer):
    """Collapses agent indicator vectors to counts taken from a state and
    registers parameter names as sympy symbols in ``self.locals``."""

    def __init__(self, state):
        # NOTE: intentionally skips Transformer.__init__ (kept as-is)
        super(Transformer, self).__init__()
        self.state = state
        self.locals = dict()

    def agent(self, state):
        # element-wise intersection with the evaluated state, then sum
        counts = self.state.content * state[0]
        return sum(counts)

    def param(self, matches):
        name = matches[0]
        self.locals[name] = sympy.Symbol(name)
        return name
class DirectEvaluater(Transformer):
    """Substitutes agent counts and parameter values directly from the
    supplied dictionaries."""

    def __init__(self, values, params):
        # NOTE: intentionally skips Transformer.__init__ (kept as-is)
        super(Transformer, self).__init__()
        self.values = values
        self.params = params

    def rate_agent(self, matches):
        return Tree('fun', [matches[1]])

    def agent(self, matches):
        # unknown complexes count as zero
        return self.values.get(matches[0], 0)

    def param(self, matches):
        key = str(matches[0])
        resolved = self.params.get(key, key)
        return Tree('fun', [resolved])
class Extractor(Transformer):
    """Collects every Complex and every parameter name appearing in a
    rate expression tree."""

    def __init__(self):
        super(Extractor, self).__init__()
        self.agents = set()
        self.params = set()

    def _record(self, bucket, label, matches):
        # remember the leaf and rebuild the node unchanged
        bucket.add(matches[0])
        return Tree(label, matches)

    def agent(self, matches):
        return self._record(self.agents, "agent", matches)

    def param(self, matches):
        return self._record(self.params, "param", matches)
def tree_to_string(tree):
    """Flatten a lark Tree (or a single token) into a list of token strings."""
    if type(tree) != Tree:
        return [str(tree)]
    tokens = []
    for child in tree.children:
        tokens.extend(tree_to_string(child))
    return tokens
|
[
"sympy.Symbol",
"TS.State.Vector",
"lark.Tree"
] |
[((3779, 3802), 'lark.Tree', 'Tree', (['"""agent"""', '[vector]'], {}), "('agent', [vector])\n", (3783, 3802), False, 'from lark import Transformer, Tree\n'), ((4276, 4290), 'TS.State.Vector', 'Vector', (['result'], {}), '(result)\n', (4282, 4290), False, 'from TS.State import Vector\n'), ((4342, 4365), 'lark.Tree', 'Tree', (['"""agent"""', '[result]'], {}), "('agent', [result])\n", (4346, 4365), False, 'from lark import Transformer, Tree\n'), ((4863, 4881), 'sympy.Symbol', 'sympy.Symbol', (['name'], {}), '(name)\n', (4875, 4881), False, 'import sympy\n'), ((5133, 5158), 'lark.Tree', 'Tree', (['"""fun"""', '[matches[1]]'], {}), "('fun', [matches[1]])\n", (5137, 5158), False, 'from lark import Transformer, Tree\n'), ((5346, 5364), 'lark.Tree', 'Tree', (['"""fun"""', '[par]'], {}), "('fun', [par])\n", (5350, 5364), False, 'from lark import Transformer, Tree\n'), ((5601, 5623), 'lark.Tree', 'Tree', (['"""agent"""', 'matches'], {}), "('agent', matches)\n", (5605, 5623), False, 'from lark import Transformer, Tree\n'), ((5706, 5728), 'lark.Tree', 'Tree', (['"""param"""', 'matches'], {}), "('param', matches)\n", (5710, 5728), False, 'from lark import Transformer, Tree\n'), ((4512, 4534), 'lark.Tree', 'Tree', (['"""param"""', 'matches'], {}), "('param', matches)\n", (4516, 4534), False, 'from lark import Transformer, Tree\n')]
|
import sys, time, os, json
import numpy as np
import matplotlib.pylab as plt
from PIL import Image
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
from google.colab import drive
def Unet(img_shape):
    """Build a small U-Net generator for img_shape inputs.

    Three downsampling blocks (32/64/128 filters) with skip connections to
    three upsampling blocks, a 256-filter bottleneck, and a final tanh
    convolution producing an image of the same shape.
    """
    def conv2d(x, filters):
        # downsampling block: strided conv -> LeakyReLU -> instance norm
        x = Conv2D(filters, 4, strides=2, padding='same')(x)
        x = LeakyReLU(0.2)(x)
        x = InstanceNormalization()(x)
        return x
    def deconv2d(x, contracting_path, filters, drop_rate=0):
        # upsampling block: upsample -> conv -> (dropout) -> instance norm,
        # then concatenate with the matching encoder feature map (skip)
        x = UpSampling2D(2)(x)
        x = Conv2D(filters, 4, padding='same', activation='relu')(x)
        if drop_rate:
            x = Dropout(drop_rate)(x)
        x = InstanceNormalization()(x)
        return Concatenate()([x, contracting_path])
    img = Input(img_shape)
    # Encoder
    c1 = conv2d(img, 32)
    c2 = conv2d(c1, 64)
    c3 = conv2d(c2, 128)
    # Bottleneck
    x = conv2d(c3, 256)
    # Decoder
    x = deconv2d(x, c3, 128)
    x = deconv2d(x, c2, 64)
    x = deconv2d(x, c1, 32)
    # Restore original size; tanh output in [-1, 1]
    x = UpSampling2D(2)(x)
    x = Conv2D(img_shape[-1], 4, padding='same', activation='tanh')(x)
    return Model(img, x)
def Discriminator(img_shape):
    """Build a PatchGAN discriminator for img_shape inputs.

    Four strided convolutions (64/128/256/512 filters) downsample the
    image by 16x, followed by a 1-filter convolution producing a patch map
    of real/fake scores.
    """
    def d_layer(x, filters, bn=True):
        # strided conv -> LeakyReLU -> (instance norm)
        x = Conv2D(filters, 4, strides=2, padding='same')(x)
        x = LeakyReLU(0.2)(x)
        if bn:
            x = InstanceNormalization()(x)
        return x
    img = Input(img_shape)
    # Convolve down to the PatchGAN resolution
    x = d_layer(img, 64, False)
    x = d_layer(x, 128)
    x = d_layer(x, 256)
    x = d_layer(x, 512)
    # Output one real/fake score per patch
    x = Conv2D(1, 4, padding='same')(x)
    return Model(img, x)
def CycleGAN(gen_AB, gen_BA, disc_A, disc_B, img_shape):
    """Wire the combined CycleGAN training model.

    Takes an image from each domain and outputs, in order: the two
    discriminator scores on the translated images, the two cycle
    reconstructions, and the two identity mappings. The output order must
    match the loss list used when compiling this model in train().
    """
    img_A = Input(img_shape)
    img_B = Input(img_shape)
    # translations A->B and B->A
    fake_B = gen_AB(img_A)
    fake_A = gen_BA(img_B)
    # cycle reconstructions A->B->A and B->A->B
    reconstr_A = gen_BA(fake_B)
    reconstr_B = gen_AB(fake_A)
    # identity mappings (feeding a generator its own target domain)
    img_A_id = gen_BA(img_A)
    img_B_id = gen_AB(img_B)
    # discriminator scores on the translated images
    valid_A = disc_A(fake_A)
    valid_B = disc_B(fake_B)
    return Model([img_A, img_B],
        [valid_A, valid_B, reconstr_A, reconstr_B, img_A_id, img_B_id])
def load_datasets(path, train_num, img_shape, dtype=np.uint8):
    """Memory-map a raw image dataset stored as a flat binary file.

    The file is opened read-only, so arbitrarily large datasets can be
    used without loading them into RAM.

    :param path: path to the raw binary file
    :param train_num: number of samples in the file
    :param img_shape: shape of a single sample, e.g. (H, W, 3)
    :param dtype: element type of the stored data (default uint8;
        parameterised so non-8-bit datasets can be mapped too)
    :return: read-only np.memmap of shape (train_num,) + img_shape
    """
    return np.memmap(path, dtype=dtype, mode="r", shape=(train_num,) + img_shape)
def get_json(json_name, init_func):
    """Load and return the JSON stored at json_name; if no such file
    exists, return the fallback produced by init_func()."""
    if not os.path.isfile(json_name):
        return init_func()
    with open(json_name) as f:
        return json.load(f)
def train():
    """Train the CycleGAN grayscale->color model on Google Colab.

    Mounts Google Drive, memory-maps the datasets, resumes from saved
    weights and epoch counter when present, then alternates discriminator
    and combined-generator updates for up to 200 epochs, saving weights
    and sample grids along the way.
    """
    # Mount Google Drive and create the output folders
    drive_root = '/content/drive'
    drive.mount(drive_root)
    datasets_dir = "%s/My Drive/datasets"%drive_root
    train_dir = "%s/My Drive/train/cycle128"%drive_root
    imgs_dir = "%s/imgs"%train_dir
    # NOTE(review): imgs_dir is local to train() but print_img() reads a
    # global of the same name when saving -- that lookup will raise
    # NameError; confirm and pass the directory explicitly.
    os.makedirs(imgs_dir, exist_ok=True)
    # Training data (memory-mapped; A = color images, B = grayscale)
    train_num = 30000
    test_num = 6000
    img_size = 128
    data_num = train_num + test_num
    img_shape = (img_size,img_size,3)
    train_A = load_datasets("%s/color%d_%d.npy"%(datasets_dir,img_size,data_num), data_num, img_shape)
    train_B = load_datasets("%s/gray%d_%d.npy"%(datasets_dir,img_size,data_num), data_num, (img_size,img_size))
    # Training schedule
    epochs = 200
    batch_size = 100
    batch_num = train_num // batch_size
    # Training info from previous runs (epoch to resume from)
    info_path = "%s/info.json"%train_dir
    info = get_json(info_path, lambda: {"epoch":0})
    last_epoch = info["epoch"]
    # PatchGAN target label maps (one real/fake score per 16x16 patch)
    patch_shape = (img_size//16, img_size//16, 1)
    real = np.ones((batch_size,) + patch_shape)
    fake = np.zeros((batch_size,) + patch_shape)
    # Models
    lambda_cycle = 10.0  # weight of the cycle-consistency loss
    lambda_id = 0.1 * lambda_cycle  # weight of the identity loss
    opt = Adam(0.0002, 0.5)
    gen_AB_path = "%s/gen_AB.h5"%train_dir
    gen_BA_path = "%s/gen_BA.h5"%train_dir
    disc_A_path = "%s/disc_A.h5"%train_dir
    disc_B_path = "%s/disc_B.h5"%train_dir
    if os.path.isfile(disc_B_path):
        # resume: reload all four networks and emit sample grids
        gen_AB = load_model(gen_AB_path, custom_objects={'InstanceNormalization': InstanceNormalization})
        gen_BA = load_model(gen_BA_path, custom_objects={'InstanceNormalization': InstanceNormalization})
        disc_A = load_model(disc_A_path, custom_objects={'InstanceNormalization': InstanceNormalization})
        disc_B = load_model(disc_B_path, custom_objects={'InstanceNormalization': InstanceNormalization})
        print_img(last_epoch, gen_BA, train_A, train_B, 0, train_num, "train", img_size)
        print_img(last_epoch, gen_BA, train_A, train_B, train_num, test_num, "test", img_size)
    else:
        # fresh start: build the networks from scratch
        gen_AB = Unet(img_shape)
        gen_BA = Unet(img_shape)
        disc_A = Discriminator(img_shape)
        disc_B = Discriminator(img_shape)
    disc_A.compile(loss='mse', optimizer=opt, metrics=['accuracy'])
    disc_B.compile(loss='mse', optimizer=opt, metrics=['accuracy'])
    # freeze the discriminators inside the combined generator model
    disc_A.trainable = False
    disc_B.trainable = False
    cycle_gan = CycleGAN(gen_AB, gen_BA, disc_A, disc_B, img_shape)
    # loss order matches the CycleGAN() output order:
    # adversarial A/B, cycle A/B, identity A/B
    cycle_gan.compile(loss=['mse', 'mse', 'mae', 'mae', 'mae', 'mae'],
        loss_weights=[1, 1, lambda_cycle, lambda_cycle, lambda_id, lambda_id], optimizer=opt)
    # Epoch loop
    for e in range(last_epoch, epochs):
        start = time.time()
        # Mini-batch loop
        for i in range(batch_num):
            # pick random (independent) batch indices for each domain
            idx = np.random.choice(train_num, batch_size, replace=False)
            imgs_A = train_A[idx].astype(np.float32) / 255
            idx = np.random.choice(train_num, batch_size, replace=False)
            imgs_B = convert_rgb(train_B[idx]).astype(np.float32) / 255
            # Discriminator training (real batches vs generated fakes)
            fake_B = gen_AB.predict(imgs_A)
            fake_A = gen_BA.predict(imgs_B)
            d_loss_real = disc_A.train_on_batch(imgs_A, real)
            d_loss_fake = disc_A.train_on_batch(fake_A, fake)
            d_loss_A = np.add(d_loss_real, d_loss_fake) * 0.5
            d_loss_real = disc_B.train_on_batch(imgs_B, real)
            d_loss_fake = disc_B.train_on_batch(fake_B, fake)
            d_loss_B = np.add(d_loss_real, d_loss_fake) * 0.5
            d_loss = np.add(d_loss_A, d_loss_B) * 0.5
            # Generator (combined model) training
            g_loss = cycle_gan.train_on_batch([imgs_A, imgs_B],
                [real, real, imgs_A, imgs_B, imgs_A, imgs_B])
            # Progress log (carriage return keeps it on one line)
            print("\repoch:%d/%d batch:%d/%d %ds d_loss:%s g_loss:%s" %
                (e+1,epochs, (i+1),batch_num, (time.time()-start), d_loss[0], g_loss[0]), end="")
            sys.stdout.flush()
        print()
        # Sample image grids every 10 epochs (and after the first)
        if (e+1) % 10 == 0 or e == 0:
            print_img(e+1, gen_BA, train_A, train_B, 0, train_num, "train", img_size)
            print_img(e+1, gen_BA, train_A, train_B, train_num, test_num, "test", img_size)
        # Save weights and bump the resume counter every epoch
        gen_AB.save(gen_AB_path)
        gen_BA.save(gen_BA_path)
        disc_A.save(disc_A_path)
        disc_B.save(disc_B_path)
        info["epoch"] += 1
        with open(info_path, "w") as f:
            json.dump(info, f)
def convert_rgb(train_B):
    """Expand a batch of single-channel grayscale images to 3-channel RGB.

    PIL's Image.convert("RGB") on a mode-"L" image copies the gray value
    into each of the three channels; np.repeat does the same in one
    vectorized call, avoiding the per-image Python/PIL round trip.

    :param train_B: array-like of shape (N, H, W)
    :return: np.ndarray of shape (N, H, W, 3), same dtype as the input
    """
    gray = np.asarray(train_B)
    return np.repeat(gray[..., np.newaxis], 3, axis=-1)
def print_img(epoch, gen, train_A, train_B, offset, limit, title, img_size, imgs_dir="."):
    """Plot and save a grid of sample colorizations.

    Picks 10 random samples from [offset, offset+limit), colorizes the
    grayscale versions with the generator, and stacks gray input / color
    target / generated output into one image that is shown and saved as
    "<imgs_dir>/cycle<img_size>_<epoch>_<title>.png".

    Bug fix: this function previously read ``imgs_dir`` as a global, but
    that name was only ever a local of train(), so saving raised
    NameError. It is now an explicit parameter defaulting to the current
    directory, which keeps the existing positional call sites working.
    """
    # pick random samples from the requested slice of the dataset
    num = 10
    idx = np.random.choice(limit, num, replace=False) + offset
    imgs_A = train_A[idx]
    imgs_B = convert_rgb(train_B[idx])
    # colorize the grayscale inputs (generator expects [0, 1] floats)
    fake_A = gen.predict(imgs_B.astype(np.float32) / 255)
    fake_A = (fake_A * 255).clip(0).astype(np.uint8)
    # tile horizontally per row, then stack rows: gray / target / generated
    imgs_A = np.concatenate(imgs_A, axis=1)
    imgs_B = np.concatenate(imgs_B, axis=1)
    fake_A = np.concatenate(fake_A, axis=1)
    imgs = np.concatenate((imgs_B, imgs_A, fake_A), axis=0)
    # plot
    plt.figure(figsize=(20, 6))
    plt.title(title)
    plt.imshow(imgs)
    plt.axis('off')
    plt.show()
    # save
    Image.fromarray(imgs).save("%s/cycle%d_%d_%s.png" % (imgs_dir, img_size, epoch, title))
# Run the full training loop when the script is executed
train()
|
[
"matplotlib.pylab.imshow",
"numpy.ones",
"matplotlib.pylab.axis",
"os.path.isfile",
"sys.stdout.flush",
"matplotlib.pylab.title",
"matplotlib.pylab.show",
"matplotlib.pylab.figure",
"numpy.random.choice",
"numpy.add",
"json.dump",
"google.colab.drive.mount",
"numpy.memmap",
"numpy.concatenate",
"json.load",
"os.makedirs",
"numpy.zeros",
"time.time",
"keras_contrib.layers.normalization.instancenormalization.InstanceNormalization",
"PIL.Image.fromarray"
] |
[((2246, 2319), 'numpy.memmap', 'np.memmap', (['path'], {'dtype': 'np.uint8', 'mode': '"""r"""', 'shape': '((train_num,) + img_shape)'}), "(path, dtype=np.uint8, mode='r', shape=(train_num,) + img_shape)\n", (2255, 2319), True, 'import numpy as np\n'), ((2365, 2390), 'os.path.isfile', 'os.path.isfile', (['json_name'], {}), '(json_name)\n', (2379, 2390), False, 'import sys, time, os, json\n'), ((2581, 2604), 'google.colab.drive.mount', 'drive.mount', (['drive_root'], {}), '(drive_root)\n', (2592, 2604), False, 'from google.colab import drive\n'), ((2757, 2793), 'os.makedirs', 'os.makedirs', (['imgs_dir'], {'exist_ok': '(True)'}), '(imgs_dir, exist_ok=True)\n', (2768, 2793), False, 'import sys, time, os, json\n'), ((3476, 3512), 'numpy.ones', 'np.ones', (['((batch_size,) + patch_shape)'], {}), '((batch_size,) + patch_shape)\n', (3483, 3512), True, 'import numpy as np\n'), ((3525, 3562), 'numpy.zeros', 'np.zeros', (['((batch_size,) + patch_shape)'], {}), '((batch_size,) + patch_shape)\n', (3533, 3562), True, 'import numpy as np\n'), ((3847, 3874), 'os.path.isfile', 'os.path.isfile', (['disc_B_path'], {}), '(disc_B_path)\n', (3861, 3874), False, 'import sys, time, os, json\n'), ((7463, 7493), 'numpy.concatenate', 'np.concatenate', (['imgs_A'], {'axis': '(1)'}), '(imgs_A, axis=1)\n', (7477, 7493), True, 'import numpy as np\n'), ((7508, 7538), 'numpy.concatenate', 'np.concatenate', (['imgs_B'], {'axis': '(1)'}), '(imgs_B, axis=1)\n', (7522, 7538), True, 'import numpy as np\n'), ((7553, 7583), 'numpy.concatenate', 'np.concatenate', (['fake_A'], {'axis': '(1)'}), '(fake_A, axis=1)\n', (7567, 7583), True, 'import numpy as np\n'), ((7596, 7644), 'numpy.concatenate', 'np.concatenate', (['(imgs_B, imgs_A, fake_A)'], {'axis': '(0)'}), '((imgs_B, imgs_A, fake_A), axis=0)\n', (7610, 7644), True, 'import numpy as np\n'), ((7659, 7686), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 6)'}), '(figsize=(20, 6))\n', (7669, 7686), True, 'import matplotlib.pylab as 
plt\n'), ((7692, 7708), 'matplotlib.pylab.title', 'plt.title', (['title'], {}), '(title)\n', (7701, 7708), True, 'import matplotlib.pylab as plt\n'), ((7714, 7730), 'matplotlib.pylab.imshow', 'plt.imshow', (['imgs'], {}), '(imgs)\n', (7724, 7730), True, 'import matplotlib.pylab as plt\n'), ((7736, 7751), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7744, 7751), True, 'import matplotlib.pylab as plt\n'), ((7757, 7767), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (7765, 7767), True, 'import matplotlib.pylab as plt\n'), ((5166, 5177), 'time.time', 'time.time', ([], {}), '()\n', (5175, 5177), False, 'import sys, time, os, json\n'), ((7193, 7236), 'numpy.random.choice', 'np.random.choice', (['limit', 'num'], {'replace': '(False)'}), '(limit, num, replace=False)\n', (7209, 7236), True, 'import numpy as np\n'), ((473, 496), 'keras_contrib.layers.normalization.instancenormalization.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (494, 496), False, 'from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization\n'), ((757, 780), 'keras_contrib.layers.normalization.instancenormalization.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (778, 780), False, 'from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization\n'), ((2448, 2460), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2457, 2460), False, 'import sys, time, os, json\n'), ((5276, 5330), 'numpy.random.choice', 'np.random.choice', (['train_num', 'batch_size'], {'replace': '(False)'}), '(train_num, batch_size, replace=False)\n', (5292, 5330), True, 'import numpy as np\n'), ((5410, 5464), 'numpy.random.choice', 'np.random.choice', (['train_num', 'batch_size'], {'replace': '(False)'}), '(train_num, batch_size, replace=False)\n', (5426, 5464), True, 'import numpy as np\n'), ((6429, 6447), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6445, 6447), False, 'import sys, time, os, 
json\n'), ((6936, 6954), 'json.dump', 'json.dump', (['info', 'f'], {}), '(info, f)\n', (6945, 6954), False, 'import sys, time, os, json\n'), ((7782, 7803), 'PIL.Image.fromarray', 'Image.fromarray', (['imgs'], {}), '(imgs)\n', (7797, 7803), False, 'from PIL import Image\n'), ((1427, 1450), 'keras_contrib.layers.normalization.instancenormalization.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (1448, 1450), False, 'from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization\n'), ((5797, 5829), 'numpy.add', 'np.add', (['d_loss_real', 'd_loss_fake'], {}), '(d_loss_real, d_loss_fake)\n', (5803, 5829), True, 'import numpy as np\n'), ((5986, 6018), 'numpy.add', 'np.add', (['d_loss_real', 'd_loss_fake'], {}), '(d_loss_real, d_loss_fake)\n', (5992, 6018), True, 'import numpy as np\n'), ((6047, 6073), 'numpy.add', 'np.add', (['d_loss_A', 'd_loss_B'], {}), '(d_loss_A, d_loss_B)\n', (6053, 6073), True, 'import numpy as np\n'), ((7017, 7035), 'PIL.Image.fromarray', 'Image.fromarray', (['x'], {}), '(x)\n', (7032, 7035), False, 'from PIL import Image\n'), ((6365, 6376), 'time.time', 'time.time', ([], {}), '()\n', (6374, 6376), False, 'import sys, time, os, json\n')]
|
import argparse
import os
import sys
import cv2
import numpy as np
from matplotlib import pyplot as plt
from functools import cmp_to_key
from fhi_lib.geometry import Point, Line
class DistanceEstimator():
    """Estimate real-world distances in an image using the cross-ratio.

    Two physical reference scales are detected in the picture by color;
    their corners define a vertical and a horizontal reference line, and
    the projective-invariant cross-ratio then maps image positions to
    world distances along the panel.
    """

    def __init__(self, img):
        # img: BGR image (OpenCV convention) containing both scales
        self.img = img
        # NOTE(review): units appear to be physical lengths of the panel and
        # scale (presumably millimetres) -- confirm against the hardware spec
        self.panel_length = 2235
        self.scale_length = 100

    def initialize(self):
        """Detect the scales and derive all reference points and lines."""
        self.__find_scales()
        self.__form_reference_points()
        self.__shift_accessory_coordinate_init()
        print('Estimator initialized')

    def initialize_with_pt(self, pt):
        """Like initialize(), but overrides the far vertical reference
        point with a manually supplied image point."""
        self.__find_scales()
        self.__form_reference_points()
        self.vertical_pt2 = Point(pt)
        self.__shift_accessory_coordinate_init()
        print('Estimator initialized')

    def display_reference_pts(self, img):
        """Draw the four reference points (outer ring + centre dot) onto
        img and return the annotated image."""
        img = cv2.circle(img, self.origin.get_point_tuple(), 20, (0,0,0), 3)
        img = cv2.circle(img, self.horizontal_pt.get_point_tuple(), 20, (0,255,0), 3)
        img = cv2.circle(img, self.vertical_pt.get_point_tuple(), 20, (255,0,0), 3)
        img = cv2.circle(img, self.vertical_pt2.get_point_tuple(), 20, (255,0,0), 3)
        img = cv2.circle(img, self.origin.get_point_tuple(), 0, (0,0,255), 3)
        img = cv2.circle(img, self.horizontal_pt.get_point_tuple(), 0, (0,0,255), 3)
        img = cv2.circle(img, self.vertical_pt.get_point_tuple(), 0, (0,0,255), 3)
        img = cv2.circle(img, self.vertical_pt2.get_point_tuple(), 0, (0,0,255), 3)
        return img

    def estimate(self, pt_itr):
        """Project pt_itr onto the vertical reference line and return its
        cross-ratio distance as a caption string '<int>\\n'."""
        img_intersection = self.__shift_accessory_coordinate(pt_itr)
        dist = self.__cross_ratio(img_intersection)
        caption = '{}\n'.format(int(dist))
        return caption

    def __find_scales(self):
        """Locate the two reference scales by color and store them as
        (area, [approx_hull, contour]) in self.near_scale/self.far_scale."""
        ### Image Processing, convert rgb to hsv and find the scale by its color ###
        blur = cv2.GaussianBlur(self.img, (5,5), 0)
        img_hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
        # HSV range tuned for the scale's (greenish-yellow) color
        img_threshold = cv2.inRange(img_hsv, (45,20,230), (90,220,255))
        # dilate-then-erode (morphological closing) to fill small holes
        morphology_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
        dilation = cv2.dilate(img_threshold, morphology_kernel, iterations=3)
        thresh = cv2.erode(dilation, morphology_kernel, iterations=3)
        '''
        compare_img = np.hstack((img_threshold,thresh))
        plt.imshow(compare_img)
        plt.show()
        '''
        ### Crop the image as we know the scale is always on the left half of the image ###
        cropped_thresh = thresh[:, 0:int(thresh.shape[1]/2)]
        contours, _ = cv2.findContours(image=cropped_thresh,
                                        mode=cv2.RETR_EXTERNAL,
                                        method=cv2.CHAIN_APPROX_SIMPLE)
        ### Discard contours that are not quadrilaterals and smaller than 4000 pixels###
        result_contours = {}
        epsilon = 30
        minimal_area = 1000
        for contour in contours:
            contour_area = cv2.contourArea(contour)
            if contour_area > minimal_area:
                hull = cv2.convexHull(contour)
                approxCurve = cv2.approxPolyDP(hull, epsilon, True)
                if len(approxCurve) == 4:
                    result_contours.update({contour_area : [approxCurve, contour]})
        self.__verify_shape(result_contours)
        # sort the dictionary based on the size of the area
        result_contours = sorted(result_contours.items())
        # pick the contour with the largest area as near scale, and the second as far scale
        # NOTE(review): this assumes exactly two contours survive -- with more
        # survivors, indices [1]/[0] pick the two *smallest* areas, not the
        # largest; confirm whether that is intended.
        self.near_scale = result_contours[1]
        self.far_scale = result_contours[0]

    def __verify_shape(self, result_contours):
        """Drop candidate quadrilaterals whose opposite sides differ in
        length by more than the tolerance (i.e. not parallelogram-like)."""
        # For a parallel shape, the length of the two opposite sides should be approximately the same.
        tolerance = 0.55
        remove_keys = []
        for key in result_contours.keys():
            pts = result_contours[key][0]
            pts = pts[:,0,:]
            pt1 = Point(pts[0])
            pt2 = Point(pts[1])
            pt3 = Point(pts[2])
            pt4 = Point(pts[3])
            # NOTE(review): np.int is deprecated (removed in NumPy 1.24+);
            # these astype calls will break on recent NumPy versions.
            dist1_2 = pt1.get_distance(pt2).astype(np.int)
            dist3_4 = pt3.get_distance(pt4).astype(np.int)
            dist1_4 = pt1.get_distance(pt4).astype(np.int)
            dist2_3 = pt2.get_distance(pt3).astype(np.int)
            if np.absolute(dist1_2 - dist3_4) / np.min([dist1_2, dist3_4])> tolerance:
                remove_keys.append(key)
                continue
            elif np.absolute(dist1_4 - dist2_3) / np.min([dist1_4, dist2_3])> tolerance:
                remove_keys.append(key)
                continue
        for remove_key in remove_keys:
            del result_contours[remove_key]

    def __form_reference_points(self):
        """Derive origin / horizontal / vertical reference points from the
        oriented corner hulls of the two detected scales."""
        # strip the redundant middle axis of the hull arrays
        self.near_scale[1][0] = self.near_scale[1][0][:,0,:]
        self.far_scale[1][0] = self.far_scale[1][0][:,0,:]
        self.far_scale[1][0] = self.__set_orientation_hull(self.far_scale[1][0])
        self.near_scale[1][0] = self.__set_orientation_hull(self.near_scale[1][0])
        # corners of the oriented hull: 0=top-left, 1=bottom-left, 3=bottom-right
        self.origin = Point(self.near_scale[1][0][1])
        self.vertical_pt = Point(self.near_scale[1][0][0])
        self.horizontal_pt = Point(self.near_scale[1][0][3])
        self.vertical_pt2 = Point(self.far_scale[1][0][0])

    def __set_orientation_hull(self, scale):
        """Rotate the 4 hull vertices so index 0 is the top-left corner.

        :param scale: (4, 2) array of corner coordinates
        :return: the reordered (4, 2) array
        """
        # Assuming the scale is placed on the left half of the image.
        # The first vertex should be top left. If it's not the case, then reorder the verticies.
        order = scale[:,0].argsort()
        if order[0].astype(int) == 0:
            ## 1 2 ##
            ## 0 3 ##
            # The first vertex is at bottom left instead of top left. Reorder the verticies.
            scale = scale[[1,0,3,2]]
        elif order[0].astype(int) == 1:
            ## 2 3 ##
            ## 1 0 ##
            # The first vertex is at bottom left instead of top left. Reorder the verticies.
            scale = scale[[2,1,0,3]]
        elif order[0].astype(int) == 2:
            ## 3 0 ##
            ## 2 1 ##
            scale = scale[[3,2,1,0]]
        elif order[0].astype(int) == 3:
            ## 0 1 ##
            ## 3 2 ##
            scale = scale[[0,3,2,1]]
        return scale

    def __shift_accessory_coordinate_init(self):
        """Build the vertical and horizontal reference Lines in the math
        (y-up) coordinate system."""
        math_origin = self.origin.switch_coordinate_system(self.img)
        math_horizontal_pt = self.horizontal_pt.switch_coordinate_system(self.img)
        math_vertical_pt2 = self.vertical_pt2.switch_coordinate_system(self.img)
        math_vertical_pt = self.vertical_pt.switch_coordinate_system(self.img)
        self.vertical_reference_line = Line(math_origin, math_vertical_pt2)
        self.horizontal_reference_line = Line(math_vertical_pt, math_horizontal_pt)

    def __shift_accessory_coordinate(self, pt):
        """Project an image point onto the vertical reference line (along
        the horizontal reference direction) and return the intersection in
        image coordinates."""
        math_pt = pt.switch_coordinate_system(self.img)
        slope_proj, intercept_proj = math_pt.get_projected_line(self.horizontal_reference_line.get_slope())
        math_intersection = self.vertical_reference_line.calculate_intersection(slope_proj, intercept_proj)
        img_intersection = math_intersection.switch_coordinate_system(self.img)
        return img_intersection

    def __cross_ratio(self, intersection):
        """Convert the projected image point to a world distance from the
        origin using the cross-ratio of four collinear points.

        :param intersection: projected point on the vertical reference line
        :return: world distance ACw from the origin along the panel
        """
        ### AC*BD/(CD*AB) = A'C'*B'D'/(C'D'*A'B') ###
        # Image cross ratio
        # AB(scale_length): origin to vertical_pt (scale_pixel_dist)
        # CD: accessory_pt to vertical_pt2
        # BD: vertical_pt to vertical_pt2
        # AC(interested_length): origin to accessory_pt
        AB = self.origin.get_distance(self.vertical_pt.get_point())
        CD = intersection.get_distance(self.vertical_pt2.get_point())
        BD = self.vertical_pt.get_distance(self.vertical_pt2.get_point())
        AC = self.origin.get_distance(intersection.get_point())
        image_ratio = AC*BD/CD/AB
        # World cross ratio (solved for the unknown world distance ACw)
        ABw = self.scale_length
        ADw = self.panel_length
        BDw = self.panel_length - self.scale_length
        ACw = image_ratio*ABw*ADw/(BDw+image_ratio*ABw)
        return ACw
|
[
"cv2.GaussianBlur",
"cv2.contourArea",
"numpy.absolute",
"cv2.dilate",
"cv2.cvtColor",
"cv2.getStructuringElement",
"cv2.approxPolyDP",
"fhi_lib.geometry.Line",
"numpy.min",
"fhi_lib.geometry.Point",
"cv2.convexHull",
"cv2.erode",
"cv2.inRange",
"cv2.findContours"
] |
[((574, 583), 'fhi_lib.geometry.Point', 'Point', (['pt'], {}), '(pt)\n', (579, 583), False, 'from fhi_lib.geometry import Point, Line\n'), ((1635, 1672), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['self.img', '(5, 5)', '(0)'], {}), '(self.img, (5, 5), 0)\n', (1651, 1672), False, 'import cv2\n'), ((1684, 1721), 'cv2.cvtColor', 'cv2.cvtColor', (['blur', 'cv2.COLOR_BGR2HSV'], {}), '(blur, cv2.COLOR_BGR2HSV)\n', (1696, 1721), False, 'import cv2\n'), ((1741, 1792), 'cv2.inRange', 'cv2.inRange', (['img_hsv', '(45, 20, 230)', '(90, 220, 255)'], {}), '(img_hsv, (45, 20, 230), (90, 220, 255))\n', (1752, 1792), False, 'import cv2\n'), ((1812, 1861), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(5, 5)'], {}), '(cv2.MORPH_RECT, (5, 5))\n', (1837, 1861), False, 'import cv2\n'), ((1874, 1932), 'cv2.dilate', 'cv2.dilate', (['img_threshold', 'morphology_kernel'], {'iterations': '(3)'}), '(img_threshold, morphology_kernel, iterations=3)\n', (1884, 1932), False, 'import cv2\n'), ((1944, 1996), 'cv2.erode', 'cv2.erode', (['dilation', 'morphology_kernel'], {'iterations': '(3)'}), '(dilation, morphology_kernel, iterations=3)\n', (1953, 1996), False, 'import cv2\n'), ((2257, 2356), 'cv2.findContours', 'cv2.findContours', ([], {'image': 'cropped_thresh', 'mode': 'cv2.RETR_EXTERNAL', 'method': 'cv2.CHAIN_APPROX_SIMPLE'}), '(image=cropped_thresh, mode=cv2.RETR_EXTERNAL, method=cv2.\n CHAIN_APPROX_SIMPLE)\n', (2273, 2356), False, 'import cv2\n'), ((4327, 4358), 'fhi_lib.geometry.Point', 'Point', (['self.near_scale[1][0][1]'], {}), '(self.near_scale[1][0][1])\n', (4332, 4358), False, 'from fhi_lib.geometry import Point, Line\n'), ((4380, 4411), 'fhi_lib.geometry.Point', 'Point', (['self.near_scale[1][0][0]'], {}), '(self.near_scale[1][0][0])\n', (4385, 4411), False, 'from fhi_lib.geometry import Point, Line\n'), ((4435, 4466), 'fhi_lib.geometry.Point', 'Point', (['self.near_scale[1][0][3]'], {}), '(self.near_scale[1][0][3])\n', (4440, 4466), False, 'from 
fhi_lib.geometry import Point, Line\n'), ((4489, 4519), 'fhi_lib.geometry.Point', 'Point', (['self.far_scale[1][0][0]'], {}), '(self.far_scale[1][0][0])\n', (4494, 4519), False, 'from fhi_lib.geometry import Point, Line\n'), ((5652, 5688), 'fhi_lib.geometry.Line', 'Line', (['math_origin', 'math_vertical_pt2'], {}), '(math_origin, math_vertical_pt2)\n', (5656, 5688), False, 'from fhi_lib.geometry import Point, Line\n'), ((5726, 5768), 'fhi_lib.geometry.Line', 'Line', (['math_vertical_pt', 'math_horizontal_pt'], {}), '(math_vertical_pt, math_horizontal_pt)\n', (5730, 5768), False, 'from fhi_lib.geometry import Point, Line\n'), ((2565, 2589), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (2580, 2589), False, 'import cv2\n'), ((3414, 3427), 'fhi_lib.geometry.Point', 'Point', (['pts[0]'], {}), '(pts[0])\n', (3419, 3427), False, 'from fhi_lib.geometry import Point, Line\n'), ((3437, 3450), 'fhi_lib.geometry.Point', 'Point', (['pts[1]'], {}), '(pts[1])\n', (3442, 3450), False, 'from fhi_lib.geometry import Point, Line\n'), ((3460, 3473), 'fhi_lib.geometry.Point', 'Point', (['pts[2]'], {}), '(pts[2])\n', (3465, 3473), False, 'from fhi_lib.geometry import Point, Line\n'), ((3483, 3496), 'fhi_lib.geometry.Point', 'Point', (['pts[3]'], {}), '(pts[3])\n', (3488, 3496), False, 'from fhi_lib.geometry import Point, Line\n'), ((2636, 2659), 'cv2.convexHull', 'cv2.convexHull', (['contour'], {}), '(contour)\n', (2650, 2659), False, 'import cv2\n'), ((2678, 2715), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['hull', 'epsilon', '(True)'], {}), '(hull, epsilon, True)\n', (2694, 2715), False, 'import cv2\n'), ((3705, 3735), 'numpy.absolute', 'np.absolute', (['(dist1_2 - dist3_4)'], {}), '(dist1_2 - dist3_4)\n', (3716, 3735), True, 'import numpy as np\n'), ((3738, 3764), 'numpy.min', 'np.min', (['[dist1_2, dist3_4]'], {}), '([dist1_2, dist3_4])\n', (3744, 3764), True, 'import numpy as np\n'), ((3826, 3856), 'numpy.absolute', 'np.absolute', (['(dist1_4 - dist2_3)'], 
{}), '(dist1_4 - dist2_3)\n', (3837, 3856), True, 'import numpy as np\n'), ((3859, 3885), 'numpy.min', 'np.min', (['[dist1_4, dist2_3]'], {}), '([dist1_4, dist2_3])\n', (3865, 3885), True, 'import numpy as np\n')]
|
from distutils.core import setup, Extension
import glob
import numpy
import config
import sys
import os
from config import ROOT
# Header search paths for the extension build.
includes = [os.path.join(ROOT, "Include"), os.path.join(ROOT, "PrivateInclude"), os.path.join("cmsisdsp_pkg", "src")]

if sys.platform == 'win32':
    cflags = ["-DWIN", config.cflags, "-DUNALIGNED_SUPPORT_DISABLE"]
    # Custom because a customized arm_math.h is required to build on windows
    # since the visual compiler and the win platform are
    # not supported by default in arm_math.h
else:
    cflags = ["-Wno-unused-variable", "-Wno-implicit-function-declaration", config.cflags, "-D__GNUC_PYTHON__"]

def collect_sources(folder, aggregating_file):
    """Return every .c file of a CMSIS-DSP source folder, excluding the
    single aggregating wrapper file (which would duplicate every symbol).

    :param folder: folder name under <ROOT>/Source
    :param aggregating_file: wrapper .c file to drop from the list
    """
    files = glob.glob(os.path.join(ROOT, "Source", folder, "*.c"))
    files.remove(os.path.join(ROOT, "Source", folder, aggregating_file))
    return files

transform = collect_sources("TransformFunctions", "TransformFunctions.c")
support = collect_sources("SupportFunctions", "SupportFunctions.c")
fastmath = collect_sources("FastMathFunctions", "FastMathFunctions.c")
filtering = collect_sources("FilteringFunctions", "FilteringFunctions.c")
matrix = collect_sources("MatrixFunctions", "MatrixFunctions.c")
statistics = collect_sources("StatisticsFunctions", "StatisticsFunctions.c")
complexf = collect_sources("ComplexMathFunctions", "ComplexMathFunctions.c")
basic = collect_sources("BasicMathFunctions", "BasicMathFunctions.c")
controller = collect_sources("ControllerFunctions", "ControllerFunctions.c")
common = collect_sources("CommonTables", "CommonTables.c")

# Only the generated Python wrapper is compiled from cmsisdsp_pkg.
modulesrc = [os.path.join("cmsisdsp_pkg", "src", "cmsismodule.c")]

module1 = Extension(config.extensionName,
                    sources=(support
                             + fastmath
                             + filtering
                             + matrix
                             + statistics
                             + complexf
                             + basic
                             + controller
                             + transform
                             + modulesrc
                             + common
                             ),
                    include_dirs=includes + [numpy.get_include()],
                    extra_compile_args=cflags
                    )

setup(name=config.setupName,
      version='0.0.1',
      description=config.setupDescription,
      ext_modules=[module1],
      author='Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved.',
      url="https://github.com/ARM-software/CMSIS_5",
      classifiers=[
          "Programming Language :: Python",
          "License :: OSI Approved :: Apache Software License",
          "Operating System :: OS Independent",
      ])
|
[
"numpy.get_include",
"os.path.join",
"distutils.core.setup"
] |
[((3420, 3826), 'distutils.core.setup', 'setup', ([], {'name': 'config.setupName', 'version': '"""0.0.1"""', 'description': 'config.setupDescription', 'ext_modules': '[module1]', 'author': '"""Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved."""', 'url': '"""https://github.com/ARM-software/CMSIS_5"""', 'classifiers': "['Programming Language :: Python',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent']"}), "(name=config.setupName, version='0.0.1', description=config.\n setupDescription, ext_modules=[module1], author=\n 'Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved.'\n , url='https://github.com/ARM-software/CMSIS_5', classifiers=[\n 'Programming Language :: Python',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent'])\n", (3425, 3826), False, 'from distutils.core import setup, Extension\n'), ((141, 170), 'os.path.join', 'os.path.join', (['ROOT', '"""Include"""'], {}), "(ROOT, 'Include')\n", (153, 170), False, 'import os\n'), ((170, 206), 'os.path.join', 'os.path.join', (['ROOT', '"""PrivateInclude"""'], {}), "(ROOT, 'PrivateInclude')\n", (182, 206), False, 'import os\n'), ((206, 241), 'os.path.join', 'os.path.join', (['"""cmsisdsp_pkg"""', '"""src"""'], {}), "('cmsisdsp_pkg', 'src')\n", (218, 241), False, 'import os\n'), ((646, 703), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""TransformFunctions"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'TransformFunctions', '*.c')\n", (658, 703), False, 'import os\n'), ((899, 973), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""TransformFunctions"""', '"""TransformFunctions.c"""'], {}), "(ROOT, 'Source', 'TransformFunctions', 'TransformFunctions.c')\n", (911, 973), False, 'import os\n'), ((993, 1048), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""SupportFunctions"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'SupportFunctions', '*.c')\n", (1005, 
1048), False, 'import os\n'), ((1062, 1132), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""SupportFunctions"""', '"""SupportFunctions.c"""'], {}), "(ROOT, 'Source', 'SupportFunctions', 'SupportFunctions.c')\n", (1074, 1132), False, 'import os\n'), ((1153, 1209), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""FastMathFunctions"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'FastMathFunctions', '*.c')\n", (1165, 1209), False, 'import os\n'), ((1224, 1296), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""FastMathFunctions"""', '"""FastMathFunctions.c"""'], {}), "(ROOT, 'Source', 'FastMathFunctions', 'FastMathFunctions.c')\n", (1236, 1296), False, 'import os\n'), ((1318, 1375), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""FilteringFunctions"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'FilteringFunctions', '*.c')\n", (1330, 1375), False, 'import os\n'), ((1391, 1465), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""FilteringFunctions"""', '"""FilteringFunctions.c"""'], {}), "(ROOT, 'Source', 'FilteringFunctions', 'FilteringFunctions.c')\n", (1403, 1465), False, 'import os\n'), ((1484, 1538), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""MatrixFunctions"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'MatrixFunctions', '*.c')\n", (1496, 1538), False, 'import os\n'), ((1551, 1619), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""MatrixFunctions"""', '"""MatrixFunctions.c"""'], {}), "(ROOT, 'Source', 'MatrixFunctions', 'MatrixFunctions.c')\n", (1563, 1619), False, 'import os\n'), ((1642, 1700), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""StatisticsFunctions"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'StatisticsFunctions', '*.c')\n", (1654, 1700), False, 'import os\n'), ((1717, 1793), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""StatisticsFunctions"""', '"""StatisticsFunctions.c"""'], {}), "(ROOT, 'Source', 'StatisticsFunctions', 
'StatisticsFunctions.c')\n", (1729, 1793), False, 'import os\n'), ((1814, 1873), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""ComplexMathFunctions"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'ComplexMathFunctions', '*.c')\n", (1826, 1873), False, 'import os\n'), ((1888, 1966), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""ComplexMathFunctions"""', '"""ComplexMathFunctions.c"""'], {}), "(ROOT, 'Source', 'ComplexMathFunctions', 'ComplexMathFunctions.c')\n", (1900, 1966), False, 'import os\n'), ((1984, 2041), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""BasicMathFunctions"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'BasicMathFunctions', '*.c')\n", (1996, 2041), False, 'import os\n'), ((2053, 2127), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""BasicMathFunctions"""', '"""BasicMathFunctions.c"""'], {}), "(ROOT, 'Source', 'BasicMathFunctions', 'BasicMathFunctions.c')\n", (2065, 2127), False, 'import os\n'), ((2150, 2208), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""ControllerFunctions"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'ControllerFunctions', '*.c')\n", (2162, 2208), False, 'import os\n'), ((2225, 2301), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""ControllerFunctions"""', '"""ControllerFunctions.c"""'], {}), "(ROOT, 'Source', 'ControllerFunctions', 'ControllerFunctions.c')\n", (2237, 2301), False, 'import os\n'), ((2320, 2371), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""CommonTables"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'CommonTables', '*.c')\n", (2332, 2371), False, 'import os\n'), ((2384, 2446), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""CommonTables"""', '"""CommonTables.c"""'], {}), "(ROOT, 'Source', 'CommonTables', 'CommonTables.c')\n", (2396, 2446), False, 'import os\n'), ((2543, 2595), 'os.path.join', 'os.path.join', (['"""cmsisdsp_pkg"""', '"""src"""', '"""cmsismodule.c"""'], {}), "('cmsisdsp_pkg', 'src', 
'cmsismodule.c')\n", (2555, 2595), False, 'import os\n'), ((3199, 3218), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (3216, 3218), False, 'import numpy\n')]
|
# Copyright 2021, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_privacy.privacy.keras_models import dp_keras_model
def get_data():
  """Returns a one-example regression dataset as (features, labels).

  Data is for hidden weights of [3, 1] and bias of 2.  With mean squared
  loss, we expect loss = 15^2 = 225, gradients of weights = [90, 120], and
  gradient of bias = 30.
  """
  features = np.array([[3, 4]])
  targets = np.matmul(features, [[3], [1]]) + 2
  return features, targets
class DPKerasModelTest(tf.test.TestCase, parameterized.TestCase):
  # Each test trains a tiny linear model for one epoch with
  # dp_keras_model.DPSequential and compares the learned parameters against
  # values computed by hand from the DP-SGD update rule
  # (per-microbatch mean gradient -> clip -> add noise -> average -> SGD step).
  def testBaseline(self):
    """Tests that DPSequential works when DP-SGD has no effect."""
    train_data, train_labels = get_data()
    # Simple linear model returns w * x + b.
    # A huge clip norm plus zero noise makes DP-SGD identical to plain SGD.
    model = dp_keras_model.DPSequential(
        l2_norm_clip=1.0e9,
        noise_multiplier=0.0,
        layers=[
            tf.keras.layers.InputLayer(input_shape=(2,)),
            tf.keras.layers.Dense(
                1, kernel_initializer='zeros', bias_initializer='zeros')
        ])
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
    loss = tf.keras.losses.MeanSquaredError()
    model.compile(optimizer=optimizer, loss=loss)
    model.fit(train_data, train_labels, epochs=1, batch_size=1)
    model_weights = model.get_weights()
    # Check parameters are as expected, taking into account the learning rate.
    # get_data yields gradients [90, 120] (weights) and 30 (bias); one SGD
    # step with lr=0.01 moves the zero-initialized parameters by lr * grad.
    self.assertAllClose(model_weights[0], [[0.90], [1.20]])
    self.assertAllClose(model_weights[1], [0.30])
  @parameterized.named_parameters(
      ('l2_norm_clip 10.0', 10.0),
      ('l2_norm_clip 40.0', 40.0),
      ('l2_norm_clip 200.0', 200.0),
  )
  def testClippingNorm(self, l2_norm_clip):
    """Tests that clipping norm works."""
    train_data, train_labels = get_data()
    # Simple linear model returns w * x + b.
    model = dp_keras_model.DPSequential(
        l2_norm_clip=l2_norm_clip,
        noise_multiplier=0.0,
        layers=[
            tf.keras.layers.InputLayer(input_shape=(2,)),
            tf.keras.layers.Dense(
                1, kernel_initializer='zeros', bias_initializer='zeros')
        ])
    learning_rate = 0.01
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
    loss = tf.keras.losses.MeanSquaredError()
    model.compile(optimizer=optimizer, loss=loss)
    model.fit(train_data, train_labels, epochs=1, batch_size=1)
    model_weights = model.get_weights()
    # The single-example gradient is [90, 120, 30]; clipping rescales it
    # whenever its L2 norm exceeds l2_norm_clip.
    unclipped_gradient = np.sqrt(90**2 + 120**2 + 30**2)
    scale = min(1.0, l2_norm_clip / unclipped_gradient)
    expected_weights = np.array([[90], [120]]) * scale * learning_rate
    expected_bias = np.array([30]) * scale * learning_rate
    # Check parameters are as expected, taking into account the learning rate.
    self.assertAllClose(model_weights[0], expected_weights)
    self.assertAllClose(model_weights[1], expected_bias)
  def _compute_expected_gradients(self, data, labels, w, l2_norm_clip,
                                  num_microbatches):
    # Reference (pure NumPy) DP-SGD gradient for a bias-free linear model with
    # squared-error loss: per-microbatch mean gradient, clipped to
    # l2_norm_clip, then averaged over microbatches.  No noise is added.
    batch_size = data.shape[0]
    if num_microbatches is None:
      # None means "one microbatch per example".
      num_microbatches = batch_size
    preds = np.matmul(data, w)
    # Per-example gradient contribution of the squared-error loss.
    grads = 2 * data * (labels - preds)[:, np.newaxis]
    grads = np.reshape(grads,
                       [num_microbatches, batch_size // num_microbatches, -1])
    mb_grads = np.mean(grads, axis=1)
    mb_grad_norms = np.linalg.norm(mb_grads, axis=1)
    # Clip each microbatch gradient to at most l2_norm_clip in L2 norm.
    scale = np.minimum(l2_norm_clip / mb_grad_norms, 1.0)
    mb_grads = mb_grads * scale[:, np.newaxis]
    final_grads = np.mean(mb_grads, axis=0)
    return final_grads
  @parameterized.named_parameters(
      ('mb_test 0', 1.0, None),
      ('mb_test 1', 1.0, 1),
      ('mb_test 2', 1.0, 2),
      ('mb_test 4', 1.0, 4),
  )
  def testMicrobatches(self, l2_norm_clip, num_microbatches):
    """Tests that trained weights match the NumPy reference for each microbatch count."""
    train_data = np.array([[2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]])
    w = np.zeros((2))
    train_labels = np.array([1.0, 3.0, -2.0, -4.0])
    learning_rate = 1.0
    expected_grads = self._compute_expected_gradients(train_data, train_labels,
                                                      w, l2_norm_clip,
                                                      num_microbatches)
    expected_weights = np.squeeze(learning_rate * expected_grads)
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
    loss = tf.keras.losses.MeanSquaredError()
    # Simple linear model returns w * x + b.
    model = dp_keras_model.DPSequential(
        l2_norm_clip=l2_norm_clip,
        noise_multiplier=0.0,
        num_microbatches=num_microbatches,
        layers=[
            tf.keras.layers.InputLayer(input_shape=(2,)),
            tf.keras.layers.Dense(
                1, use_bias=False, kernel_initializer='zeros')
        ])
    model.compile(optimizer=optimizer, loss=loss)
    # shuffle=False so microbatch boundaries match the reference computation.
    model.fit(train_data, train_labels, epochs=1, batch_size=4, shuffle=False)
    model_weights = np.squeeze(model.get_weights())
    self.assertAllClose(model_weights, expected_weights)
  @parameterized.named_parameters(
      ('noise_multiplier 3 2 1', 3.0, 2.0, 1),
      ('noise_multiplier 5 4 1', 5.0, 4.0, 1),
      ('noise_multiplier 3 2 2', 3.0, 2.0, 2),
      ('noise_multiplier 5 4 2', 5.0, 4.0, 2),
      ('noise_multiplier 3 2 4', 3.0, 2.0, 4),
      ('noise_multiplier 5 4 4', 5.0, 4.0, 4),
  )
  def testNoiseMultiplier(self, l2_norm_clip, noise_multiplier,
                          num_microbatches):
    # The idea behind this test is to start with a model whose parameters
    # are set to zero. We then run one step of a model that produces
    # an un-noised gradient of zero, and then compute the standard deviation
    # of the resulting weights to see if it matches the expected standard
    # deviation.
    # Data is one example of length 1000, set to zero, with label zero.
    train_data = np.zeros((4, 1000))
    train_labels = np.array([0.0, 0.0, 0.0, 0.0])
    learning_rate = 1.0
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
    loss = tf.keras.losses.MeanSquaredError()
    # Simple linear model returns w * x + b.
    model = dp_keras_model.DPSequential(
        l2_norm_clip=l2_norm_clip,
        noise_multiplier=noise_multiplier,
        num_microbatches=num_microbatches,
        layers=[
            tf.keras.layers.InputLayer(input_shape=(1000,)),
            tf.keras.layers.Dense(
                1, kernel_initializer='zeros', bias_initializer='zeros')
        ])
    model.compile(optimizer=optimizer, loss=loss)
    model.fit(train_data, train_labels, epochs=1, batch_size=4)
    model_weights = model.get_weights()
    # With lr=1 and an all-zero un-noised gradient, the weights after one step
    # are pure noise; their empirical std over 1000 coords estimates its scale.
    measured_std = np.std(model_weights[0])
    expected_std = l2_norm_clip * noise_multiplier / num_microbatches
    # Test standard deviation is close to l2_norm_clip * noise_multiplier.
    self.assertNear(measured_std, expected_std, 0.1 * expected_std)
  # Simple check to make sure dimensions are correct when output has
  # dimension > 1.
  @parameterized.named_parameters(
      ('mb_test None 1', None, 1),
      ('mb_test 1 2', 1, 2),
      ('mb_test 2 2', 2, 2),
      ('mb_test 4 4', 4, 4),
  )
  def testMultiDimensionalOutput(self, num_microbatches, output_dimension):
    train_data = np.array([[2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]])
    train_labels = np.array([0, 1, 1, 0])
    learning_rate = 1.0
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    model = dp_keras_model.DPSequential(
        l2_norm_clip=1.0e9,
        noise_multiplier=0.0,
        num_microbatches=num_microbatches,
        layers=[
            tf.keras.layers.InputLayer(input_shape=(2,)),
            tf.keras.layers.Dense(
                output_dimension, use_bias=False, kernel_initializer='zeros')
        ])
    model.compile(optimizer=optimizer, loss=loss_fn)
    # Success criterion is simply that fit() runs without shape errors.
    model.fit(train_data, train_labels, epochs=1, batch_size=4, shuffle=False)
  # Checks that calls to earlier API using `use_xla` as a positional argument
  # raise an exception.
  @parameterized.named_parameters(
      ('earlier API True', True),
      ('earlier API False', False),
  )
  def testEarlierAPIFails(self, use_xla):
    with self.assertRaises(ValueError):
      _ = dp_keras_model.DPSequential(
          1.0e9,
          0.0,
          use_xla,
          layers=[
              tf.keras.layers.InputLayer(input_shape=(2,)),
              tf.keras.layers.Dense(
                  2, use_bias=False, kernel_initializer='zeros')
          ])
if __name__ == '__main__':
  # Run the test suite under the TensorFlow test runner.
  tf.test.main()
|
[
"tensorflow.test.main",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"numpy.minimum",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.keras.layers.Dense",
"numpy.std",
"tensorflow.keras.optimizers.SGD",
"numpy.zeros",
"tensorflow.keras.layers.InputLayer",
"numpy.mean",
"numpy.array",
"numpy.reshape",
"numpy.matmul",
"numpy.linalg.norm",
"numpy.squeeze",
"absl.testing.parameterized.named_parameters",
"numpy.sqrt"
] |
[((946, 964), 'numpy.array', 'np.array', (['[[3, 4]]'], {}), '([[3, 4]])\n', (954, 964), True, 'import numpy as np\n'), ((2029, 2153), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('l2_norm_clip 10.0', 10.0)", "('l2_norm_clip 40.0', 40.0)", "('l2_norm_clip 200.0', 200.0)"], {}), "(('l2_norm_clip 10.0', 10.0), (\n 'l2_norm_clip 40.0', 40.0), ('l2_norm_clip 200.0', 200.0))\n", (2059, 2153), False, 'from absl.testing import parameterized\n'), ((4076, 4205), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('mb_test 0', 1.0, None)", "('mb_test 1', 1.0, 1)", "('mb_test 2', 1.0, 2)", "('mb_test 4', 1.0, 4)"], {}), "(('mb_test 0', 1.0, None), ('mb_test 1', 1.0,\n 1), ('mb_test 2', 1.0, 2), ('mb_test 4', 1.0, 4))\n", (4106, 4205), False, 'from absl.testing import parameterized\n'), ((5495, 5789), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('noise_multiplier 3 2 1', 3.0, 2.0, 1)", "('noise_multiplier 5 4 1', 5.0, 4.0, 1)", "('noise_multiplier 3 2 2', 3.0, 2.0, 2)", "('noise_multiplier 5 4 2', 5.0, 4.0, 2)", "('noise_multiplier 3 2 4', 3.0, 2.0, 4)", "('noise_multiplier 5 4 4', 5.0, 4.0, 4)"], {}), "(('noise_multiplier 3 2 1', 3.0, 2.0, 1), (\n 'noise_multiplier 5 4 1', 5.0, 4.0, 1), ('noise_multiplier 3 2 2', 3.0,\n 2.0, 2), ('noise_multiplier 5 4 2', 5.0, 4.0, 2), (\n 'noise_multiplier 3 2 4', 3.0, 2.0, 4), ('noise_multiplier 5 4 4', 5.0,\n 4.0, 4))\n", (5525, 5789), False, 'from absl.testing import parameterized\n'), ((7443, 7575), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('mb_test None 1', None, 1)", "('mb_test 1 2', 1, 2)", "('mb_test 2 2', 2, 2)", "('mb_test 4 4', 4, 4)"], {}), "(('mb_test None 1', None, 1), ('mb_test 1 2',\n 1, 2), ('mb_test 2 2', 2, 2), ('mb_test 4 4', 4, 4))\n", (7473, 7575), False, 'from absl.testing import parameterized\n'), ((8547, 8640), 'absl.testing.parameterized.named_parameters', 
'parameterized.named_parameters', (["('earlier API True', True)", "('earlier API False', False)"], {}), "(('earlier API True', True), (\n 'earlier API False', False))\n", (8577, 8640), False, 'from absl.testing import parameterized\n'), ((9049, 9063), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (9061, 9063), True, 'import tensorflow as tf\n'), ((976, 1003), 'numpy.matmul', 'np.matmul', (['data', '[[3], [1]]'], {}), '(data, [[3], [1]])\n', (985, 1003), True, 'import numpy as np\n'), ((1589, 1632), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (1612, 1632), True, 'import tensorflow as tf\n'), ((1644, 1678), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (1676, 1678), True, 'import tensorflow as tf\n'), ((2687, 2739), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (2710, 2739), True, 'import tensorflow as tf\n'), ((2751, 2785), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (2783, 2785), True, 'import tensorflow as tf\n'), ((2968, 3005), 'numpy.sqrt', 'np.sqrt', (['(90 ** 2 + 120 ** 2 + 30 ** 2)'], {}), '(90 ** 2 + 120 ** 2 + 30 ** 2)\n', (2975, 3005), True, 'import numpy as np\n'), ((3621, 3639), 'numpy.matmul', 'np.matmul', (['data', 'w'], {}), '(data, w)\n', (3630, 3639), True, 'import numpy as np\n'), ((3708, 3781), 'numpy.reshape', 'np.reshape', (['grads', '[num_microbatches, batch_size // num_microbatches, -1]'], {}), '(grads, [num_microbatches, batch_size // num_microbatches, -1])\n', (3718, 3781), True, 'import numpy as np\n'), ((3821, 3843), 'numpy.mean', 'np.mean', (['grads'], {'axis': '(1)'}), '(grads, axis=1)\n', (3828, 3843), True, 'import numpy as np\n'), ((3864, 3896), 'numpy.linalg.norm', 'np.linalg.norm', (['mb_grads'], {'axis': '(1)'}), '(mb_grads, axis=1)\n', (3878, 
3896), True, 'import numpy as np\n'), ((3910, 3955), 'numpy.minimum', 'np.minimum', (['(l2_norm_clip / mb_grad_norms)', '(1.0)'], {}), '(l2_norm_clip / mb_grad_norms, 1.0)\n', (3920, 3955), True, 'import numpy as np\n'), ((4023, 4048), 'numpy.mean', 'np.mean', (['mb_grads'], {'axis': '(0)'}), '(mb_grads, axis=0)\n', (4030, 4048), True, 'import numpy as np\n'), ((4310, 4368), 'numpy.array', 'np.array', (['[[2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]]'], {}), '([[2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]])\n', (4318, 4368), True, 'import numpy as np\n'), ((4377, 4388), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (4385, 4388), True, 'import numpy as np\n'), ((4410, 4442), 'numpy.array', 'np.array', (['[1.0, 3.0, -2.0, -4.0]'], {}), '([1.0, 3.0, -2.0, -4.0])\n', (4418, 4442), True, 'import numpy as np\n'), ((4714, 4756), 'numpy.squeeze', 'np.squeeze', (['(learning_rate * expected_grads)'], {}), '(learning_rate * expected_grads)\n', (4724, 4756), True, 'import numpy as np\n'), ((4774, 4826), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (4797, 4826), True, 'import tensorflow as tf\n'), ((4838, 4872), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (4870, 4872), True, 'import tensorflow as tf\n'), ((6323, 6342), 'numpy.zeros', 'np.zeros', (['(4, 1000)'], {}), '((4, 1000))\n', (6331, 6342), True, 'import numpy as np\n'), ((6362, 6392), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (6370, 6392), True, 'import numpy as np\n'), ((6434, 6486), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (6457, 6486), True, 'import tensorflow as tf\n'), ((6498, 6532), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (6530, 6532), True, 'import tensorflow as 
tf\n'), ((7112, 7136), 'numpy.std', 'np.std', (['model_weights[0]'], {}), '(model_weights[0])\n', (7118, 7136), True, 'import numpy as np\n'), ((7694, 7752), 'numpy.array', 'np.array', (['[[2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]]'], {}), '([[2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]])\n', (7702, 7752), True, 'import numpy as np\n'), ((7772, 7794), 'numpy.array', 'np.array', (['[0, 1, 1, 0]'], {}), '([0, 1, 1, 0])\n', (7780, 7794), True, 'import numpy as np\n'), ((7836, 7888), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (7859, 7888), True, 'import tensorflow as tf\n'), ((7903, 7966), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (7948, 7966), True, 'import tensorflow as tf\n'), ((3079, 3102), 'numpy.array', 'np.array', (['[[90], [120]]'], {}), '([[90], [120]])\n', (3087, 3102), True, 'import numpy as np\n'), ((3147, 3161), 'numpy.array', 'np.array', (['[30]'], {}), '([30])\n', (3155, 3161), True, 'import numpy as np\n'), ((1408, 1452), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(2,)'}), '(input_shape=(2,))\n', (1434, 1452), True, 'import tensorflow as tf\n'), ((1466, 1544), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(1, kernel_initializer='zeros', bias_initializer='zeros')\n", (1487, 1544), True, 'import tensorflow as tf\n'), ((2481, 2525), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(2,)'}), '(input_shape=(2,))\n', (2507, 2525), True, 'import tensorflow as tf\n'), ((2539, 2617), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(1, kernel_initializer='zeros', 
bias_initializer='zeros')\n", (2560, 2617), True, 'import tensorflow as tf\n'), ((5097, 5141), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(2,)'}), '(input_shape=(2,))\n', (5123, 5141), True, 'import tensorflow as tf\n'), ((5155, 5223), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'use_bias': '(False)', 'kernel_initializer': '"""zeros"""'}), "(1, use_bias=False, kernel_initializer='zeros')\n", (5176, 5223), True, 'import tensorflow as tf\n'), ((6770, 6817), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(1000,)'}), '(input_shape=(1000,))\n', (6796, 6817), True, 'import tensorflow as tf\n'), ((6831, 6909), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(1, kernel_initializer='zeros', bias_initializer='zeros')\n", (6852, 6909), True, 'import tensorflow as tf\n'), ((8139, 8183), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(2,)'}), '(input_shape=(2,))\n', (8165, 8183), True, 'import tensorflow as tf\n'), ((8197, 8285), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['output_dimension'], {'use_bias': '(False)', 'kernel_initializer': '"""zeros"""'}), "(output_dimension, use_bias=False, kernel_initializer=\n 'zeros')\n", (8218, 8285), True, 'import tensorflow as tf\n'), ((8858, 8902), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(2,)'}), '(input_shape=(2,))\n', (8884, 8902), True, 'import tensorflow as tf\n'), ((8918, 8986), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'use_bias': '(False)', 'kernel_initializer': '"""zeros"""'}), "(2, use_bias=False, kernel_initializer='zeros')\n", (8939, 8986), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: philld
"""
import os
import json
from transifex.api import transifex_api
# Print the working directory (useful in CI logs), then authenticate.
print(os.getenv("PWD"))
transifex_api.setup(auth=os.getenv("TX_TOKEN"))
organization = transifex_api.Organization.get(slug="hisp-uio")
projects = organization.fetch('projects')
# lang_stats[version][language] -> total translated strings across projects.
# lang_statsall[version][project][resource][language] -> "NN.N%" completion.
lang_stats = {}
lang_statsall = {}
projmap = {}  # project display name -> project slug
versions = ("29","30","31","32","33","34","35","36","37","master")
# Feature-toggle map: project homepage URL -> extra versions that the
# project's "master" resource should also be counted towards.
# Context manager guarantees the file is closed even on a parse error.
with open('data/feature-toggling.json') as ft:
    togglers = json.load(ft)
# Map language codes to display names.
langmap = {}
for l in transifex_api.Language.all():
    langmap[l.code] = l.name
for p in projects:
    # Only count app projects (named "APP:..." or "APP-...").
    if p.name.startswith(("APP:", "APP-")):
        projmap[p.name] = p.slug
        print(p.name)
        for r in p.fetch('resources'):
            r_slug = r.attributes['slug']
            # Resource slugs look like "<version>--<name>"; normalize the
            # version part (strip "2-", "v" and "-x" decorations).
            base_version = r_slug.split('--')[0].replace('2-','').replace('v','').replace('-x','')
            version_list = [base_version]
            if base_version == 'master' and p.homepage_url in togglers:
                version_list += togglers[p.homepage_url]
            for version in version_list:
                if version not in versions:
                    continue
                if version not in lang_stats:
                    lang_stats[version] = {}
                    lang_statsall[version] = {}
                for s in transifex_api.ResourceLanguageStats.filter(project=p, resource=r):
                    language = s.language.id.split(':')[1]
                    trans = s.attributes['translated_strings']
                    tot = s.attributes['total_strings']
                    # Aggregate translated-string counts per language.
                    lang_stats[version][language] = lang_stats[version].get(language, 0) + trans
                    # Per-resource completion percentage (guard empty resources).
                    per_project = lang_statsall[version].setdefault(p.name, {})
                    per_resource = per_project.setdefault(r_slug, {})
                    per_resource[language] = "0%" if tot == 0 else f"{trans/tot:.1%}"
# Language-code -> display-name map for every language seen on master,
# sorted by display name.
lango = {}
for l in lang_stats["master"]:
    lango[l] = langmap[l]
langos = {k: v for k, v in sorted(lango.items(), key=lambda item: item[1], reverse=False)}
stats = {
    "versions": versions,
    "overview": lang_stats,
    "details": lang_statsall,
    "languages": langos,
    "projects": projmap,
}
# Emit as a JS assignment ("transifex = {...};") so a dashboard can load it
# directly via a <script> tag.
with open("./data/transifex.json", "w") as f:
    f.write("transifex = " + json.dumps(stats, indent=2) + ";")
|
[
"json.load",
"json.dumps",
"transifex.api.transifex_api.Organization.get",
"transifex.api.transifex_api.ResourceLanguageStats.filter",
"transifex.api.transifex_api.Language.all",
"os.getenv"
] |
[((225, 272), 'transifex.api.transifex_api.Organization.get', 'transifex_api.Organization.get', ([], {'slug': '"""hisp-uio"""'}), "(slug='hisp-uio')\n", (255, 272), False, 'from transifex.api import transifex_api\n'), ((524, 537), 'json.load', 'json.load', (['ft'], {}), '(ft)\n', (533, 537), False, 'import json\n'), ((572, 600), 'transifex.api.transifex_api.Language.all', 'transifex_api.Language.all', ([], {}), '()\n', (598, 600), False, 'from transifex.api import transifex_api\n'), ((142, 158), 'os.getenv', 'os.getenv', (['"""PWD"""'], {}), "('PWD')\n", (151, 158), False, 'import os\n'), ((186, 207), 'os.getenv', 'os.getenv', (['"""TX_TOKEN"""'], {}), "('TX_TOKEN')\n", (195, 207), False, 'import os\n'), ((3430, 3457), 'json.dumps', 'json.dumps', (['stats'], {'indent': '(2)'}), '(stats, indent=2)\n', (3440, 3457), False, 'import json\n'), ((1634, 1699), 'transifex.api.transifex_api.ResourceLanguageStats.filter', 'transifex_api.ResourceLanguageStats.filter', ([], {'project': 'p', 'resource': 'r'}), '(project=p, resource=r)\n', (1676, 1699), False, 'from transifex.api import transifex_api\n')]
|
#!/usr/bin/env python
'''
A module for getting the stream info for a video.
Info:
type: eta.core.types.Module
version: 0.1.0
Copyright 2017-2018, Voxel51, LLC
voxel51.com
<NAME>, <EMAIL>
'''
# pragma pylint: disable=redefined-builtin
# pragma pylint: disable=unused-wildcard-import
# pragma pylint: disable=wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
# pragma pylint: enable=redefined-builtin
# pragma pylint: enable=unused-wildcard-import
# pragma pylint: enable=wildcard-import
import logging
import sys
from eta.core.config import Config
import eta.core.module as etam
import eta.core.video as etav
logger = logging.getLogger(__name__)
class VideoStreamInfoConfig(etam.BaseModuleConfig):
    '''Video stream info configuration settings.

    Top-level config for the video_stream_info module.

    Attributes:
        data (DataConfig): a list of per-video input/output settings
    '''
    def __init__(self, d):
        super(VideoStreamInfoConfig, self).__init__(d)
        # "data" is an array of DataConfig dicts in the module config JSON
        self.data = self.parse_object_array(d, "data", DataConfig)
class DataConfig(Config):
    '''Data configuration settings.

    One input video and the path where its stream info JSON is written.

    Inputs:
        video (eta.core.types.Video): The input video

    Outputs:
        stream_info (eta.core.types.VideoStreamInfo): The video stream info
    '''
    def __init__(self, d):
        # Both fields are string paths read from the module config JSON
        self.video = self.parse_string(d, "video")
        self.stream_info = self.parse_string(d, "stream_info")
def _get_stream_info(stream_info_config):
    '''Computes and writes the stream info JSON for each configured video.'''
    for dc in stream_info_config.data:
        logger.info("Reading stream info for %s", dc.video)
        info = etav.VideoStreamInfo.build_for(dc.video)
        info.write_json(dc.stream_info)
def run(config_path, pipeline_config_path=None):
    '''Run the video_stream_info module.

    Args:
        config_path: path to a VideoStreamInfoConfig file
        pipeline_config_path: optional path to a PipelineConfig file
    '''
    cfg = VideoStreamInfoConfig.from_json(config_path)
    etam.setup(cfg, pipeline_config_path=pipeline_config_path)
    _get_stream_info(cfg)
if __name__ == "__main__":
    # CLI entry point: forwards args as run(config_path[, pipeline_config_path])
    run(*sys.argv[1:])
|
[
"eta.core.video.VideoStreamInfo.build_for",
"eta.core.module.setup",
"logging.getLogger"
] |
[((770, 797), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (787, 797), False, 'import logging\n'), ((2058, 2131), 'eta.core.module.setup', 'etam.setup', (['stream_info_config'], {'pipeline_config_path': 'pipeline_config_path'}), '(stream_info_config, pipeline_config_path=pipeline_config_path)\n', (2068, 2131), True, 'import eta.core.module as etam\n'), ((1648, 1697), 'eta.core.video.VideoStreamInfo.build_for', 'etav.VideoStreamInfo.build_for', (['data_config.video'], {}), '(data_config.video)\n', (1678, 1697), True, 'import eta.core.video as etav\n')]
|
# Copyright 2020-present <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import asyncio
import logging
from datetime import datetime, timedelta, timezone
import discord
from discord.ext import commands
from ...bot import Salamander, SalamanderContext, UserFeedbackError
from ...checks import admin, mod_or_perms
from ...utils import Waterfall
from ...utils.parsing import parse_positive_number, parse_snowflake
log = logging.getLogger("salamander.extensions.cleanup")
class Cleanup(commands.Cog):
    """Quick message cleanup"""

    @commands.max_concurrency(1, commands.BucketType.guild)
    @commands.bot_has_guild_permissions(manage_messages=True, read_message_history=True)
    @admin()
    @commands.command()
    async def removegone(self, ctx: SalamanderContext):
        """
        Removes messages from those who can no longer see the channel

        Can be used if handling deletion requests for privacy reasons

        Is intentionally very slow, limited to admins only, and can only run one at a time
        """
        assert not isinstance(ctx.channel, (discord.DMChannel, discord.PartialMessageable, discord.GroupChannel))
        if not await ctx.yes_or_no(
            "Are you sure you want to remove all messages from any user who cannot see this channel? (yes, no)",
            delete_on_return=True,
        ):
            return
        informational = await ctx.send("This may take a while, I'll inform you when it is done.")
        # Serializes the batched deletes issued by the waterfall below.
        lock = asyncio.Lock()

        async def safe_slow_delete(msgs):
            async with lock:
                if msgs:
                    if len(msgs) == 1:
                        try:
                            await msgs[0].delete()
                        except discord.NotFound:
                            pass
                    # Bulk deletion only works on sufficiently recent messages;
                    # some wiggle room included
                    cutoff = datetime.now(timezone.utc) - timedelta(days=13, hours=22)
                    mass_deletable = []
                    for m in msgs:
                        if m.created_at > cutoff:
                            mass_deletable.append(m)
                        else:
                            # Older messages must be deleted one by one, slowly.
                            try:
                                await m.delete()
                            except discord.NotFound:
                                pass
                            await asyncio.sleep(2)
                    if mass_deletable:
                        assert not isinstance(
                            ctx.channel, (discord.DMChannel, discord.PartialMessageable, discord.GroupChannel)
                        )
                        await ctx.channel.delete_messages(mass_deletable)
                        await asyncio.sleep(1)

        waterfall = Waterfall(12, 100, safe_slow_delete)
        try:
            waterfall.start()
            member_ids = {m.id for m in ctx.channel.members}
            async for msg in ctx.history(limit=None, before=informational):
                # artificial delay to avoid saturating ratelimits for something allowed to be a slow process
                # This one takes a hit once every 100 messages under the hood, making this ~ 8s/100m
                await asyncio.sleep(0.08)
                if msg.author.id not in member_ids:
                    waterfall.put(msg)
        except Exception as exc:
            log.exception("Error during removegone", exc_info=exc)
            await waterfall.stop(wait=True)
            await ctx.send(
                f"{ctx.author.mention} something went wrong during the "
                "message removal process. The error has been logged.",
                allowed_mentions=discord.AllowedMentions(users=[ctx.author]),
            )
        else:
            await waterfall.stop(wait=True)
            await ctx.send(
                f"{ctx.author.mention} The message removal process has finished.",
                allowed_mentions=discord.AllowedMentions(users=[ctx.author]),
            )

    @removegone.error
    async def concurrency_fail(self, ctx: SalamanderContext, exc: commands.CommandError):
        """Give a friendly message when removegone is already running in this guild."""
        if isinstance(exc, commands.MaxConcurrencyReached):
            await ctx.send("That command is already running for a channel in this server.")

    @commands.bot_has_guild_permissions(manage_messages=True, read_message_history=True)
    @mod_or_perms(manage_messages=True)
    @commands.group()
    async def cleanup(self, ctx: SalamanderContext):
        """Message cleanup tools"""
        if ctx.invoked_subcommand is None:
            await ctx.send_help()

    @cleanup.command(name="number")
    async def cleanup_number(self, ctx: SalamanderContext, number):
        """Cleanup some number of messages within the last 10 days."""
        limit = parse_positive_number(number, 1e7)
        if not limit:
            # BUGFIX: message previously said "1 million" while the cap above is 1e7.
            raise UserFeedbackError(custom_message="You must provide a positive number of 10 million or less.")
        if limit > 100:
            if not await ctx.yes_or_no(
                f"Are you sure you want to delete up to {limit} messages?",
                delete_on_return=True,
            ):
                return
        await self._cleanup(ctx, limit=limit)

    @cleanup.command(name="before")
    async def cleanup_before(self, ctx: SalamanderContext, before):
        """Cleanup messages before a specific message ID within the last 10 days."""
        snowflake = parse_snowflake(before)
        if not snowflake:
            raise UserFeedbackError(custom_message="That did not look like a valid message ID.")
        before_obj = discord.Object(id=snowflake)
        if before_obj.created_at < ctx.message.created_at - timedelta(days=10):
            raise UserFeedbackError(custom_message="This message is older than the 10 day cutoff.")
        if not await ctx.yes_or_no(
            "Are you sure you want to delete all the messages before this ID within the last 10 days?",
            delete_on_return=True,
        ):
            return
        await self._cleanup(ctx, before=before_obj)

    @cleanup.command(name="after")
    async def cleanup_after(self, ctx: SalamanderContext, after):
        """Cleanup all messages after a specific message ID within the last 10 days."""
        snowflake = parse_snowflake(after)
        if not snowflake:
            raise UserFeedbackError(custom_message="That did not look like a valid message ID.")
        after_obj = discord.Object(id=snowflake)
        if after_obj.created_at < ctx.message.created_at - timedelta(days=10):
            raise UserFeedbackError(custom_message="This message is older than the 10 day cutoff.")
        if not await ctx.yes_or_no(
            "Are you sure you want to delete all the messages after the provided message ID?",
            delete_on_return=True,
        ):
            return
        await self._cleanup(ctx, after=after_obj)

    @cleanup.command(name="between")
    async def cleanup_between(self, ctx: SalamanderContext, first, second):
        """
        Cleanup messages between two provided message IDs within the last 10 days.
        """
        snowflake = parse_snowflake(first)
        if not snowflake:
            raise UserFeedbackError(custom_message="The first provided ID did not look like a valid message ID.")
        first_obj = discord.Object(id=snowflake)
        if first_obj.created_at < ctx.message.created_at - timedelta(days=10):
            raise UserFeedbackError(custom_message="The first provided message ID is older than the 10 day cutoff.")
        # BUGFIX: previously re-parsed ``first`` here, silently ignoring ``second``.
        snowflake = parse_snowflake(second)
        if not snowflake:
            raise UserFeedbackError(custom_message="The second provided ID did not look like a valid message ID.")
        second_obj = discord.Object(id=snowflake)
        if second_obj.created_at < ctx.message.created_at - timedelta(days=10):
            raise UserFeedbackError(custom_message="The second provided message ID is older than the 10 day cutoff.")
        # BUGFIX: was ``second.obj.created_at`` — an AttributeError on the raw string argument.
        if second_obj.created_at < first_obj.created_at:
            raise UserFeedbackError(
                custom_message="The first message ID provided should be the earlier one. (Not continuing in case of accidental misuse.)"
            )
        if not await ctx.yes_or_no(
            "Are you sure you want to delete all the messages between the provided message IDs?",
            delete_on_return=True,
        ):
            return
        await self._cleanup(ctx, before=second_obj, after=first_obj)

    async def _cleanup(
        self,
        ctx: SalamanderContext,
        *,
        limit: int | None = None,
        before: discord.Message | discord.Object | None = None,
        after: discord.Message | discord.Object | None = None,
    ):
        """Delete up to ``limit`` non-pinned messages in the channel, bounded by ``before``/``after``."""
        assert not isinstance(ctx.channel, (discord.DMChannel, discord.PartialMessageable, discord.GroupChannel))
        # I think waterfall use might make sense here? IDK --Liz
        # Maybe, but I get the feeling it won't feel responsive enough. -- Sinbad
        to_delete = [ctx.message]
        before = before or ctx.message
        cutoff = after.created_at if after else ctx.message.created_at - timedelta(days=10)
        # Don't use after param, changes API behavior. Can add oldest_first=False,
        # but this will increase the needed underlying api calls.
        async for message in ctx.history(limit=limit, before=before):
            if message.created_at < cutoff:
                break
            if not message.pinned:
                to_delete.append(message)
            if len(to_delete) == 100:
                await ctx.channel.delete_messages(to_delete)
                to_delete = []
        if to_delete:
            if len(to_delete) == 1:
                # Why does discord's API care about this?
                await to_delete[0].delete()
            else:
                await ctx.channel.delete_messages(to_delete)
|
[
"discord.ext.commands.command",
"discord.AllowedMentions",
"asyncio.sleep",
"discord.ext.commands.max_concurrency",
"discord.Object",
"asyncio.Lock",
"discord.ext.commands.bot_has_guild_permissions",
"datetime.timedelta",
"discord.ext.commands.group",
"datetime.datetime.now",
"logging.getLogger"
] |
[((982, 1032), 'logging.getLogger', 'logging.getLogger', (['"""salamander.extensions.cleanup"""'], {}), "('salamander.extensions.cleanup')\n", (999, 1032), False, 'import logging\n'), ((1102, 1156), 'discord.ext.commands.max_concurrency', 'commands.max_concurrency', (['(1)', 'commands.BucketType.guild'], {}), '(1, commands.BucketType.guild)\n', (1126, 1156), False, 'from discord.ext import commands\n'), ((1162, 1249), 'discord.ext.commands.bot_has_guild_permissions', 'commands.bot_has_guild_permissions', ([], {'manage_messages': '(True)', 'read_message_history': '(True)'}), '(manage_messages=True,\n read_message_history=True)\n', (1196, 1249), False, 'from discord.ext import commands\n'), ((1264, 1282), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (1280, 1282), False, 'from discord.ext import commands\n'), ((4789, 4876), 'discord.ext.commands.bot_has_guild_permissions', 'commands.bot_has_guild_permissions', ([], {'manage_messages': '(True)', 'read_message_history': '(True)'}), '(manage_messages=True,\n read_message_history=True)\n', (4823, 4876), False, 'from discord.ext import commands\n'), ((4918, 4934), 'discord.ext.commands.group', 'commands.group', ([], {}), '()\n', (4932, 4934), False, 'from discord.ext import commands\n'), ((2039, 2053), 'asyncio.Lock', 'asyncio.Lock', ([], {}), '()\n', (2051, 2053), False, 'import asyncio\n'), ((6108, 6136), 'discord.Object', 'discord.Object', ([], {'id': 'snowflake'}), '(id=snowflake)\n', (6122, 6136), False, 'import discord\n'), ((6954, 6982), 'discord.Object', 'discord.Object', ([], {'id': 'snowflake'}), '(id=snowflake)\n', (6968, 6982), False, 'import discord\n'), ((7836, 7864), 'discord.Object', 'discord.Object', ([], {'id': 'snowflake'}), '(id=snowflake)\n', (7850, 7864), False, 'import discord\n'), ((8268, 8296), 'discord.Object', 'discord.Object', ([], {'id': 'snowflake'}), '(id=snowflake)\n', (8282, 8296), False, 'import discord\n'), ((6197, 6215), 'datetime.timedelta', 'timedelta', ([], 
{'days': '(10)'}), '(days=10)\n', (6206, 6215), False, 'from datetime import datetime, timedelta, timezone\n'), ((7042, 7060), 'datetime.timedelta', 'timedelta', ([], {'days': '(10)'}), '(days=10)\n', (7051, 7060), False, 'from datetime import datetime, timedelta, timezone\n'), ((7924, 7942), 'datetime.timedelta', 'timedelta', ([], {'days': '(10)'}), '(days=10)\n', (7933, 7942), False, 'from datetime import datetime, timedelta, timezone\n'), ((8357, 8375), 'datetime.timedelta', 'timedelta', ([], {'days': '(10)'}), '(days=10)\n', (8366, 8375), False, 'from datetime import datetime, timedelta, timezone\n'), ((9672, 9690), 'datetime.timedelta', 'timedelta', ([], {'days': '(10)'}), '(days=10)\n', (9681, 9690), False, 'from datetime import datetime, timedelta, timezone\n'), ((3737, 3756), 'asyncio.sleep', 'asyncio.sleep', (['(0.08)'], {}), '(0.08)\n', (3750, 3756), False, 'import asyncio\n'), ((2430, 2456), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (2442, 2456), False, 'from datetime import datetime, timedelta, timezone\n'), ((2459, 2487), 'datetime.timedelta', 'timedelta', ([], {'days': '(13)', 'hours': '(22)'}), '(days=13, hours=22)\n', (2468, 2487), False, 'from datetime import datetime, timedelta, timezone\n'), ((4459, 4502), 'discord.AllowedMentions', 'discord.AllowedMentions', ([], {'users': '[ctx.author]'}), '(users=[ctx.author])\n', (4482, 4502), False, 'import discord\n'), ((3248, 3264), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (3261, 3264), False, 'import asyncio\n'), ((4198, 4241), 'discord.AllowedMentions', 'discord.AllowedMentions', ([], {'users': '[ctx.author]'}), '(users=[ctx.author])\n', (4221, 4241), False, 'import discord\n'), ((2903, 2919), 'asyncio.sleep', 'asyncio.sleep', (['(2)'], {}), '(2)\n', (2916, 2919), False, 'import asyncio\n')]
|
import z3
from mythril.laser.smt.model import Model
from mythril.laser.smt.bool import Bool
from mythril.laser.smt.solver.solver_statistics import stat_smt_query
from typing import Set, Tuple, Dict, List, cast
def _get_expr_variables(expression: z3.ExprRef) -> List[z3.ExprRef]:
    """Collect the leaf variables that make up ``expression``.

    Childless nodes count as variables unless they are bit-vector constants.

    :param expression: the expression to inspect
    :return: list of variable sub-expressions (may contain duplicates)
    """
    children = expression.children()
    if not children:
        # A leaf: it is a variable unless it is a numeric constant.
        return [] if isinstance(expression, z3.BitVecNumRef) else [expression]
    variables = []
    for child in children:
        variables.extend(_get_expr_variables(child))
    return variables
class DependenceBucket:
    """Bucket object to contain a set of conditions that are dependent on each other"""

    def __init__(self, variables=None, conditions=None):
        """
        Initializes a DependenceBucket object

        :param variables: Variables contained in the conditions
        :param conditions: The conditions that are dependent on each other
        """
        # ``[] if not x else x`` keeps the historical semantics of ``x or []``:
        # any falsy argument (None or an empty list) is replaced by a fresh list.
        self.variables = [] if not variables else variables  # type: List[z3.ExprRef]
        self.conditions = [] if not conditions else conditions  # type: List[z3.ExprRef]
class DependenceMap:
    """DependenceMap object that maintains a set of dependence buckets, used to separate independent smt queries"""
    def __init__(self):
        """Initializes a DependenceMap object"""
        # All current buckets; their variable sets are kept pairwise disjoint.
        self.buckets = []  # type: List[DependenceBucket]
        # Maps str(variable) -> the bucket that currently owns that variable.
        self.variable_map = {}  # type: Dict[str, DependenceBucket]
    def add_condition(self, condition: z3.BoolRef) -> None:
        """
        Add condition to the dependence map
        :param condition: The condition that is to be added to the dependence map
        """
        variables = set(_get_expr_variables(condition))
        relevant_buckets = set()
        # Find every existing bucket that already owns one of this condition's variables.
        for variable in variables:
            try:
                bucket = self.variable_map[str(variable)]
                relevant_buckets.add(bucket)
            except KeyError:
                continue
        # The condition always starts life in its own bucket...
        new_bucket = DependenceBucket(variables, [condition])
        self.buckets.append(new_bucket)
        if relevant_buckets:
            # ...which is merged with any overlapping buckets.
            # Merge buckets, and rewrite variable map accordingly
            relevant_buckets.add(new_bucket)
            new_bucket = self._merge_buckets(relevant_buckets)
        # Runs in both cases: claim all of the (possibly merged) bucket's variables.
        for variable in new_bucket.variables:
            self.variable_map[str(variable)] = new_bucket
    def _merge_buckets(self, bucket_list: Set[DependenceBucket]) -> DependenceBucket:
        """Merges the buckets in bucket list"""
        variables = []  # type: List[str]
        conditions = []  # type: List[z3.BoolRef]
        # Remove each source bucket from the map while collecting its contents.
        for bucket in bucket_list:
            self.buckets.remove(bucket)
            variables += bucket.variables
            conditions += bucket.conditions
        new_bucket = DependenceBucket(variables, conditions)
        self.buckets.append(new_bucket)
        return new_bucket
class IndependenceSolver:
    """An SMT solver object that uses independence optimization"""

    def __init__(self):
        """Creates a solver with no constraints and no cached models."""
        self.raw = z3.Solver()
        self.constraints = []
        self.models = []

    def set_timeout(self, timeout: int) -> None:
        """Sets the timeout that will be used by this solver, timeout is in
        milliseconds.

        :param timeout:
        """
        self.raw.set(timeout=timeout)

    def add(self, *constraints: Tuple[Bool]) -> None:
        """Adds the constraints to this solver.

        :param constraints: constraints to add
        """
        self.constraints.extend(
            constraint.raw for constraint in cast(Tuple[Bool], constraints)
        )

    def append(self, *constraints: Tuple[Bool]) -> None:
        """Adds the constraints to this solver.

        :param constraints: constraints to add
        """
        # Identical to add(); kept for interface compatibility.
        self.add(*constraints)

    @stat_smt_query
    def check(self) -> z3.CheckSatResult:
        """Returns z3 smt check result."""
        # Partition the constraints into independent buckets first.
        dependence_map = DependenceMap()
        for constraint in self.constraints:
            dependence_map.add_condition(constraint)

        self.models = []
        for bucket in dependence_map.buckets:
            # Solve each bucket in isolation on a clean solver state.
            self.raw.reset()
            self.raw.append(*bucket.conditions)
            result = self.raw.check()
            if result != z3.sat:
                # One unsatisfiable bucket makes the whole set unsatisfiable.
                return result
            self.models.append(self.raw.model())
        return z3.sat

    def model(self) -> Model:
        """Returns z3 model for a solution."""
        return Model(self.models)

    def reset(self) -> None:
        """Reset this solver."""
        self.constraints = []

    def pop(self, num) -> None:
        """Pop num constraints from this solver."""
        self.constraints.pop(num)
|
[
"typing.cast",
"mythril.laser.smt.model.Model",
"z3.Solver"
] |
[((3114, 3125), 'z3.Solver', 'z3.Solver', ([], {}), '()\n', (3123, 3125), False, 'import z3\n'), ((4774, 4792), 'mythril.laser.smt.model.Model', 'Model', (['self.models'], {}), '(self.models)\n', (4779, 4792), False, 'from mythril.laser.smt.model import Model\n'), ((3622, 3652), 'typing.cast', 'cast', (['Tuple[Bool]', 'constraints'], {}), '(Tuple[Bool], constraints)\n', (3626, 3652), False, 'from typing import Set, Tuple, Dict, List, cast\n'), ((3959, 3989), 'typing.cast', 'cast', (['Tuple[Bool]', 'constraints'], {}), '(Tuple[Bool], constraints)\n', (3963, 3989), False, 'from typing import Set, Tuple, Dict, List, cast\n')]
|
import numpy as np
import pytest
from gtd.ml.vocab import SimpleVocab, SimpleEmbeddings
@pytest.fixture
def vocab():
    """A three-token vocabulary fixture."""
    tokens = ['a', 'b', 'c']
    return SimpleVocab(tokens)
@pytest.fixture
def embeds(vocab):
    """Identity-matrix embeddings over ``vocab``."""
    identity = np.eye(len(vocab))
    return SimpleEmbeddings(identity, vocab)
class TestSimpleVocab(object):
    """Persistence round-trip tests for SimpleVocab."""

    def test_save_load(self, vocab, tmpdir):
        """Saving and reloading must yield an equal vocabulary."""
        vocab_path = str(tmpdir.join('vocab.txt'))
        vocab.save(vocab_path)
        reloaded = SimpleVocab.load(vocab_path)
        assert vocab == reloaded
|
[
"gtd.ml.vocab.SimpleVocab.load",
"gtd.ml.vocab.SimpleEmbeddings",
"gtd.ml.vocab.SimpleVocab"
] |
[((131, 159), 'gtd.ml.vocab.SimpleVocab', 'SimpleVocab', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (142, 159), False, 'from gtd.ml.vocab import SimpleVocab, SimpleEmbeddings\n'), ((239, 269), 'gtd.ml.vocab.SimpleEmbeddings', 'SimpleEmbeddings', (['array', 'vocab'], {}), '(array, vocab)\n', (255, 269), False, 'from gtd.ml.vocab import SimpleVocab, SimpleEmbeddings\n'), ((438, 460), 'gtd.ml.vocab.SimpleVocab.load', 'SimpleVocab.load', (['path'], {}), '(path)\n', (454, 460), False, 'from gtd.ml.vocab import SimpleVocab, SimpleEmbeddings\n')]
|
# Import a library related to my test called unittest
import unittest
from pandas import DataFrame
from lambdata.assi import add_state_names_column
class TestAssi(unittest.TestCase):
    """Tests for add_state_names_column."""

    def test_assi(self):
        """The helper should append a ``name`` column mapping abbreviations to state names."""
        df = DataFrame({"abbrev": ["CA", "CO", "CT", "DC", "TX"]})
        # Sanity checks on the input frame.
        self.assertEqual(len(df.columns), 1)
        self.assertEqual(list(df.columns), ['abbrev'])
        self.assertEqual(df.iloc[3]["abbrev"], 'DC')
        result_map = add_state_names_column(df)
        self.assertEqual(len(result_map.columns), 2)
        # BUGFIX: the result has two columns, so the previous expectation of
        # ['abbrev'] alone could never pass alongside the length assertion above.
        self.assertEqual(list(result_map.columns), ['abbrev', 'name'])
        self.assertEqual(result_map.iloc[3]["abbrev"], 'DC')
        self.assertEqual(result_map.iloc[3]["name"], 'Washington')
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
|
[
"unittest.main",
"lambdata.assi.add_state_names_column",
"pandas.DataFrame"
] |
[((774, 789), 'unittest.main', 'unittest.main', ([], {}), '()\n', (787, 789), False, 'import unittest\n'), ((231, 284), 'pandas.DataFrame', 'DataFrame', (["{'abbrev': ['CA', 'CO', 'CT', 'DC', 'TX']}"], {}), "({'abbrev': ['CA', 'CO', 'CT', 'DC', 'TX']})\n", (240, 284), False, 'from pandas import DataFrame\n'), ((471, 497), 'lambdata.assi.add_state_names_column', 'add_state_names_column', (['df'], {}), '(df)\n', (493, 497), False, 'from lambdata.assi import add_state_names_column\n')]
|
"""
Here I am going to convert array to image from it's pixel value and put those images in their respective directory for
both in train and test set.
train set -------> [A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z]
test set -------> [A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z]
"""
# Import required packages
import os
import numpy as np
import cv2
# Maps class index (0-25) to the corresponding uppercase letter A-Z.
word_dict = {index: chr(ord('A') + index) for index in range(26)}
def test_images_creation():
    """Convert each row of test.csv into a 28x28 PNG under test/<label>/.

    Each CSV row is ``label, p0, p1, ..., p783`` where the label is a class
    index (0-25) mapped to a letter via ``word_dict``.
    """
    count = 0
    # Directory where test images are saved
    parent_dir = os.path.join(os.getcwd(), 'test')
    # Context manager closes the file even if a row fails to parse.
    with open('test.csv', 'r') as file:
        for line in file:
            # Split line on ',' and create list of row values
            row = line.split(',')
            # First column is the numeric label, the rest are pixel values
            label = word_dict.get(int(row[0]))
            pixel = row[1:]
            # Convert pixels into a 28 x 28 single-channel uint8 image
            pixel = np.asarray(pixel, dtype=np.uint8).reshape((28, 28, 1))
            path = os.path.join(parent_dir, label)
            # count line number and use with image name
            count += 1
            # Re-list each iteration: earlier iterations may have created dirs.
            labels = os.listdir(parent_dir)
            if label in labels:
                # save image in its (existing) directory
                cv2.imwrite(f'{path}/image_{count}.png', pixel)
                print(f"{count} - not created directory only image add")
            else:
                try:
                    os.mkdir(path)
                except OSError as error:
                    print(error)
                # save image in its (freshly created) directory
                cv2.imwrite(f'{path}/image_{count}.png', pixel)
                print(f"{count} - created directory and image add")


test_images_creation()
def train_images_creation():
    """Convert each row of train.csv into a 28x28 PNG under train/<label>/.

    Each CSV row is ``label, p0, p1, ..., p783`` where the label is a class
    index (0-25) mapped to a letter via ``word_dict``.
    """
    count = 0
    # Directory where train images are saved
    parent_dir = os.path.join(os.getcwd(), 'train')
    # Context manager closes the file even if a row fails to parse.
    with open('train.csv', 'r') as file:
        for line in file:
            # Split line on ',' and create list of row values
            row = line.split(',')
            # First column is the numeric label, the rest are pixel values
            label = word_dict.get(int(row[0]))
            pixel = row[1:]
            # Convert pixels into a 28 x 28 single-channel uint8 image
            pixel = np.asarray(pixel, dtype=np.uint8).reshape((28, 28, 1))
            path = os.path.join(parent_dir, label)
            # count line number and use with image name
            count += 1
            # Re-list each iteration: earlier iterations may have created dirs.
            labels = os.listdir(parent_dir)
            if label in labels:
                # save image in its (existing) directory
                cv2.imwrite(f'{path}/image_{count}.png', pixel)
                print(f"{count} - not created directory only image add")
            else:
                try:
                    os.mkdir(path)
                except OSError as error:
                    print(error)
                # save image in its (freshly created) directory
                cv2.imwrite(f'{path}/image_{count}.png', pixel)
                print(f"{count} - created directory and image add")


# train_images_creation()
|
[
"os.mkdir",
"os.getcwd",
"cv2.imwrite",
"numpy.asarray",
"os.path.join",
"os.listdir"
] |
[((841, 852), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (850, 852), False, 'import os\n'), ((1457, 1488), 'os.path.join', 'os.path.join', (['parent_dir', 'label'], {}), '(parent_dir, label)\n', (1469, 1488), False, 'import os\n'), ((1644, 1666), 'os.listdir', 'os.listdir', (['parent_dir'], {}), '(parent_dir)\n', (1654, 1666), False, 'import os\n'), ((2411, 2422), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2420, 2422), False, 'import os\n'), ((3028, 3059), 'os.path.join', 'os.path.join', (['parent_dir', 'label'], {}), '(parent_dir, label)\n', (3040, 3059), False, 'import os\n'), ((3215, 3237), 'os.listdir', 'os.listdir', (['parent_dir'], {}), '(parent_dir)\n', (3225, 3237), False, 'import os\n'), ((1750, 1797), 'cv2.imwrite', 'cv2.imwrite', (['f"""{path}/image_{count}.png"""', 'pixel'], {}), "(f'{path}/image_{count}.png', pixel)\n", (1761, 1797), False, 'import cv2\n'), ((2049, 2096), 'cv2.imwrite', 'cv2.imwrite', (['f"""{path}/image_{count}.png"""', 'pixel'], {}), "(f'{path}/image_{count}.png', pixel)\n", (2060, 2096), False, 'import cv2\n'), ((3321, 3368), 'cv2.imwrite', 'cv2.imwrite', (['f"""{path}/image_{count}.png"""', 'pixel'], {}), "(f'{path}/image_{count}.png', pixel)\n", (3332, 3368), False, 'import cv2\n'), ((3620, 3667), 'cv2.imwrite', 'cv2.imwrite', (['f"""{path}/image_{count}.png"""', 'pixel'], {}), "(f'{path}/image_{count}.png', pixel)\n", (3631, 3667), False, 'import cv2\n'), ((1351, 1384), 'numpy.asarray', 'np.asarray', (['pixel'], {'dtype': 'np.uint8'}), '(pixel, dtype=np.uint8)\n', (1361, 1384), True, 'import numpy as np\n'), ((1914, 1928), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (1922, 1928), False, 'import os\n'), ((2922, 2955), 'numpy.asarray', 'np.asarray', (['pixel'], {'dtype': 'np.uint8'}), '(pixel, dtype=np.uint8)\n', (2932, 2955), True, 'import numpy as np\n'), ((3485, 3499), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (3493, 3499), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, bizmap technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
# lst = []
# for i in frappe.get_all('Task',filters,['asset']):
# lst.append(i.asset)
# return [(d,) for d in lst]
@frappe.whitelist()
def get_asset_filter(doctype, txt, searchfield, start, page_len, filters):
    """Return the assets linked to the selected task, formatted as link-query rows."""
    task_rows = frappe.get_all('Task', {"name": filters.get("task")}, ['asset'])
    return [(row.get("asset"),) for row in task_rows]
|
[
"frappe.whitelist"
] |
[((322, 340), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (338, 340), False, 'import frappe\n')]
|
import pytest
from icevision.all import *
@pytest.fixture
def dummy_class_map():
    """Two-class map without a background class."""
    names = ["dummy-1", "dummy-2"]
    return ClassMap(names, background=None)
@pytest.fixture
def dummy_class_map_elaborate():
    """Four-class map without a background class."""
    names = ["dummy-1", "dummy-2", "dummy-3", "dummy-4"]
    return ClassMap(names, background=None)
def test_classification_multilabel(dummy_class_map):
    """A multilabel component keeps both label ids and one-hot encodes them."""
    record = BaseRecord([ClassificationLabelsRecordComponent(is_multilabel=True)])
    classification = record.classification
    classification.set_class_map(dummy_class_map)
    classification.set_labels_by_id([0, 1])
    assert classification.label_ids == [0, 1]
    expected = np.array([1, 1])
    assert (classification.one_hot_encoded() == expected).all()
@pytest.mark.parametrize("label_ids", [[0, 1], [0]])
def test_classification_single_label(dummy_class_map, label_ids):
    """A single-label component must reject more than one assigned label."""
    record = BaseRecord([ClassificationLabelsRecordComponent(is_multilabel=False)])
    record.classification.set_class_map(dummy_class_map)
    record.classification.set_labels_by_id(label_ids)
    if len(label_ids) == 1:
        # label_ids == [0]: exactly one label is kept and one-hot encoded.
        assert all(record.classification._autofix().values())
        assert record.classification.one_hot_encoded().sum() == 1
    else:
        # label_ids == [0, 1]: two labels with `is_multilabel=False` aborts autofix.
        with pytest.raises(AutofixAbort):
            record.classification._autofix()
assert rec.classification.one_hot_encoded().sum() == 1
@pytest.mark.parametrize("label_ids", [[0, 1, 2], [0, 1], [0]])
def test_one_hot_encodings(dummy_class_map_elaborate, label_ids):
    """One-hot encoding must contain exactly one 1 per assigned label."""
    record = BaseRecord([ClassificationLabelsRecordComponent(is_multilabel=True)])
    record.classification.set_class_map(dummy_class_map_elaborate)
    record.classification.set_labels_by_id(label_ids)
    assert all(record.classification._autofix().values())
    encoded = record.classification.one_hot_encoded()
    # Correct number of set bits, and the vector only holds 0s and 1s.
    assert encoded.sum() == len(label_ids)
    assert np.unique(encoded).tolist() == [0, 1]
|
[
"pytest.mark.parametrize",
"pytest.raises"
] |
[((645, 696), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""label_ids"""', '[[0, 1], [0]]'], {}), "('label_ids', [[0, 1], [0]])\n", (668, 696), False, 'import pytest\n'), ((1405, 1467), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""label_ids"""', '[[0, 1, 2], [0, 1], [0]]'], {}), "('label_ids', [[0, 1, 2], [0, 1], [0]])\n", (1428, 1467), False, 'import pytest\n'), ((1130, 1157), 'pytest.raises', 'pytest.raises', (['AutofixAbort'], {}), '(AutofixAbort)\n', (1143, 1157), False, 'import pytest\n')]
|
from .models import * # Change as necessary
from django.forms import ModelForm
from django import forms
class TodoListForm(ModelForm):
    """ModelForm for a purchase header (Cabecera) with bootstrap-styled widgets.

    ``trabajador`` is excluded from the form; presumably it is filled in by
    the view (e.g. from the logged-in user) — TODO confirm against callers.
    """
    class Meta:
        model = Cabecera
        exclude =('trabajador',)
        widgets = {
            # Bootstrap's form-control class on every rendered input.
            'codigo': forms.TextInput(attrs={'class': 'form-control'}),
            'distribuidor': forms.Select(attrs={'class': 'form-control'}),
            'laboratorio': forms.Select(attrs={'class': 'form-control'}),
        }
class TodoItemForm(forms.ModelForm):
    """ModelForm for a purchase line item (DetalleCompra).

    ``list`` is excluded; presumably it is assigned programmatically to the
    parent document — TODO confirm against callers.
    """
    class Meta:
        model = DetalleCompra
        exclude = ('list',)
        widgets = {
            # Bootstrap's form-control class on the rendered inputs.
            'medicamento': forms.Select(attrs={'class': 'form-control'}),
            'cantidad': forms.NumberInput(attrs={'class': 'form-control'}),
        }
class RangoForm(forms.Form):
    """Date-range form with bootstrap-styled datepicker text inputs."""
    fecha_i = forms.DateField(
        widget=forms.TextInput(
            attrs={'class': 'form-control', 'id': 'Fecha_i', 'data-date-format': 'dd/mm/yyyy'}
        )
    )
    fecha_f = forms.DateField(
        widget=forms.TextInput(
            attrs={'class': 'form-control', 'id': 'Fecha_f', 'data-date-format': 'dd/mm/yyyy'}
        )
    )
|
[
"django.forms.NumberInput",
"django.forms.TextInput",
"django.forms.Select"
] |
[((233, 281), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (248, 281), False, 'from django import forms\n'), ((303, 348), 'django.forms.Select', 'forms.Select', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (315, 348), False, 'from django import forms\n'), ((369, 414), 'django.forms.Select', 'forms.Select', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (381, 414), False, 'from django import forms\n'), ((558, 603), 'django.forms.Select', 'forms.Select', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (570, 603), False, 'from django import forms\n'), ((621, 671), 'django.forms.NumberInput', 'forms.NumberInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (638, 671), False, 'from django import forms\n'), ((753, 856), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control', 'id': 'Fecha_i', 'data-date-format': 'dd/mm/yyyy'}"}), "(attrs={'class': 'form-control', 'id': 'Fecha_i',\n 'data-date-format': 'dd/mm/yyyy'})\n", (768, 856), False, 'from django import forms\n'), ((890, 993), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control', 'id': 'Fecha_f', 'data-date-format': 'dd/mm/yyyy'}"}), "(attrs={'class': 'form-control', 'id': 'Fecha_f',\n 'data-date-format': 'dd/mm/yyyy'})\n", (905, 993), False, 'from django import forms\n')]
|
import pandas as pd
import datetime
import json
def parse_txt_report(path: str, path_tpl:str, separators: tuple = (':', ',')):
    """Score a text prediction report against a template CSV.

    Each report line is assumed to look like
    ``...: <actual_class>, ...: <predicted_class>`` using ``separators``; the
    slice arithmetic below (``find(...) + 2``) assumes exactly one space after
    each separator — TODO confirm against the report writer. For every line a
    1 is recorded if the predicted class equals the template's expected
    prediction for the actual class, else 0. The result is also dumped as
    JSON to ``stat/corresponds<timestamp>.txt`` (the ``stat/`` directory must
    already exist).

    :param path: path of the text report to parse
    :param path_tpl: CSV template with ``actual_class``/``predicted_class`` columns
    :param separators: (field separator, class separator) characters
    :return: dict mapping template predicted class -> list of 0/1 match flags
    """
    report = {}
    with open(path) as file:
        tpl = pd.read_csv(path_tpl)
        # One result list per class named in the template.
        for row in tpl['predicted_class']:
            report.update({row: []})
        for line in file:
            # Text between the first ':' (skipping ': ') and the first ','.
            act_cls = line[line.find(separators[0])+2 : line.find(separators[1])]
            # Everything after the first ',', then after the next ':'.
            pred_class = line[line.find(separators[1])+2 : ]
            pred_class = pred_class[pred_class.find(separators[0]) + 2:]
            if pred_class.endswith('\n'):
                pred_class = pred_class[:-1]
            # Template row for this actual class and its expected prediction.
            tpl_act_cls_row = tpl.loc[tpl['actual_class'] == act_cls]
            tpl_pred_class = tpl_act_cls_row['predicted_class'].values[0]
            if(pred_class == tpl_pred_class):
                report[tpl_pred_class].append(1)
            else:
                report[tpl_pred_class].append(0)
    # Persist the raw correspondence report with a timestamped filename.
    with open('stat/' + "corresponds" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '.txt', 'w') as file:
        file.write(json.dumps(report))
    return report
def analyze(data):
    """Compute per-class accuracy from a correspondence report and write it to disk.

    The summary is dumped as JSON to ``stat/short_report<timestamp>.txt``
    (the ``stat/`` directory must already exist).

    :param data: mapping of class name -> list of 0/1 correctness flags
    :return: dict mapping class name -> accuracy (fraction of 1s)
    """
    short_report = {}
    # Iterate key/value pairs directly instead of the original manual
    # index bookkeeping over keys() and values().
    for cls_name, flags in data.items():
        short_report[cls_name] = sum(flags) / len(flags)
    timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    with open('stat/' + "short_report" + timestamp + '.txt', 'w') as file:
        file.write(json.dumps(short_report))
    return short_report
if __name__ == '__main__':
    # Build the correspondence report from the LSTM report, then summarize per-class accuracy.
    analyze(parse_txt_report("reports/report_lstm.txt", 'templates/sa_tpl.csv'))
|
[
"pandas.read_csv",
"datetime.datetime.now",
"json.dumps"
] |
[((187, 208), 'pandas.read_csv', 'pd.read_csv', (['path_tpl'], {}), '(path_tpl)\n', (198, 208), True, 'import pandas as pd\n'), ((1057, 1075), 'json.dumps', 'json.dumps', (['report'], {}), '(report)\n', (1067, 1075), False, 'import json\n'), ((1456, 1480), 'json.dumps', 'json.dumps', (['short_report'], {}), '(short_report)\n', (1466, 1480), False, 'import json\n'), ((964, 987), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (985, 987), False, 'import datetime\n'), ((1363, 1386), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1384, 1386), False, 'import datetime\n')]
|
import argparse
import numpy as np
from astropy.io import fits
from numba import jit
class DragonPedestal:
    """Accumulates per-capacitor pedestal statistics for one Dragon module.

    ``meanped``/``numped`` are indexed as [gain, pixel, capacitor cell].
    """
    # Pixels handled per module.
    n_pixels = 7
    # Readout window length in samples.
    roisize = 40
    # Number of DRS4 capacitor cells (4 * 1024).
    size4drs = 4*1024
    # Gain-channel indices used as the first array axis.
    high_gain = 0
    low_gain = 1
    def __init__(self):
        # First-capacitor id per (gain, position); filled by fill_pedestal_event.
        self.first_capacitor = np.zeros((2, 8))
        # Running sum of samples (turned into a mean by finalize_pedestal).
        self.meanped = np.zeros((2, self.n_pixels, self.size4drs))
        # Number of samples accumulated per (gain, pixel, capacitor cell).
        self.numped = np.zeros((2, self.n_pixels, self.size4drs))
    def fill_pedestal_event(self, event, nr):
        """Accumulate pedestal samples from one event for module ``nr``."""
        first_cap = event.lst.tel[0].evt.first_capacitor_id[nr * 8:(nr + 1) * 8]
        # Capacitor-id channels are shared pairwise between pixels
        # (same mapping as get_first_capacitor, Dragon v5 board data format).
        for i, j in zip([0, 1, 2, 3, 4, 5, 6], [0, 0, 1, 1, 2, 2, 3]):
            self.first_capacitor[self.high_gain, i] = first_cap[j]
        for i, j in zip([0, 1, 2, 3, 4, 5, 6], [4, 4, 5, 5, 6, 6, 7]):
            self.first_capacitor[self.low_gain, i] = first_cap[j]
        waveform = event.r0.tel[0].waveform[:, :, :]
        expected_pixel_id = event.lst.tel[0].svc.pixel_ids
        self._fill_pedestal_event_jit(nr, waveform, expected_pixel_id, self.first_capacitor, self.meanped, self.numped)
    @staticmethod
    @jit(parallel=True)
    def _fill_pedestal_event_jit(nr, waveform, expected_pixel_id, first_cap, meanped, numped):
        # Numba-compiled hot loop; the constants are redefined locally because
        # the jitted static method does not see the class attributes.
        size4drs = 4096
        roisize = 40
        for i in range(0, 2):
            for j in range(0, 7):
                fc = int(first_cap[i, j])
                pixel = expected_pixel_id[nr*7 + j]
                # First accumulated sample sits 2 cells after the first capacitor.
                posads0 = int((2+fc)%size4drs)
                if posads0 + 40 < 4096:
                    # Fast path: the 36-sample window does not wrap the ring buffer.
                    meanped[i, j, posads0:(posads0+36)] += waveform[i, pixel, 2:38]
                    numped[i, j, posads0:(posads0 + 36)] += 1
                else:
                    # Slow path: window wraps around; accumulate sample by sample.
                    for k in range(2, roisize-2):
                        posads = int((k+fc)%size4drs)
                        val = waveform[i, pixel, k]
                        meanped[i, j, posads] += val
                        numped[i, j, posads] += 1
    def finalize_pedestal(self):
        """Convert the accumulated sums into mean pedestal values (in place)."""
        # NOTE(review): with numpy arrays, 0/0 yields a warning and NaN rather
        # than raising, so this except branch may rarely trigger — confirm.
        try:
            self.meanped = self.meanped/self.numped
        except Exception as err:
            print("Not enough events to coverage all capacitor. Please use more events to create pedestal file.")
            print(err)
def get_first_capacitor(event, nr):
    """Return the first-capacitor ids of module *nr* as a (2, 8) array.

    Row 0 holds the high-gain channel, row 1 the low-gain channel.
    Column 7 of each row is left at 0 (only 7 pixels per module).
    """
    HIGH, LOW = 0, 1
    caps = np.zeros((2, 8))
    raw = event.lst.tel[0].evt.first_capacitor_id[nr * 8:(nr + 1) * 8]
    # First capacitor order according Dragon v5 board data format:
    # consecutive pixel pairs share a capacitor id.
    high_src = (0, 0, 1, 1, 2, 2, 3)
    low_src = (4, 4, 5, 5, 6, 6, 7)
    for pixel in range(7):
        caps[HIGH, pixel] = raw[high_src[pixel]]
        caps[LOW, pixel] = raw[low_src[pixel]]
    return caps
|
[
"numpy.zeros",
"numba.jit"
] |
[((1068, 1086), 'numba.jit', 'jit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (1071, 1086), False, 'from numba import jit\n'), ((2238, 2254), 'numpy.zeros', 'np.zeros', (['(2, 8)'], {}), '((2, 8))\n', (2246, 2254), True, 'import numpy as np\n'), ((256, 272), 'numpy.zeros', 'np.zeros', (['(2, 8)'], {}), '((2, 8))\n', (264, 272), True, 'import numpy as np\n'), ((296, 339), 'numpy.zeros', 'np.zeros', (['(2, self.n_pixels, self.size4drs)'], {}), '((2, self.n_pixels, self.size4drs))\n', (304, 339), True, 'import numpy as np\n'), ((362, 405), 'numpy.zeros', 'np.zeros', (['(2, self.n_pixels, self.size4drs)'], {}), '((2, self.n_pixels, self.size4drs))\n', (370, 405), True, 'import numpy as np\n')]
|
from __future__ import unicode_literals
import re
from setuptools import find_packages, setup
def get_version(filename):
    """Extract the package version from *filename*.

    Scans the file for ``__<name>__ = '<value>'`` metadata assignments and
    returns the ``__version__`` value.

    Raises:
        KeyError: if the file defines no ``__version__``.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the old open(...).read() leaked the handle).
    with open(filename) as fh:
        content = fh.read()
    metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", content))
    return metadata['version']
# Package metadata for the Mopidy-Headless extension; the 'mopidy.ext'
# entry point is how Mopidy discovers the Extension class at runtime.
setup(
    name='Mopidy-Headless',
    version=get_version('mopidy_headless/__init__.py'),
    url='',
    license='Apache License, Version 2.0',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Mopidy extension for controlling via input devices',
    long_description=open('README.md').read(),
    packages=find_packages(exclude=['tests', 'tests.*']),
    zip_safe=False,
    include_package_data=True,
    install_requires=[
        'setuptools',
        'Mopidy >= 0.19',
        'Pykka >= 1.1',
    ],
    entry_points={
        'mopidy.ext': [
            'headless = mopidy_headless:Extension',
        ],
    },
)
|
[
"re.findall",
"setuptools.find_packages"
] |
[((181, 228), 're.findall', 're.findall', (['"""__([a-z]+)__ = \'([^\']+)\'"""', 'content'], {}), '("__([a-z]+)__ = \'([^\']+)\'", content)\n', (191, 228), False, 'import re\n'), ((588, 631), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests', 'tests.*']"}), "(exclude=['tests', 'tests.*'])\n", (601, 631), False, 'from setuptools import find_packages, setup\n')]
|
#TODO: move this to pioneer.das.acquisition
from pioneer.das.api import platform
try:
import folium #pip3 install folium
except:
pass
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import tqdm
import utm
def easting_northing_from_lat_long(latitude, longitude):
    """Project WGS84 latitude/longitude to UTM easting/northing (meters).

    The UTM zone number and letter returned by ``utm.from_latlon`` are
    discarded.
    """
    result = utm.from_latlon(latitude, longitude)
    return result[0], result[1]
def distance_traj_step(easting, northing, t=None):
    """Distance (or speed) between consecutive trajectory samples.

    Args:
        easting: sequence of easting coordinates.
        northing: sequence of northing coordinates.
        t: optional sequence of timestamps; when given, every step distance
            is divided by the elapsed time, yielding a speed.

    Returns:
        Array of length ``len(easting) - 1`` with step distances, or
        speeds when ``t`` is provided.
    """
    d_e = np.diff(easting)
    d_n = np.diff(northing)
    step = (d_e**2 + d_n**2)**0.5
    if t is not None:
        # distance per elapsed time -> speed
        return step/np.diff(t)
    # Bug fix: previously the function fell through and returned None when
    # t was omitted, which broke callers such as find_trajectory_jump.
    return step
def get_trajectory(pfsynch:platform.Synchronized,
                    ref_ts_sensor:str='flir_bfc_img',
                    imu_nav:str='sbgekinox_bcc_navposvel'):
    """Extract the platform trajectory, sampled at ref_ts_sensor timestamps.

    Returns:
        (easting, northing, points, t) where easting/northing are float64
        arrays in UTM meters, points is a list of [lat, lon] pairs and t is
        the elapsed time in seconds relative to the first sample.
    """
    eastings, northings, stamps = [], [], []
    latlongs = []
    for frame in tqdm.tqdm(range(len(pfsynch))):
        stamp = pfsynch[frame][ref_ts_sensor].timestamp
        nav = pfsynch.platform[imu_nav].get_at_timestamp(stamp).raw
        lat, lon = nav['latitude'], nav['longitude']
        east, north = easting_northing_from_lat_long(lat, lon)
        eastings.append(east)
        northings.append(north)
        stamps.append(stamp/1e6)
        latlongs.append([lat, lon])
    t = np.array(stamps, dtype=np.float64)
    return (np.array(eastings, dtype=np.float64),
            np.array(northings, dtype=np.float64),
            latlongs,
            t - t[0])
def compute_neighbour_step_ratio(xt, yt, t, min_epsilon_precision=1e-5):
    """Ratio between the left and right per-time steps at each interior point.

    For every interior sample i the left/right finite differences of xt and
    yt (per unit of time) are compared: smooth trajectories give ratios close
    to 1, jumps give large ratios. The first and last entries are 0.

    Args:
        xt, yt: coordinate sequences (e.g. easting/northing).
        t: timestamps matching xt/yt.
        min_epsilon_precision: floor applied to each difference so the ratio
            never divides by (near) zero.

    Returns:
        float array of length len(xt) with the norm of the (x, y) ratios.
    """
    step_ratio_norm = [0]
    for i in range(1, len(xt)-1):
        d_t_l = np.abs(t[i-1]-t[i])
        d_t_r = np.abs(t[i+1]-t[i])
        d_xt_l = np.maximum(np.abs(xt[i-1]-xt[i])/d_t_l, min_epsilon_precision)
        d_xt_r = np.maximum(np.abs(xt[i+1]-xt[i])/d_t_r, min_epsilon_precision)
        d_yt_l = np.maximum(np.abs(yt[i-1]-yt[i])/d_t_l, min_epsilon_precision)
        d_yt_r = np.maximum(np.abs(yt[i+1]-yt[i])/d_t_r, min_epsilon_precision)
        # ratio >= 1 by construction: larger step divided by smaller step
        step_ratio_xt = np.maximum(d_xt_l, d_xt_r) / np.minimum(d_xt_l, d_xt_r)
        step_ratio_yt = np.maximum(d_yt_l, d_yt_r) / np.minimum(d_yt_l, d_yt_r)
        step_ratio_norm.append((step_ratio_xt**2 + step_ratio_yt**2)**0.5)
    step_ratio_norm.append(0)
    # Bug fix: np.float is a deprecated alias removed in NumPy 1.24;
    # use the builtin float instead.
    return np.array(step_ratio_norm, dtype=float)
def compute_standard_score(x, seq_memory: int=200, start_at_zero: bool=True, outliers_threshold: float=100.0):
    '''return the standard score based on a memory sequence of certain length.

    Each x[mu] is scored against the mean/std of a sliding window of up to
    *seq_memory* previous samples, excluding samples previously flagged as
    outliers (score > outliers_threshold). With start_at_zero=False, the
    first seq_memory scores are forced to 0. The first entry is always 0.
    '''
    m = len(x)
    epsilon_ = 1e-4 # 0.1 mm precision
    z_score = []
    z_score.append(0)
    # samples whose score exceeded the threshold; excluded from later windows
    flag_outliers = np.zeros_like(x, dtype=bool)
    for mu in tqdm.tqdm(range(1, m)):
        # window [a, b) of at most seq_memory past samples
        a, b = np.maximum(mu - seq_memory, 0), mu
        if mu < seq_memory and not start_at_zero:
            z_score.append(0)
            continue
        window_seq = x[a:b][~flag_outliers[a:b]]
        # if mu > seq_memory and len(window_seq) < 0.25*seq_memory:
        #     z_score.append(0)
        #     continue
        seq_mean = np.mean(window_seq)
        seq_std = np.std(window_seq)
        # epsilon_ avoids division by zero for constant windows
        z_ = np.abs((x[mu] - seq_mean)/(seq_std + epsilon_))
        if z_ > outliers_threshold:
            flag_outliers[mu] = 1
        z_score.append(np.copy(z_))
    return np.array(z_score)
def get_trajectory_standard_score(pfsynch:platform.Synchronized,
                        ref_ts_sensor:str='flir_bfc_img',
                        imu_nav:str='sbgekinox_bcc_navposvel',
                        traj_seq_memory:int=200):
    """Smoothness estimate of the trajectory based on standard scores.

    The first entry stays 0 (a step score needs a preceding sample).
    """
    east, north, _, stamps = get_trajectory(pfsynch, ref_ts_sensor, imu_nav)
    speeds = distance_traj_step(east, north, stamps)
    scores = np.zeros_like(east)
    scores[1:] = compute_standard_score(speeds, traj_seq_memory-1, False)
    return scores
def get_trajectory_step_ratio(pfsynch:platform.Synchronized,
                        ref_ts_sensor:str='flir_bfc_img',
                        imu_nav:str='sbgekinox_bcc_navposvel',
                        traj_min_epsilon_precision:float=1e-6):
    """Smoothness estimate based on the left/right epsilon-step ratio."""
    east, north, _, stamps = get_trajectory(pfsynch, ref_ts_sensor, imu_nav)
    return compute_neighbour_step_ratio(east, north, stamps,
                                        traj_min_epsilon_precision)
def find_trajectory_jump(pfsynch:platform.Synchronized,
                        ref_ts_sensor:str='flir_bfc_img',
                        imu_nav:str='sbgekinox_bcc_navposvel',
                        traj_seq_memory:int=200,
                        traj_jump_threshold:float=15.5,
                        show_result:bool=True):
    '''Compute the list of ranges of intervall from pfsynch which are smooth according to traj_jump_threshold.

    Returns (jump_flag, list_intervals) -- plus the folium map when
    show_result is True.
    '''
    print('Computing trajectory')
    easting, northing, points, t = get_trajectory(pfsynch, ref_ts_sensor, imu_nav)
    # NOTE(review): distance_traj_step is called WITHOUT timestamps here; as
    # defined above, it then returns None (the return sits inside the
    # `if t is not None` branch) -- likely `t` should be passed. Confirm.
    traj_step = distance_traj_step(easting, northing)
    print('Validate trajectory')
    # standard score per step; index 0 has no preceding step and stays 0
    z_scores = np.zeros_like(easting)
    z_scores[1:] = compute_standard_score(traj_step, traj_seq_memory-1, False)
    jump_flag = (z_scores > traj_jump_threshold).astype(bool)
    list_intervals = []
    ids = np.arange(len(jump_flag))[jump_flag]
    # build [start, end] frame intervals between consecutive jump flags;
    # gaps shorter than traj_seq_memory are dropped
    for mu in range(len(ids)):
        if mu == 0:
            list_intervals.append([0 , ids[mu]-1])
            continue
        if ids[mu]-ids[mu-1] >= traj_seq_memory:
            list_intervals.append([ids[mu-1], ids[mu]-1])
    if show_result:
        # t is reused as the frame-number axis for plotting
        t = np.arange(len(easting))
        fig, ax = plt.subplots(2, 1, figsize=(9,10))
        fig.suptitle('Trajectory positions and jumps')
        ax[0].scatter(t, easting)
        ax[0].scatter(t[jump_flag], easting[jump_flag], label='jump flags')
        ax[0].legend()
        ax[0].set_xlabel('Frame number')
        ax[0].set_ylabel('Easting')
        ax[1].scatter(t, northing)
        ax[1].scatter(t[jump_flag], northing[jump_flag], label='jump flags')
        ax[1].legend()
        ax[1].set_xlabel('Frame number')
        ax[1].set_ylabel('Northing')
        plt.show()
        # interactive map with a red marker at every flagged IMU jump
        my_map = folium.Map(location=points[0], zoom_start=15)
        folium.PolyLine(points).add_to(my_map)
        for mu in ids:
            folium.CircleMarker(
                location=points[mu],
                radius=5.5,
                popup='IMU jump: '+ str(mu),
                color='red',
                fill=True,
                fill_color='red'
            ).add_to(my_map)
        return jump_flag, list_intervals, my_map
    return jump_flag, list_intervals
# Example usage: load a recorded dataset, build the synchronized view and
# report the smooth trajectory intervals.
if __name__ == '__main__':
    #example of use:
    #see this dataset:
    _dataset = '/nas/pixset/exportedDataset/20200610_195655_rec_dataset_quartier_pierre_exported'
    _ignore = ['radarTI_bfc']
    pf = platform.Platform(_dataset, ignore=_ignore)
    # get the platform synchronized:
    sync_labels = ['*ech*', '*_img*', '*_trr*', '*_trf*',' *_ftrr*', '*xyzit-*']
    interp_labels = ['*_xyzit', 'sbgekinox_*', 'peakcan_*', '*temp', '*_pos*', '*_agc*']
    synch = pf.synchronized(sync_labels=sync_labels, interp_labels=interp_labels, tolerance_us=1e3)
    flags, inters, my_map = find_trajectory_jump(synch,
                            ref_ts_sensor='flir_bfc_img',
                            imu_nav='sbgekinox_bcc_navposvel',
                            traj_seq_memory=200,
                            traj_jump_threshold=4.0,
                            show_result=True)
    print('Intervals:', inters)
|
[
"utm.from_latlon",
"numpy.zeros_like",
"numpy.abs",
"matplotlib.pyplot.show",
"numpy.maximum",
"numpy.minimum",
"numpy.std",
"numpy.copy",
"pioneer.das.api.platform.Platform",
"numpy.diff",
"numpy.array",
"numpy.mean",
"folium.Map",
"folium.PolyLine",
"matplotlib.pyplot.subplots"
] |
[((327, 363), 'utm.from_latlon', 'utm.from_latlon', (['latitude', 'longitude'], {}), '(latitude, longitude)\n', (342, 363), False, 'import utm\n'), ((455, 471), 'numpy.diff', 'np.diff', (['easting'], {}), '(easting)\n', (462, 471), True, 'import numpy as np\n'), ((482, 499), 'numpy.diff', 'np.diff', (['northing'], {}), '(northing)\n', (489, 499), True, 'import numpy as np\n'), ((2351, 2392), 'numpy.array', 'np.array', (['step_ratio_norm'], {'dtype': 'np.float'}), '(step_ratio_norm, dtype=np.float)\n', (2359, 2392), True, 'import numpy as np\n'), ((2708, 2736), 'numpy.zeros_like', 'np.zeros_like', (['x'], {'dtype': 'bool'}), '(x, dtype=bool)\n', (2721, 2736), True, 'import numpy as np\n'), ((3396, 3413), 'numpy.array', 'np.array', (['z_score'], {}), '(z_score)\n', (3404, 3413), True, 'import numpy as np\n'), ((3933, 3955), 'numpy.zeros_like', 'np.zeros_like', (['easting'], {}), '(easting)\n', (3946, 3955), True, 'import numpy as np\n'), ((5306, 5328), 'numpy.zeros_like', 'np.zeros_like', (['easting'], {}), '(easting)\n', (5319, 5328), True, 'import numpy as np\n'), ((7117, 7160), 'pioneer.das.api.platform.Platform', 'platform.Platform', (['_dataset'], {'ignore': '_ignore'}), '(_dataset, ignore=_ignore)\n', (7134, 7160), False, 'from pioneer.das.api import platform\n'), ((536, 546), 'numpy.diff', 'np.diff', (['t'], {}), '(t)\n', (543, 546), True, 'import numpy as np\n'), ((1375, 1410), 'numpy.array', 'np.array', (['easting'], {'dtype': 'np.float64'}), '(easting, dtype=np.float64)\n', (1383, 1410), True, 'import numpy as np\n'), ((1412, 1448), 'numpy.array', 'np.array', (['northing'], {'dtype': 'np.float64'}), '(northing, dtype=np.float64)\n', (1420, 1448), True, 'import numpy as np\n'), ((1674, 1697), 'numpy.abs', 'np.abs', (['(t[i - 1] - t[i])'], {}), '(t[i - 1] - t[i])\n', (1680, 1697), True, 'import numpy as np\n'), ((1710, 1733), 'numpy.abs', 'np.abs', (['(t[i + 1] - t[i])'], {}), '(t[i + 1] - t[i])\n', (1716, 1733), True, 'import numpy as np\n'), ((3153, 3172), 
'numpy.mean', 'np.mean', (['window_seq'], {}), '(window_seq)\n', (3160, 3172), True, 'import numpy as np\n'), ((3191, 3209), 'numpy.std', 'np.std', (['window_seq'], {}), '(window_seq)\n', (3197, 3209), True, 'import numpy as np\n'), ((3224, 3273), 'numpy.abs', 'np.abs', (['((x[mu] - seq_mean) / (seq_std + epsilon_))'], {}), '((x[mu] - seq_mean) / (seq_std + epsilon_))\n', (3230, 3273), True, 'import numpy as np\n'), ((5855, 5890), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(9, 10)'}), '(2, 1, figsize=(9, 10))\n', (5867, 5890), True, 'import matplotlib.pyplot as plt\n'), ((6377, 6387), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6385, 6387), True, 'import matplotlib.pyplot as plt\n'), ((6406, 6451), 'folium.Map', 'folium.Map', ([], {'location': 'points[0]', 'zoom_start': '(15)'}), '(location=points[0], zoom_start=15)\n', (6416, 6451), False, 'import folium\n'), ((1458, 1488), 'numpy.array', 'np.array', (['ts'], {'dtype': 'np.float64'}), '(ts, dtype=np.float64)\n', (1466, 1488), True, 'import numpy as np\n'), ((2085, 2111), 'numpy.maximum', 'np.maximum', (['d_xt_l', 'd_xt_r'], {}), '(d_xt_l, d_xt_r)\n', (2095, 2111), True, 'import numpy as np\n'), ((2114, 2140), 'numpy.minimum', 'np.minimum', (['d_xt_l', 'd_xt_r'], {}), '(d_xt_l, d_xt_r)\n', (2124, 2140), True, 'import numpy as np\n'), ((2165, 2191), 'numpy.maximum', 'np.maximum', (['d_yt_l', 'd_yt_r'], {}), '(d_yt_l, d_yt_r)\n', (2175, 2191), True, 'import numpy as np\n'), ((2194, 2220), 'numpy.minimum', 'np.minimum', (['d_yt_l', 'd_yt_r'], {}), '(d_yt_l, d_yt_r)\n', (2204, 2220), True, 'import numpy as np\n'), ((2790, 2820), 'numpy.maximum', 'np.maximum', (['(mu - seq_memory)', '(0)'], {}), '(mu - seq_memory, 0)\n', (2800, 2820), True, 'import numpy as np\n'), ((3367, 3378), 'numpy.copy', 'np.copy', (['z_'], {}), '(z_)\n', (3374, 3378), True, 'import numpy as np\n'), ((1759, 1784), 'numpy.abs', 'np.abs', (['(xt[i - 1] - xt[i])'], {}), '(xt[i - 1] - xt[i])\n', (1765, 
1784), True, 'import numpy as np\n'), ((1839, 1864), 'numpy.abs', 'np.abs', (['(xt[i + 1] - xt[i])'], {}), '(xt[i + 1] - xt[i])\n', (1845, 1864), True, 'import numpy as np\n'), ((1928, 1953), 'numpy.abs', 'np.abs', (['(yt[i - 1] - yt[i])'], {}), '(yt[i - 1] - yt[i])\n', (1934, 1953), True, 'import numpy as np\n'), ((2008, 2033), 'numpy.abs', 'np.abs', (['(yt[i + 1] - yt[i])'], {}), '(yt[i + 1] - yt[i])\n', (2014, 2033), True, 'import numpy as np\n'), ((6461, 6484), 'folium.PolyLine', 'folium.PolyLine', (['points'], {}), '(points)\n', (6476, 6484), False, 'import folium\n')]
|
import os.path as osp
import numpy as np
import mmcv
from . import XMLDataset
from .builder import DATASETS
import xml.etree.ElementTree as ET
from PIL import Image
@DATASETS.register_module()
class LogosDataset(XMLDataset):
    """VOC/XML-style logo dataset whose class list is discovered on the
    fly from the annotation files instead of being declared up front."""
    def load_annotations(self, ann_file):
        """Load annotation from XML style ann_file.
        Args:
            ann_file (str): Path of XML file.
        Returns:
            list[dict]: Annotation info from XML file.
        """
        if not self.CLASSES:
            # collect class names as a set while scanning; converted to a
            # sorted list at the end
            self.CLASSES = set()
        data_infos = []
        img_ids = mmcv.list_from_file(ann_file)
        for img_id in img_ids:
            filename = f'JPEGImages/{img_id}.jpg'
            xml_path = osp.join(self.img_prefix, 'Annotations',
                                f'{img_id}.xml')
            tree = ET.parse(xml_path)
            root = tree.getroot()
            # Get image size data
            size = root.find('size')
            if size is not None:
                width = int(size.find('width').text)
                height = int(size.find('height').text)
            else:
                # fall back to reading the image itself when the XML has
                # no <size> element
                img_path = osp.join(self.img_prefix, 'JPEGImages',
                                    '{}.jpg'.format(img_id))
                img = Image.open(img_path)
                width, height = img.size
            # Get object classes
            self.CLASSES |= {x.text for x in tree.findall("object/name")}
            data_infos.append(
                dict(id=img_id, filename=filename, width=width, height=height))
        self.CLASSES = sorted(list(self.CLASSES))
        return data_infos
    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 # NOTE(review): np.arange default is evaluated once at
                 # import time; harmless while never mutated, but a None
                 # default would be the safer idiom.
                 iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluation in COCO protocol.
        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluating the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float]): IoU threshold used for evaluating
                recalls/mAPs. If set to a list, the average of all IoUs will
                also be computed. Default: np.arange(0.5, 0.96, 0.05).
        Returns:
            dict[str, float]: COCO style evaluation metric.
        """
        # Only validates the requested metric names; no metric is actually
        # computed here -- an empty dict is returned.
        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')
        return dict()
|
[
"xml.etree.ElementTree.parse",
"PIL.Image.open",
"numpy.arange",
"mmcv.list_from_file",
"os.path.join"
] |
[((578, 607), 'mmcv.list_from_file', 'mmcv.list_from_file', (['ann_file'], {}), '(ann_file)\n', (597, 607), False, 'import mmcv\n'), ((1875, 1901), 'numpy.arange', 'np.arange', (['(0.5)', '(0.96)', '(0.05)'], {}), '(0.5, 0.96, 0.05)\n', (1884, 1901), True, 'import numpy as np\n'), ((712, 769), 'os.path.join', 'osp.join', (['self.img_prefix', '"""Annotations"""', 'f"""{img_id}.xml"""'], {}), "(self.img_prefix, 'Annotations', f'{img_id}.xml')\n", (720, 769), True, 'import os.path as osp\n'), ((821, 839), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_path'], {}), '(xml_path)\n', (829, 839), True, 'import xml.etree.ElementTree as ET\n'), ((1255, 1275), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1265, 1275), False, 'from PIL import Image\n')]
|
import json
import random
import unittest
from model.position import Position
class PositionTest(unittest.TestCase):
    """Serialization contract of the Position model."""
    def test_given_a_position_then_it_is_serializable(self):
        # random coordinates; drawn in x, y, z order (same as before)
        coords = {axis: random.randint(1, 100) for axis in ("x", "y", "z")}
        position = Position(coords["x"], coords["y"], coords["z"])
        self.assertEqual(coords, json.loads(position.toJSON()))
|
[
"random.randint",
"model.position.Position"
] |
[((193, 215), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (207, 215), False, 'import random\n'), ((228, 250), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (242, 250), False, 'import random\n'), ((263, 285), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (277, 285), False, 'import random\n'), ((402, 419), 'model.position.Position', 'Position', (['x', 'y', 'z'], {}), '(x, y, z)\n', (410, 419), False, 'from model.position import Position\n')]
|
import os
from collections import OrderedDict, MutableMapping, MutableSequence
from operator import itemgetter, attrgetter
from copy import deepcopy
import xmltodict
TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<gexf xmlns="http://www.gexf.net/1.2draft"
xmlns:viz="http://www.gexf.net/1.1draft/viz"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.gexf.net/1.2draft http://www.gexf.net/1.2draft/gexf.xsd"
version="1.2"
></gexf>"""
class BaseElement:
    """Thin wrapper around an xmltodict OrderedDict node.

    Subclasses expose their parsed-XML dict as ``self.data``. XML
    attributes (``@foo`` keys) are readable as plain Python attributes,
    and item access proxies straight through to ``data``.
    """
    def __init__(self, parent, data):
        self.parent = parent
        self.data = data or OrderedDict()
    def set(self, name, _text=None, **kwargs):
        """Create/replace child element *name* with XML attributes
        *kwargs* (and optional text content); return the new element."""
        element = OrderedDict([('@%s' % k, str(kwargs[k])) for k in sorted(kwargs)])
        if _text:
            element['#text'] = _text
        self.data[name] = element
        return element
    def _mklst(self, tag, **kwargs):
        """Ensure a "<tag>s" wrapper exists whose "<tag>" entry is a list
        (xmltodict collapses a single child into a bare dict)."""
        try:
            el = self.data['%ss' % tag]
            if type(el[tag]).__name__ != 'list':
                el[tag] = [el[tag]]
        except (KeyError, TypeError):
            el = self.set('%ss' % tag, **kwargs)
            el[tag] = []
        return el
    def get(self, name, default=None):
        return self.data.get(name, default)
    def __getattribute__(self, attr):
        # Fall back to the "@<attr>" XML attribute when no real Python
        # attribute of that name exists.
        try:
            return super().__getattribute__(attr)
        except AttributeError:
            try:
                return self.data['@%s' % attr]
            except KeyError:
                raise AttributeError(attr)
    def __getitem__(self, key):
        return self.data[key]
    def __setitem__(self, key, value):
        # Bug fix: previously wrote to the literal string 'key' instead
        # of the given key.
        self.data[key] = value
    def __delitem__(self, key):
        # Bug fix: previously deleted the literal string 'key'.
        del self.data[key]
class Gexf(BaseElement):
    """Root handle on a .gexf file.

    Parses the file on construction (falling back to the empty TEMPLATE
    when it does not exist) and serializes the whole tree back on write().
    """
    def __init__(self, path):
        self.path = os.path.realpath(path)
        try:
            # NOTE(review): the file handle is never closed explicitly;
            # relies on refcounting (same pattern in write() below).
            xml = open(self.path, 'r', encoding='utf-8').read()
        except IOError:
            # no file yet: start from the empty GEXF skeleton
            xml = TEMPLATE
        self.tree = xmltodict.parse(xml)
        # guarantee <graphs><graph/></graphs> exists and is a list
        self._mklst('graph')
    @property
    def data(self):
        # unlike BaseElement, `data` is derived from the parsed tree
        return self.tree['gexf']
    @data.setter
    def data(self, value):
        self.tree['gexf'] = value
    def write(self):
        """Serialize the tree back to the original path."""
        open(self.path, 'w+', encoding='utf-8').write(str(self))
    @property
    def _graphs(self):
        # raw list of graph OrderedDicts
        return self.data['graphs']['graph']
    @property
    def graphs(self):
        # wrap each raw dict in a Graph accessor
        return [Graph(self, graph) for graph in self._graphs]
    def add_graph(self, **kwargs):
        """Append a new <graph/> with the given XML attributes and return
        its Graph wrapper."""
        el = OrderedDict([('@%s' % k, str(kwargs[k])) for k in sorted(kwargs)])
        self._graphs.append(el)
        return Graph(self, el)
    @property
    def clean_tree(self):
        # TODO: Remove all empty lists.
        return self.tree
    def __str__(self):
        return xmltodict.unparse(self.clean_tree, pretty=True)
# class GexfGraphs(MutableSequence):
class Graph(BaseElement):
    """Accessor for one <graph/> element.

    __init__ normalizes the xmltodict structure: the attribute wrappers
    for the 'node' and 'edge' classes, and the <nodes>/<edges> lists, are
    guaranteed to exist and to be lists.
    """
    def __init__(self, *args):
        super().__init__(*args)
        try:
            attr_wrapper_list = self.data['attributes']
            # Ensure `attr_wrapper_list` is a list:
            if type(attr_wrapper_list).__name__ != 'list':
                self.data['attributes'] = [self.data['attributes']]
                attr_wrapper_list = self.data['attributes']
        except (KeyError, TypeError):
            attr_wrapper_list = []
            self.data['attributes'] = attr_wrapper_list
        for _class in ['node', 'edge']:
            try:
                # unpacking fails (ValueError) when no wrapper matches
                [attr_wrapper] = filter(lambda x: x['@class'] == _class,
                                        attr_wrapper_list)
            except ValueError:
                attr_wrapper = OrderedDict([
                    ('@class', _class),
                    ('attribute', [])
                ])
                attr_wrapper_list.append(attr_wrapper)
            # If there is only one attribute in the parsed data,
            # it will not be a list, so we need to fix that:
            if type(attr_wrapper['attribute']).__name__ != 'list':
                attr_wrapper['attribute'] = [attr_wrapper['attribute']]
        self._mklst('node')
        self._mklst('edge')
        self.edges = GraphEdges(self)
    @property
    def _nodes(self):
        # raw list of node OrderedDicts
        return self.data['nodes']['node']
    @_nodes.setter
    def _nodes(self, value):
        self.data['nodes']['node'] = value
    @property
    def _edges(self):
        # raw list of edge OrderedDicts
        return self.data['edges']['edge']
    @_edges.setter
    def _edges(self, value):
        self.data['edges']['edge'] = value
    @property
    def nodes(self):
        # Node wrappers are created on every access
        return [Node(self, node) for node in self._nodes]
    def add_node(self, **kwargs):
        """Append a new <node/> with the given XML attributes and return
        its Node wrapper."""
        el = OrderedDict([('@%s' % k, str(kwargs[k])) for k in sorted(kwargs)])
        self._nodes.append(el)
        return Node(self, el)
    def sort_nodes(self, key=None, attr=None, type_cast=int, reverse=False):
        """Sort nodes in place, either by *key* (a callable on Node) or by
        the node attribute named *attr* cast through *type_cast*.

        NOTE(review): _key is unbound (NameError) when neither key nor
        attr is given.
        """
        if key:
            _key = key
        elif attr:
            _key = lambda x: type_cast(x.attributes[attr])
        self._nodes = list(map(attrgetter('data'),
                               sorted(self.nodes, key=_key, reverse=reverse)))
    @property
    def _class_mapped_attributes(self):
        # attribute class ('node'/'edge') -> raw attribute definition list
        return {w['@class']: w['attribute'] for w in self.data['attributes']}
    def get_id_mapped_attributes(self, _class):
        """Attribute definitions for *_class*, keyed by numeric id."""
        return {int(attr['@id']): {
            'title': attr['@title'],
            'type': attr['@type']
        } for attr in self._class_mapped_attributes[_class]}
    def get_attributes(self, _class):
        """Attribute definitions for *_class*, keyed by title."""
        return OrderedDict([(attr['@title'], {
            'id': int(attr['@id']),
            'type': attr['@type']
        }) for attr in self._class_mapped_attributes[_class]])
    @property
    def node_attributes(self):
        return self.get_attributes('node')
    @property
    def edge_attributes(self):
        return self.get_attributes('edge')
    def define_attributes(self, attributes, _class='node'):
        """Register (title, type) attribute definitions for *_class*,
        skipping titles that are already defined; ids are assigned
        sequentially."""
        defined = self.get_attributes(_class).keys()
        _attributes = self._class_mapped_attributes[_class]
        for attr, _type in attributes:
            if attr in defined:
                continue
            el = OrderedDict([
                ('@id', str(len(_attributes))),
                ('@title', str(attr)),
                ('@type', str(_type))
            ])
            _attributes.append(el)
class GraphEdges(MutableSequence):
    """list-like view over a Graph's raw edge dicts, yielding Edge wrappers.

    All mutations write straight through to graph.data. insert() skips
    duplicates (per Edge.__eq__, i.e. same source/target) and renumbers
    every edge @id to match its list position.
    """
    def __init__(self, graph):
        self.graph = graph
    def __getitem__(self, index):
        # build a placeholder Edge, then swap in the raw dict so no
        # placeholder values leak into the tree
        edge = Edge(None, None)
        edge.data = self.graph._edges[index]
        edge._create_attributes(self.graph)
        return edge
    def __setitem__(self, index, edge):
        self.graph._edges[index] = edge.data
    def __delitem__(self, index):
        del self.graph._edges[index]
    def __len__(self):
        return len(self.graph._edges)
    def __str__(self):
        return self.__repr__()
    def __repr__(self):
        return "<GraphEdges %s>" % self.graph.edges
    def insert(self, index, edge):
        edge._create_attributes(self.graph)
        # No duplicates:
        if edge in self.graph.edges:
            return
        self.graph._edges.insert(index, edge.data)
        # keep every @id equal to the edge's list position
        for i, el in enumerate(self.graph._edges):
            el['@id'] = str(i)
    def append(self, edge):
        self.insert(len(self.graph._edges), edge)
class Edge(BaseElement):
    """A GEXF <edge/> element connecting two node ids.

    BaseElement.__init__ is deliberately not called: ``data`` is built
    from scratch here (GraphEdges.__getitem__ replaces it wholesale), so
    ``self.parent`` is never set.
    """
    def __init__(self, source, target, id=None, type=None):
        self.data = OrderedDict([
            ('@id', str(id)),
            ('@source', str(source)),
            ('@target', str(target))
        ])
        if type:
            self.data['@type'] = str(type)
        # pre-create the <attvalues/> wrapper so attribute access works
        self._mklst('attvalue')
    def _create_attributes(self, graph):
        # called by GraphEdges once the edge is bound to a graph
        self._attributes = EdgeAttributes(graph, self)
    @property
    def attributes(self):
        """Edge attribute mapping; only available after the edge has been
        added to a graph (GraphEdges calls _create_attributes)."""
        try:
            return self._attributes
        except AttributeError:
            raise AttributeError('Attributes are not available before edge '
                                 'has been added to a graph')
    def __str__(self):
        return self.__repr__()
    def __repr__(self):
        # Bug fix: previously labelled '<Node ...>' (copy-paste error).
        return '<Edge %s -> %s>' % (self.source, self.target)
    def __eq__(self, other):
        # Two edges are equal when they share source and target; the id is
        # deliberately ignored (used by GraphEdges.insert de-duplication).
        return str(self) == str(other)
class Node(BaseElement):
    """A GEXF <node/> element; exposes its <attvalue/> entries through a
    NodeAttributes mapping (``self.attributes``)."""
    def __init__(self, *args):
        super().__init__(*args)
        # guarantee the <attvalues/> wrapper exists and holds a list
        self._mklst('attvalue')
        self.attributes = NodeAttributes(self.parent, self)
class NodeAttributes(MutableMapping):
    """dict-like view over an element's <attvalue/> list, keyed by
    attribute *title* (resolved through the graph's attribute
    definitions for ``_class``)."""
    _class = 'node'
    def __init__(self, graph, obj, *args, **kwargs):
        self.graph = graph
        self.obj = obj
        self.update(dict(*args, **kwargs))
    @property
    def _attvalues(self):
        # raw list of {'@for': id, '@value': value} dicts
        return self.obj.data['attvalues']['attvalue']
    @property
    def _mapped_attvalues(self):
        # numeric attribute id -> raw attvalue dict
        return {int(v['@for']): v for v in self._attvalues}
    def __getitem__(self, key):
        tkey = self.__keytransform__(key)
        return self._mapped_attvalues[tkey]['@value']
    def __setitem__(self, key, value):
        tkey = self.__keytransform__(key)
        _value = str(value)
        # GEXF boolean values are lowercase ("true"/"false")
        if self.graph.get_attributes(self._class)[key]['type'] == 'boolean':
            _value = _value.lower()
        try:
            self._mapped_attvalues[tkey]['@value'] = _value
        except KeyError:
            # Create a new <attvalue/> element:
            # XML Output:
            # <attvalue for="<tkey>" value="<value>"></attvalue>
            el = OrderedDict([('@for', str(tkey)), ('@value', _value)])
            self._attvalues.append(el)
    def __delitem__(self, key):
        # Should not have to find the actual index as the id should be the
        # same as the index:
        tkey = self.__keytransform__(key)
        del self._attvalues[tkey]
    def __iter__(self):
        # iterate attribute *titles*, not numeric ids
        return iter(map(lambda id: self.graph.get_id_mapped_attributes(self._class)[id]['title'],
                        self._mapped_attvalues.keys()))
    def __len__(self):
        return len(self._attvalues)
    def __keytransform__(self, key):
        # map a title to its numeric attribute id
        return int(self.graph.get_attributes(self._class)[key]['id'])
class EdgeAttributes(NodeAttributes):
    # Same mapping behaviour as NodeAttributes, but resolves attribute
    # definitions against the graph's "edge" attribute class.
    _class = 'edge'
|
[
"os.path.realpath",
"operator.attrgetter",
"xmltodict.unparse",
"xmltodict.parse",
"collections.OrderedDict"
] |
[((1811, 1833), 'os.path.realpath', 'os.path.realpath', (['path'], {}), '(path)\n', (1827, 1833), False, 'import os\n'), ((1984, 2004), 'xmltodict.parse', 'xmltodict.parse', (['xml'], {}), '(xml)\n', (1999, 2004), False, 'import xmltodict\n'), ((2774, 2821), 'xmltodict.unparse', 'xmltodict.unparse', (['self.clean_tree'], {'pretty': '(True)'}), '(self.clean_tree, pretty=True)\n', (2791, 2821), False, 'import xmltodict\n'), ((618, 631), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (629, 631), False, 'from collections import OrderedDict, MutableMapping, MutableSequence\n'), ((5009, 5027), 'operator.attrgetter', 'attrgetter', (['"""data"""'], {}), "('data')\n", (5019, 5027), False, 'from operator import itemgetter, attrgetter\n'), ((3642, 3694), 'collections.OrderedDict', 'OrderedDict', (["[('@class', _class), ('attribute', [])]"], {}), "([('@class', _class), ('attribute', [])])\n", (3653, 3694), False, 'from collections import OrderedDict, MutableMapping, MutableSequence\n')]
|
from typing import Callable, Any, List
from machin.parallel.distributed import (
get_world, get_cur_name
)
from machin.parallel.server import (
PushPullGradServerImpl,
PushPullModelServerImpl
)
from torch.optim import Adam
def grad_server_helper(model_creators: List[Callable],
                       optimizer: Any = Adam,
                       learning_rate: float = 1e-3):
    """
    Helper function for creating a tuple of grad servers,
    used by A3C, IMPALE, etc. This function requires all processes
    in the world to enter.
    Warning:
        You should never run this function twice!
    Args:
        model_creators: A list of model creator functions,
            each one corresponds to one gradient reduction server.
        optimizer: Optimizer type, default is Adam.
        learning_rate: Learning rate of the optimizer.
    Returns:
        A tuple of accessors to gradient servers, the tuple has the
        same size as ``model_creators``
    """
    # Note:
    # passing a list of creator functions instead of passing a list of models
    # directly is designed to remove the unnecessary model creation cost on
    # not-the-primary-reducer processes.
    DEFAULT_GROUP_NAME = "server_group"
    # create groups first
    world = get_world()
    server_group = world.create_rpc_group(DEFAULT_GROUP_NAME,
                                          world.get_members())
    # create servers
    # the first member of the world acts as the primary gradient reducer
    primary_reducer = world.get_members()[0]
    servers = [
        PushPullGradServerImpl("grad_server_" + str(i),
                               server_group,
                               primary_reducer=primary_reducer)
        for i in range(len(model_creators))
    ]
    # only the primary reducer instantiates the models and starts serving
    if get_cur_name() == primary_reducer:
        for model_creator, server in zip(model_creators, servers):
            model = model_creator()
            server.manage_model(model,
                                optimizer(model.parameters(),
                                          lr=learning_rate))
            server.start()
    # wait until every server is managed and started before fetching the
    # paired accessors
    server_group.barrier()
    servers = tuple(
        server_group.get_paired("grad_server_" + str(i)).to_here()
        for i in range(len(model_creators))
    )
    # accessors instead of actual implementation instance
    # will be returned because of __reduce__
    return servers
def model_server_helper(model_num):
    """
    Helper function for creating a tuple of model servers,
    used by APEX, etc. This function requires all processes
    in the world to enter.
    Warning:
        You should never run this function twice!
    Args:
        model_num: Number of model servers to create (one accessor each).
    Returns:
        A tuple of accessors to model servers, the size of tuple is
        ``model_num``
    """
    DEFAULT_GROUP_NAME = "server_group"
    # create groups first
    world = get_world()
    server_group = world.create_rpc_group(DEFAULT_GROUP_NAME,
                                          world.get_members())
    # create servers
    # In current implementation, only one process will initialize the server
    if get_cur_name() == world.get_members()[0]:
        for i in range(model_num):
            # the instance registers itself with the group under
            # "model_server_<i>"; the local reference is not needed
            _server = PushPullModelServerImpl("model_server_" + str(i),
                                              server_group)
    # wait until registration is done before fetching the paired accessors
    server_group.barrier()
    servers = tuple(
        server_group.get_paired("model_server_" + str(i)).to_here()
        for i in range(model_num)
    )
    # accessors instead of actual implementation instance
    # will be returned because of __reduce__
    return servers
|
[
"machin.parallel.distributed.get_cur_name",
"machin.parallel.distributed.get_world"
] |
[((1275, 1286), 'machin.parallel.distributed.get_world', 'get_world', ([], {}), '()\n', (1284, 1286), False, 'from machin.parallel.distributed import get_world, get_cur_name\n'), ((2780, 2791), 'machin.parallel.distributed.get_world', 'get_world', ([], {}), '()\n', (2789, 2791), False, 'from machin.parallel.distributed import get_world, get_cur_name\n'), ((1717, 1731), 'machin.parallel.distributed.get_cur_name', 'get_cur_name', ([], {}), '()\n', (1729, 1731), False, 'from machin.parallel.distributed import get_world, get_cur_name\n'), ((3023, 3037), 'machin.parallel.distributed.get_cur_name', 'get_cur_name', ([], {}), '()\n', (3035, 3037), False, 'from machin.parallel.distributed import get_world, get_cur_name\n')]
|
"""
Module for locating and accessing [[https://takeout.google.com][Google Takeout]] data
"""
from dataclasses import dataclass
from typing import Optional
from my.config import google as user_config
from ..core.common import Paths
@dataclass
class google(user_config):
# directory to unzipped takeout data
takeout_path: Paths
# this is the directory that my google drive gets mirrored to locally
# when it detects a new takeout, it sends a warning, so I can run
# the script to move it to takeout_path
# see HPI/scripts/unzip_google_takeout
google_drive_local_path: Optional[Paths]
from ..core.cfg import make_config
config = make_config(google)
import warnings
from pathlib import Path
from typing import Iterable
from more_itertools import last
from ..core.common import get_files
from ..core.kompress import kexists
def get_takeouts(*, path: Optional[str] = None) -> Iterable[Path]:
check_for_new_takeouts()
for takeout in get_files(config.takeout_path):
if path is None or kexists(takeout, path):
yield takeout
def get_last_takeout(*, path: Optional[str] = None) -> Path:
return last(get_takeouts(path=path))
# if there are any new takeouts, warn me
def check_for_new_takeouts():
if config.google_drive_local_path:
new_takeouts = list(
Path(config.google_drive_local_path).expanduser().absolute().rglob("*")
)
if new_takeouts:
# this may be temporary, once I'm confident the script works fine over
# some period, I'll just automate this
warnings.warn(
f"Theres a new takeout at {new_takeouts[0]}, run ./scripts/unzip_google_takeout to update the data!"
)
|
[
"warnings.warn",
"pathlib.Path"
] |
[((1595, 1720), 'warnings.warn', 'warnings.warn', (['f"""Theres a new takeout at {new_takeouts[0]}, run ./scripts/unzip_google_takeout to update the data!"""'], {}), "(\n f'Theres a new takeout at {new_takeouts[0]}, run ./scripts/unzip_google_takeout to update the data!'\n )\n", (1608, 1720), False, 'import warnings\n'), ((1342, 1378), 'pathlib.Path', 'Path', (['config.google_drive_local_path'], {}), '(config.google_drive_local_path)\n', (1346, 1378), False, 'from pathlib import Path\n')]
|
import re
from datetime import date
import boundaries
boundaries.register('Toronto wards (2010)',
singular='Toronto ward',
domain='Toronto, ON',
last_updated=date(2018, 1, 16),
name_func=boundaries.attr('NAME'),
id_func=lambda f: re.sub(r'\A0', '', f.get('SCODE_NAME')),
authority='City of Toronto',
source_url='https://www.toronto.ca/city-government/data-research-maps/open-data/open-data-catalogue/#29b6fadf-0bd6-2af9-4a8c-8c41da285ad7',
licence_url='https://www.toronto.ca/city-government/data-research-maps/open-data/open-data-licence/',
data_url='http://opendata.toronto.ca/gcc/wards_may2010_wgs84.zip',
encoding='iso-8859-1',
extra={'division_id': 'ocd-division/country:ca/csd:3520005'},
)
|
[
"boundaries.attr",
"datetime.date"
] |
[((172, 189), 'datetime.date', 'date', (['(2018)', '(1)', '(16)'], {}), '(2018, 1, 16)\n', (176, 189), False, 'from datetime import date\n'), ((205, 228), 'boundaries.attr', 'boundaries.attr', (['"""NAME"""'], {}), "('NAME')\n", (220, 228), False, 'import boundaries\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-11-08 04:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('guides', '0015_auto_20200220_1619'),
]
operations = [
migrations.AddField(
model_name='participant',
name='attending',
field=models.CharField(choices=[('YES', 'Yes'), ('NO', 'No')], default='NO', max_length=32, verbose_name='Will you be attending the next IETF?'),
),
]
|
[
"django.db.models.CharField"
] |
[((407, 549), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('YES', 'Yes'), ('NO', 'No')]", 'default': '"""NO"""', 'max_length': '(32)', 'verbose_name': '"""Will you be attending the next IETF?"""'}), "(choices=[('YES', 'Yes'), ('NO', 'No')], default='NO',\n max_length=32, verbose_name='Will you be attending the next IETF?')\n", (423, 549), False, 'from django.db import migrations, models\n')]
|
from obb.models.platform import Platform, Train
from django.shortcuts import get_object_or_404
from obb.serializers.serializers import PlatformSerializer
from rest_framework import viewsets
from rest_framework.response import Response
class PlatformViewSet(viewsets.ViewSet):
def list(self, request):
queryset = Platform.objects.all()
serializer = PlatformSerializer(queryset, many=True)
return Response(serializer.data)
# Create a Platform, in Comment type "name: <Platform_name> train_id: <Train_id>" (USE QUOTES)
def create(self, request):
kwargs = {}
data = request.data.split(' ')
if len(data[1]) > 0:
kwargs.update({'name': data[1]})
if data[3] > 0 and data[3].is_numeric():
queryset = Train.objects.all()
train = get_object_or_404(queryset, pk=data[3])
if type(train) is Train:
kwargs.update({'train_id': train.id})
platform = Platform(**kwargs)
platform.save()
serializer = PlatformSerializer(platform)
return Response(serializer.data)
def retrieve(self, request, pk=None):
queryset = Platform.objects.all()
platform = get_object_or_404(queryset, pk=pk)
serializer = PlatformSerializer(platform)
return Response(serializer.data)
# Update a Train station name, in Comment type "name: <Train_Station_name>" or "train: <Train_id>" (USE QUOTES)
def update(self, request, pk=None):
data = request.data.split(' ')
platform = Platform.objects.get(pk=pk)
if data[0] == "name":
platform.name = data[1]
platform.save()
elif data[0] == "train":
queryset = Train.objects.all()
train = get_object_or_404(queryset, pk=data[1])
if type(train) is Train:
platform.train_id = data[1]
platform.save()
return Response(PlatformSerializer(platform).data)
|
[
"obb.models.platform.Train.objects.all",
"obb.models.platform.Platform.objects.get",
"obb.serializers.serializers.PlatformSerializer",
"django.shortcuts.get_object_or_404",
"rest_framework.response.Response",
"obb.models.platform.Platform.objects.all",
"obb.models.platform.Platform"
] |
[((327, 349), 'obb.models.platform.Platform.objects.all', 'Platform.objects.all', ([], {}), '()\n', (347, 349), False, 'from obb.models.platform import Platform, Train\n'), ((371, 410), 'obb.serializers.serializers.PlatformSerializer', 'PlatformSerializer', (['queryset'], {'many': '(True)'}), '(queryset, many=True)\n', (389, 410), False, 'from obb.serializers.serializers import PlatformSerializer\n'), ((427, 452), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (435, 452), False, 'from rest_framework.response import Response\n'), ((982, 1000), 'obb.models.platform.Platform', 'Platform', ([], {}), '(**kwargs)\n', (990, 1000), False, 'from obb.models.platform import Platform, Train\n'), ((1046, 1074), 'obb.serializers.serializers.PlatformSerializer', 'PlatformSerializer', (['platform'], {}), '(platform)\n', (1064, 1074), False, 'from obb.serializers.serializers import PlatformSerializer\n'), ((1091, 1116), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (1099, 1116), False, 'from rest_framework.response import Response\n'), ((1179, 1201), 'obb.models.platform.Platform.objects.all', 'Platform.objects.all', ([], {}), '()\n', (1199, 1201), False, 'from obb.models.platform import Platform, Train\n'), ((1221, 1255), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['queryset'], {'pk': 'pk'}), '(queryset, pk=pk)\n', (1238, 1255), False, 'from django.shortcuts import get_object_or_404\n'), ((1277, 1305), 'obb.serializers.serializers.PlatformSerializer', 'PlatformSerializer', (['platform'], {}), '(platform)\n', (1295, 1305), False, 'from obb.serializers.serializers import PlatformSerializer\n'), ((1322, 1347), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (1330, 1347), False, 'from rest_framework.response import Response\n'), ((1563, 1590), 'obb.models.platform.Platform.objects.get', 'Platform.objects.get', ([], 
{'pk': 'pk'}), '(pk=pk)\n', (1583, 1590), False, 'from obb.models.platform import Platform, Train\n'), ((791, 810), 'obb.models.platform.Train.objects.all', 'Train.objects.all', ([], {}), '()\n', (808, 810), False, 'from obb.models.platform import Platform, Train\n'), ((831, 870), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['queryset'], {'pk': 'data[3]'}), '(queryset, pk=data[3])\n', (848, 870), False, 'from django.shortcuts import get_object_or_404\n'), ((1741, 1760), 'obb.models.platform.Train.objects.all', 'Train.objects.all', ([], {}), '()\n', (1758, 1760), False, 'from obb.models.platform import Platform, Train\n'), ((1781, 1820), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['queryset'], {'pk': 'data[1]'}), '(queryset, pk=data[1])\n', (1798, 1820), False, 'from django.shortcuts import get_object_or_404\n'), ((1959, 1987), 'obb.serializers.serializers.PlatformSerializer', 'PlatformSerializer', (['platform'], {}), '(platform)\n', (1977, 1987), False, 'from obb.serializers.serializers import PlatformSerializer\n')]
|
import csv
import os
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
#from sklearn.neighbors import NearestNeighbors
from path import Path
from vector_math import *
from find_matches import *
import search_matches
#********************
#**** this compares two sets of angles to see how close the two paths are
#********************
#@profile
def compare_two_sets_of_angles(path1, path2):
match_comparison = []
max_distance = 0
if len(path2.angles>0):
angles2 = path2.angles[:,0]
distances1_2 = path2.angles[:,1]
distances2_2 = path2.angles[:,2]
path2_angles = path2.angles[:,0:3]
path2_angles_test = path2.angles[:,0:3].tolist()
else:
return # if we don't have any angles then break out of the loop and go to the next path
if len(path1.angles>0):
path1_angles = path1.angles[:,0:3]
path1_angles_test = path1.angles[:,0:3].tolist()
else:
return # if we don't have any angles then break out of the loop and go to the next path
angle_tolerance = 4.2
distance_tolerance = 18.0
#cdef int cnt2
matching_angles, num_matched = search_matches.match_angles(path1_angles, path2_angles, angle_tolerance, distance_tolerance)
match_comparison = matching_angles[0:num_matched,:]
## path1 is being compared against path 2
#for cnt in xrange (0, len(path1.angles)):
# angle1 = path1.angles[cnt,0]
# matches = np.where( (abs(path2_angles[:,0]-angle1) <= angle_tolerance) & ( abs(path2_angles[:,1]-path1_angles[cnt,1]) <= 16) & ( abs(path2_angles[:,2]-path1_angles[cnt,2]) <= 16) )
# if (len(matches[0]) > 0):
# match_score = [1, cnt, matches[0][0], 1.0, angle1] # remember this angle
# match_comparison.append(match_score)
#
#
##while( path1_angles_test and path2_angles_test ): # look for matches and pop from the list
## if ( path1_angles_test[-1][0] > path2_angles_test[-1][0] ): # pop the highest angle
## path1_angles_test.pop()
## else:
## path2_angles_test.pop()
#
#for cnt, match_check in enumerate(match_comparison):
# if ( abs(match_check[0]-matching_angles[cnt,0]) > .01 ):
# print("here 1",abs(match_check[0]-matching_angles[cnt,0]) )
# print(0,cnt, match_check[0], matching_angles[cnt,0])
# sys.exit(0)
# if ( match_check[1] != matching_angles[cnt,1] ):
# print(1,cnt, match_check[1], matching_angles[cnt,1])
# sys.exit(0)
# if ( match_check[2] != matching_angles[cnt,2] ):
# print(2, cnt, match_check[2], matching_angles[cnt,2])
# sys.exit(0)
# if ( match_check[3] != matching_angles[cnt,3] ):
# print(3, cnt, match_check[3], matching_angles[cnt,3])
# sys.exit(0)
# if ( abs(match_check[4] - matching_angles[cnt,4]) > .01 ):
# print(4, cnt, match_check[4], matching_angles[cnt,4])
# sys.exit(0)
#
#
#
exact_match_cnt = 0
matched_points1 = []
matched_points2 = []
for cnt, match_score in enumerate(match_comparison):
if (match_score[0] ==1):
exact_match_cnt += 1
loc1 = match_score[1]
loc2 = match_score[2]
# remember all of the matching points
matched_points1.append( match_score[1])
matched_points2.append( match_score[2])
match_found =0
if ( exact_match_cnt >= 2 ):
path1_matching = [path2.routeid, 1, loc1]
for match_point in matched_points1:
path1_matching.append(match_point)
path2_matching = [path1.routeid, 1, loc2]
for match_point in matched_points2:
path2_matching.append(match_point)
path1_matching_angle_list = path1_matching[3:]
path2_matching_angle_list = path2_matching[3:]
if ( exact_match_cnt >= 3 ): # we need at least 3 points to check for an exact match
# loop through each of the angles that was a good match and see how many of the points line up
match_found = 0
for cnt, angle1 in enumerate(path1_matching_angle_list):
angle2 = path2_matching_angle_list[cnt]
if (match_found ==0):
#print
#print
#print
match_found = align_and_score_two_paths(path1, path2, angle1, angle2, path1_matching_angle_list, path2_matching_angle_list )
#path1, path2, match_found = align_and_score_two_paths(path1, path2, angle1, angle2, path1_matching_angle_list, path2_matching_angle_list )
#print
#print
#print
#if (match_found != match_found2):
# print("***************** no match*******************",match_found,match_found2)
if (match_found == 1):
path1.comparison.append( path1_matching ) # remember that we matched and remember which RDP points had a good match
path2.comparison.append( path2_matching )
if (path1.matched<0):
path1.matched=0
if (path2.matched<0):
path2.matched = 0
path1.matched += 1
path2.matched += 1
path1.print_flag = 1
# if we don't have a match, check 2 points to see if we have anything resembling a match
if ( (path1.matched < 0 or path2.matched < 0) and exact_match_cnt >=2):
#if (match_found ==1):
if (len(path1_matching_angle_list) < 5):
pass
# find the distances between each of these angles and see if we can get any matching pairs
for cnt1 in range(0,len(path1_matching_angle_list)-1):
for cnt2 in range(cnt1+1,len(path1_matching_angle_list)):
angle_id_1_1 = path1_matching_angle_list[cnt1]
angle_id_1_2 = path1_matching_angle_list[cnt2]
angle_id_2_1 = path2_matching_angle_list[cnt1]
angle_id_2_2 = path2_matching_angle_list[cnt2]
distance1 = path1.angle_distances[angle_id_1_1, angle_id_1_2]
distance2 = path2.angle_distances[angle_id_2_1, angle_id_2_2]
#if( abs(distance1-distance2) < 30): # if these angles are the same distance, count it
# print ("here 1")
if(distance1 != 0 and distance2 != 0 and abs(distance1-distance2) < 30): # if these angles are the same distance, count it
if (path1.matched < 0):
path1.matched = 0 # these could be a match, so move them off of the definitely not matched list
if (path2.matched < 0):
path2.matched = 0 # these could be match
return
#********************
#**** end this compares two sets of angles to see how close the two paths are
#********************
#**************************************************************************************
#***** this gets the x, y, location of an rdp point
#**************************************************************************************
def get_RDP_xy(path, RDP_point):
#x = path.route[path.feature_loc[RDP_point,2], 0]
#y = path.route[path.feature_loc[RDP_point,2], 1]
# saves time to not assign them to another variable
return path.route[path.feature_loc[RDP_point,2], 0], path.route[path.feature_loc[RDP_point,2], 1]
# ****************************************************************************
# This returns 3 RDP points for each angle
# ********************************************************************
def get_RDP_point_from_angle(path, angle_num):
#path_rdp1 = path.angles[angle_num, 3] # the is the before point
#path_rdp2 = path.angles[angle_num, 4] # center point
#path_rdp3 = path.angles[angle_num, 5] # after point
#return path_rdp1, path_rdp2, path_rdp3
return path.angles[angle_num, 3], path.angles[angle_num, 4], path.angles[angle_num, 5]
# ****************************************************************************
# This returns 3 RDP points for each angle
# ********************************************************************
def get_one_RDP_point_from_angle(path, angle_num):
#path_rdp1 = path.angles[angle_num, 3] # the is the before point
#path_rdp2 = path.angles[angle_num, 4] # center point
#path_rdp3 = path.angles[angle_num, 5] # after point
#return path_rdp1, path_rdp2, path_rdp3
return path.angles[angle_num, 4]
#********************
#**** this aligns two paths and gives a score of that alignment
#********************
#@profile
def align_and_score_two_paths(path1, path2, angle1, angle2, path1_matching_angle_list, path2_matching_angle_list ):
# assign criteria for how closely we have to match teh vector and distance depending on how close the angle is
matching_criteria = [ [2.0, 4.5, 30.0], [3.0, 3.0, 20.0], [4.0, 2.5, 17.0], [15.0, 2.0, 15.0] ]
# find out which feature to center on for point 1
path1_rdp2 = get_one_RDP_point_from_angle(path1, angle1)
# find out which feature to center on for point 2
path2_rdp2 = get_one_RDP_point_from_angle(path2, angle2)
path1_rdp2_x, path1_rdp2_y = get_RDP_xy(path1, path1_rdp2)
path2_rdp2_x, path2_rdp2_y = get_RDP_xy(path2, path2_rdp2)
# center the path1
index_array = np.array([path1_rdp2_x, path1_rdp2_y])
path1.route = np.subtract(path1.route, index_array)
# center the path2
index_array = np.array([path2_rdp2_x, path2_rdp2_y])
path2.route = np.subtract(path2.route, index_array)
path1_rdp2_x, path1_rdp2_y = get_RDP_xy(path1, path1_rdp2)
path2_rdp2_x, path2_rdp2_y = get_RDP_xy(path2, path2_rdp2)
match_found = 0
# try aligning with the other RDP points
for cnt3, path1_aligning_angle in enumerate(path1_matching_angle_list):
good_angle_found_2 = 0
good_distance = 1
if (match_found ==0):
path2_aligning_angle = path2_matching_angle_list[cnt3] # find the MSE error between all of our points
# find out which feature to center on for point 1
path1_aligning_rdp2 = get_one_RDP_point_from_angle(path1, path1_aligning_angle)
# find out which feature to center on for point 2
path2_aligning_rdp2 = get_one_RDP_point_from_angle(path2, path2_aligning_angle)
path1_aligning_rdp2_x, path1_aligning_rdp2_y = get_RDP_xy(path1, path1_aligning_rdp2) #
path2_aligning_rdp2_x, path2_aligning_rdp2_y = get_RDP_xy(path2, path2_aligning_rdp2) #
distance1 = get_distance(path1_rdp2_x, path1_rdp2_y, path1_aligning_rdp2_x, path1_aligning_rdp2_y)
distance2 = get_distance(path2_rdp2_x, path2_rdp2_y, path2_aligning_rdp2_x, path2_aligning_rdp2_y)
if (match_found == 0 and abs(distance1 - distance2) < matching_criteria[0][2]+5 and
path1_rdp2 != path1_aligning_rdp2 and path2_rdp2 != path2_aligning_rdp2 and
path1_rdp2_x != path1_aligning_rdp2_x and path2_rdp2_x != path2_aligning_rdp2_x ):
path1_angle = np.arctan( (path1_rdp2_y-path1_aligning_rdp2_y) / (path1_rdp2_x-path1_aligning_rdp2_x) )
path2_angle = np.arctan( (path2_rdp2_y-path2_aligning_rdp2_y) / (path2_rdp2_x-path2_aligning_rdp2_x) )
path1.rotate_path(path1_angle)
path2.rotate_path(path2_angle) # rotate the paths to the same angle
path1_aligning_rdp2_x, path1_aligning_rdp2_y = get_RDP_xy(path1, path1_aligning_rdp2) #
path2_aligning_rdp2_x, path2_aligning_rdp2_y = get_RDP_xy(path2, path2_aligning_rdp2) #
# if the x signs values of our aligning points don't match, flip the x of number 2
if ( np.sign(path1_aligning_rdp2_x) != np.sign(path2_aligning_rdp2_x) ):
path2.flip_x_coords()
for rotation in range(0,2):
if ( rotation== 1 or rotation== 3): # on the second loop, flip the y coordinates of the second path
path2.flip_y_coords()
close_count = 0
good_angle_found = 0
close_list = []
close_list2 = []
close_list3 = []
for cnt, path1_angle in enumerate(path1_matching_angle_list):
path2_angle = path2_matching_angle_list[cnt] # find the MSE error between all of our points
path1_angle_degrees = path1.angles[path1_angle][0]
path2_angle_degrees = path2.angles[path2_angle][0]
angle_diff = abs(path1_angle_degrees - path2_angle_degrees)
distance_criteria = 30.0 # initially assume it needs to be within 10 meters
vector_criteria = 6.0 # assume it needs to be within 1 degrees
for criteria in matching_criteria:
if (angle_diff <= criteria[0]): # if the angle is less than the criteria, assign the distance and vector criteria
vector_criteria = criteria[1]
distance_criteria = criteria[2]
break
path1_test_rdp1, path1_test_rdp2, path1_test_rdp3 = get_RDP_point_from_angle(path1, path1_angle)
path2_test_rdp1, path2_test_rdp2, path2_test_rdp3 = get_RDP_point_from_angle(path2, path2_angle)
# get the location of the center points of the angle
path1_test_rdp2_x, path1_test_rdp2_y = get_RDP_xy(path1, path1_test_rdp2)
path2_test_rdp2_x, path2_test_rdp2_y = get_RDP_xy(path2, path2_test_rdp2)
# see how close the center points are
distance_off = get_distance(path1_test_rdp2_x, path1_test_rdp2_y, path2_test_rdp2_x, path2_test_rdp2_y)
# see how many points are close to matching, but make sure not to double count any
if ( distance_off < distance_criteria and path1_test_rdp2 not in close_list and path2_test_rdp2 not in close_list2):
if (path1_test_rdp1 < path1_test_rdp2):
path1_test_rdp1 = path1_test_rdp2 - 1
path1_test_rdp3 = path1_test_rdp2 + 1
else:
path1_test_rdp1 = path1_test_rdp2 + 1
path1_test_rdp3 = path1_test_rdp2 - 1
if (path2_test_rdp1 < path2_test_rdp2):
path2_test_rdp1 = path2_test_rdp2 - 1
path2_test_rdp3 = path2_test_rdp2 + 1
else:
path2_test_rdp1 = path2_test_rdp2 + 1
path2_test_rdp3 = path2_test_rdp2 - 1
# the the location of the rdp points adjacent to the center for each angle, to calculate vectors
path1_test_rdp1_x, path1_test_rdp1_y = get_RDP_xy(path1, path1_test_rdp1)
path1_test_rdp3_x, path1_test_rdp3_y = get_RDP_xy(path1, path1_test_rdp3)
path2_test_rdp1_x, path2_test_rdp1_y = get_RDP_xy(path2, path2_test_rdp1)
path2_test_rdp3_x, path2_test_rdp3_y = get_RDP_xy(path2, path2_test_rdp3)
# get the unit vectors for the path
path1_vector1 = [ path1_test_rdp2_x - path1_test_rdp1_x, path1_test_rdp2_y - path1_test_rdp1_y]
path1_vector2 = [ path1_test_rdp2_x - path1_test_rdp3_x, path1_test_rdp2_y - path1_test_rdp3_y]
path2_vector1 = [ path2_test_rdp2_x - path2_test_rdp1_x, path2_test_rdp2_y - path2_test_rdp1_y]
path2_vector2 = [ path2_test_rdp2_x - path2_test_rdp3_x, path2_test_rdp2_y - path2_test_rdp3_y]
# get the angle between path1 vector1 and path2 vector1 and 2
# and the angle between path2 vector2 and path2 vector1 and 2
angle1_1 = angle_between(path1_vector1, path2_vector1) * 57.2957795130823 # the angle of the angle in degrees
angle2_1 = angle_between(path1_vector2, path2_vector1) * 57.2957795130823 # the angle of the angle in degrees
angle1_2 = angle_between(path1_vector1, path2_vector2) * 57.2957795130823 # the angle of the angle in degrees
angle2_2 = angle_between(path1_vector2, path2_vector2) * 57.2957795130823 # the angle of the angle in degrees
not_a_match=1
# see if the first vector and the vector from path 2 are mostly aligned
if ( angle1_1 < vector_criteria or angle1_1 > (180-vector_criteria) or angle1_2 < vector_criteria or angle1_2 > (180-vector_criteria)):
# see if the second vector from path1 is mostly aligned with a vector from path 1
if ( angle2_1 < vector_criteria or angle2_1 > (180-vector_criteria) or angle2_2 < vector_criteria or angle2_2 > (180-vector_criteria)):
not_a_match=0 # this is a good enough match to continue
if (not_a_match ==0): # if the vectors are properly aligned
close_count += 1
close_list.append( path1_test_rdp2)
close_list2.append( path2_test_rdp2)
close_list3.append( [path1_test_rdp2, path2_test_rdp2] )
if (path1_angle_degrees < 135): # look for angles that aren't completely flat
good_angle_found =1
#if (path1_angle_degrees < 160): # look for angles that aren't completely flat
# good_angle_found_2 = 1
#if ( angle1_1 > 6 and angle1_1 < (180-6) and angle1_2 > 6 and angle1_2 < (180-6)):
# good_distance = 0
if ( close_count >= 3): # hold onto the lowest error case
#close_list3.sort()
#matching_distance_count = 0
#for rdp_cnt in range(0,len(close_list3)-1):
# rdp1_1 = close_list3[rdp_cnt][0] # get the path distance betwee these points
# rdp1_2 = close_list3[rdp_cnt+1][0]
#
# rdp2_1 = close_list3[rdp_cnt][1]
# rdp2_2 = close_list3[rdp_cnt+1][1]
#
# route_distance1 = path1.get_route_distance(int(path1.feature_loc[rdp1_1,2]), int(path1.feature_loc[rdp1_2,2]))
# route_distance2 = path2.get_route_distance(int(path2.feature_loc[rdp2_1,2]), int(path2.feature_loc[rdp2_2,2]))
#
# max_distance = max(route_distance1,route_distance2)
# min_distance = min(route_distance1,route_distance2)
#
# if (max_distance/min_distance < 1.25 or max_distance-min_distance < 20):
# matching_distance_count+=1
#
#if (matching_distance_count < 2):
# path1.print_flag = 1
matching_distance_count = 0
diff1 = max(close_list) - min(close_list)
diff2 = max(close_list2) - min(close_list2)
if (close_count >=5 or good_angle_found==1 or diff1 > 5 or diff2>5):
close_list3.sort()
matching_distance_count = 0
#print(path1.routeid, path2.routeid)
for rdp_cnt in range(0,len(close_list3)-1):
rdp1_1 = close_list3[rdp_cnt][0] # get the path distance betwee these points
rdp1_2 = close_list3[rdp_cnt+1][0]
rdp2_1 = close_list3[rdp_cnt][1]
rdp2_2 = close_list3[rdp_cnt+1][1]
#route_distance1 = path1.get_route_distance(int(path1.feature_loc[rdp1_1,2]), int(path1.feature_loc[rdp1_2,2]))
#route_distance2 = path2.get_route_distance(int(path2.feature_loc[rdp2_1,2]), int(path2.feature_loc[rdp2_2,2]))
path1_segment_start = int(path1.feature_loc[rdp1_1,2])
path1_segment_end = int(path1.feature_loc[rdp1_2,2])
path2_segment_start = int(path2.feature_loc[rdp2_1,2])
path2_segment_end = int(path2.feature_loc[rdp2_2,2])
max_distance = 0
max_distance = search_matches.max_distance_between_segments(path1.route, path2.route, path1_segment_start, path1_segment_end, \
path2_segment_start, path2_segment_end)
#print("Max distance is ",max_distance)
if ( max_distance < 18):
matching_distance_count+=1
#if (matching_distance_count < 2):
# path1.print_flag = 1
if (matching_distance_count >= 2):
# the current RDP has a problem with matching up gentle curves
# to combat this, we will look for either, 4 matching points, or 1 point with a sharp enough turn
# which I am starting to SWAG at 145 degrees, or that the three matching RDP points aren't all in a row
# for either path1 or path2
if (close_count >=5 or good_angle_found==1): # if we have at least 4 matches, or 1 of them was a good angle, count it
match_found = 1
#if (good_distance ==0):
# path1.print_flag = 1
# #print("here1")
return match_found
else:
diff1 = max(close_list) - min(close_list)
diff2 = max(close_list2) - min(close_list2)
if (diff1 > 5 or diff2>5): # if all of the RDP points aren't sequential then count it
match_found = 1
#if (good_distance ==0):
# path1.print_flag = 1
# #print("here2")
return match_found
return match_found
#********************
#**** this aligns and orients two matching paths the same before plotting and saving them two a file for viewing
#********************
def align_two_paths(path1, path2,driver_id,rdp_tolerance):
path1_matching_angle_list = path1.comparison[-1][3:]
path2_matching_angle_list = path2.comparison[-1][3:]
# loop through each of the angles that was a good match, and see which one makes the lowest error when they are aligned
match_found = 0
for cnt, angle1 in enumerate(path1_matching_angle_list):
angle2 = path2_matching_angle_list[cnt]
if (match_found ==0):
match_found = align_and_score_two_paths(path1, path2, angle1, angle2, path1_matching_angle_list, path2_matching_angle_list )
#print ("here2")
#print("match_found is ",match_found)
if (match_found == 1):
# if one path is a lot longer than the other, zoom in on the shorter one
#if (path1.distance < path2.distance / 5.0 or path2.distance < path1.distance / 5.0):
x1_max = np.amax ( path1.route[:,0] )
x1_min = np.amin ( path1.route[:,0] )
x2_max = np.amax ( path2.route[:,0] )
x2_min = np.amin ( path2.route[:,0] )
y1_max = np.amax ( path1.route[:,1] )
y1_min = np.amin ( path1.route[:,1] )
y2_max = np.amax ( path2.route[:,1] )
y2_min = np.amin ( path2.route[:,1] )
x_upper_bound = min( x1_max, x2_max) + 500
x_lower_bound = max( x1_min, x2_min) - 500
y_upper_bound = min( y1_max, y2_max) + 500
y_lower_bound = max( y1_min, y2_min) - 500
x_upper_bound2 = min( x1_max + 250, x2_max + 250, 1000)
x_lower_bound2 = max( x1_min - 250, x2_min - 250, -1000)
y_upper_bound2 = min( y1_max + 250, y2_max + 250, 1000)
y_lower_bound2 = max( y1_min - 250, y2_min - 250, -1000)
plt.figure()
plt.plot(path1.route[:,0],path1.route[:,1],markersize=2.0)
plt.plot(path2.route[:,0],path2.route[:,1],markersize=2.0)
feature_list1 = []
feature_list2 = []
for cnt, path1_angle in enumerate(path1_matching_angle_list):
path2_angle = path2_matching_angle_list[cnt] # find the MSE error between all of our points
path1_test_rdp1, path1_test_rdp2, path1_test_rdp3 = get_RDP_point_from_angle(path1, path1_angle)
path2_test_rdp1, path2_test_rdp2, path2_test_rdp3 = get_RDP_point_from_angle(path2, path2_angle)
path1_test_rdp2_x, path1_test_rdp2_y = get_RDP_xy(path1, path1_test_rdp2)
path2_test_rdp2_x, path2_test_rdp2_y = get_RDP_xy(path2, path2_test_rdp2)
feature_list1.append( [path1_test_rdp2_x, path1_test_rdp2_y] )
feature_list2.append( [path2_test_rdp2_x, path2_test_rdp2_y] )
# #* Temporary
path1.update_feature_loc()
path2.update_feature_loc()
path1_features = path1.feature_loc[:,0:2]
path2_features = path2.feature_loc[:,0:2]
#plt.scatter(path1_features[:,0],path1_features[:,1])
#plt.scatter(path2_features[:,0],path2_features[:,1])
# #* Temporary
#file1 = open("test1.csv",'wb')
#file1_csv = csv.writer(file1)
#for angle in path1.angles:
# file1_csv.writerow(angle)
#file1.close()
#file2 = open("test2.csv",'wb')
#file2_csv = csv.writer(file2)
#for angle in path2.angles:
# file2_csv.writerow(angle)
#file2.close()
feature_list1 = np.array(feature_list1)
plt.scatter(feature_list1[:,0],feature_list1[:,1],c='red')
feature_list2 = np.array(feature_list2)
plt.scatter(feature_list2[:,0],feature_list2[:,1],c='red')
plt.show()
#print ("here 3")
# if one path is a lot longer than the other, zoom in on the shorter one
if (path1.distance < path2.distance / 5.0 or path2.distance < path1.distance / 5.0):
plt.axis( (x_lower_bound, x_upper_bound, y_lower_bound, y_upper_bound) )
#else:
# plt.axis( (x_lower_bound2, x_upper_bound2, y_lower_bound2, y_upper_bound2) )
#plt.show()
plt.savefig("Test_Set\\Driver_" + str(driver_id)+"_" + str(path1.routeid) + "__" + str(path2.routeid) +"__"+ str(rdp_tolerance)+"m.png")
#plt.savefig("Test_Set\\Driver_1_" + str(path2.routeid) + "__" + str(path1.routeid) +".png")
plt.close()
return
#********************
#**** end aligns and orients two matching paths the same before plotting and saving them two a file for viewing
#********************
|
[
"search_matches.match_angles",
"matplotlib.pyplot.show",
"numpy.amin",
"numpy.subtract",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"numpy.amax",
"matplotlib.pyplot.figure",
"search_matches.max_distance_between_segments",
"numpy.array",
"numpy.sign",
"numpy.arctan"
] |
[((1257, 1353), 'search_matches.match_angles', 'search_matches.match_angles', (['path1_angles', 'path2_angles', 'angle_tolerance', 'distance_tolerance'], {}), '(path1_angles, path2_angles, angle_tolerance,\n distance_tolerance)\n', (1284, 1353), False, 'import search_matches\n'), ((9746, 9784), 'numpy.array', 'np.array', (['[path1_rdp2_x, path1_rdp2_y]'], {}), '([path1_rdp2_x, path1_rdp2_y])\n', (9754, 9784), True, 'import numpy as np\n'), ((9803, 9840), 'numpy.subtract', 'np.subtract', (['path1.route', 'index_array'], {}), '(path1.route, index_array)\n', (9814, 9840), True, 'import numpy as np\n'), ((9884, 9922), 'numpy.array', 'np.array', (['[path2_rdp2_x, path2_rdp2_y]'], {}), '([path2_rdp2_x, path2_rdp2_y])\n', (9892, 9922), True, 'import numpy as np\n'), ((9941, 9978), 'numpy.subtract', 'np.subtract', (['path2.route', 'index_array'], {}), '(path2.route, index_array)\n', (9952, 9978), True, 'import numpy as np\n'), ((24342, 24368), 'numpy.amax', 'np.amax', (['path1.route[:, 0]'], {}), '(path1.route[:, 0])\n', (24349, 24368), True, 'import numpy as np\n'), ((24387, 24413), 'numpy.amin', 'np.amin', (['path1.route[:, 0]'], {}), '(path1.route[:, 0])\n', (24394, 24413), True, 'import numpy as np\n'), ((24432, 24458), 'numpy.amax', 'np.amax', (['path2.route[:, 0]'], {}), '(path2.route[:, 0])\n', (24439, 24458), True, 'import numpy as np\n'), ((24477, 24503), 'numpy.amin', 'np.amin', (['path2.route[:, 0]'], {}), '(path2.route[:, 0])\n', (24484, 24503), True, 'import numpy as np\n'), ((24531, 24557), 'numpy.amax', 'np.amax', (['path1.route[:, 1]'], {}), '(path1.route[:, 1])\n', (24538, 24557), True, 'import numpy as np\n'), ((24576, 24602), 'numpy.amin', 'np.amin', (['path1.route[:, 1]'], {}), '(path1.route[:, 1])\n', (24583, 24602), True, 'import numpy as np\n'), ((24621, 24647), 'numpy.amax', 'np.amax', (['path2.route[:, 1]'], {}), '(path2.route[:, 1])\n', (24628, 24647), True, 'import numpy as np\n'), ((24666, 24692), 'numpy.amin', 'np.amin', (['path2.route[:, 1]'], 
{}), '(path2.route[:, 1])\n', (24673, 24692), True, 'import numpy as np\n'), ((25220, 25232), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25230, 25232), True, 'import matplotlib.pyplot as plt\n'), ((25240, 25302), 'matplotlib.pyplot.plot', 'plt.plot', (['path1.route[:, 0]', 'path1.route[:, 1]'], {'markersize': '(2.0)'}), '(path1.route[:, 0], path1.route[:, 1], markersize=2.0)\n', (25248, 25302), True, 'import matplotlib.pyplot as plt\n'), ((25306, 25368), 'matplotlib.pyplot.plot', 'plt.plot', (['path2.route[:, 0]', 'path2.route[:, 1]'], {'markersize': '(2.0)'}), '(path2.route[:, 0], path2.route[:, 1], markersize=2.0)\n', (25314, 25368), True, 'import matplotlib.pyplot as plt\n'), ((26892, 26915), 'numpy.array', 'np.array', (['feature_list1'], {}), '(feature_list1)\n', (26900, 26915), True, 'import numpy as np\n'), ((26923, 26985), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_list1[:, 0]', 'feature_list1[:, 1]'], {'c': '"""red"""'}), "(feature_list1[:, 0], feature_list1[:, 1], c='red')\n", (26934, 26985), True, 'import matplotlib.pyplot as plt\n'), ((27013, 27036), 'numpy.array', 'np.array', (['feature_list2'], {}), '(feature_list2)\n', (27021, 27036), True, 'import numpy as np\n'), ((27044, 27106), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_list2[:, 0]', 'feature_list2[:, 1]'], {'c': '"""red"""'}), "(feature_list2[:, 0], feature_list2[:, 1], c='red')\n", (27055, 27106), True, 'import matplotlib.pyplot as plt\n'), ((27115, 27125), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27123, 27125), True, 'import matplotlib.pyplot as plt\n'), ((27817, 27828), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (27826, 27828), True, 'import matplotlib.pyplot as plt\n'), ((11578, 11672), 'numpy.arctan', 'np.arctan', (['((path1_rdp2_y - path1_aligning_rdp2_y) / (path1_rdp2_x -\n path1_aligning_rdp2_x))'], {}), '((path1_rdp2_y - path1_aligning_rdp2_y) / (path1_rdp2_x -\n path1_aligning_rdp2_x))\n', (11587, 11672), True, 
'import numpy as np\n'), ((11691, 11785), 'numpy.arctan', 'np.arctan', (['((path2_rdp2_y - path2_aligning_rdp2_y) / (path2_rdp2_x -\n path2_aligning_rdp2_x))'], {}), '((path2_rdp2_y - path2_aligning_rdp2_y) / (path2_rdp2_x -\n path2_aligning_rdp2_x))\n', (11700, 11785), True, 'import numpy as np\n'), ((27355, 27425), 'matplotlib.pyplot.axis', 'plt.axis', (['(x_lower_bound, x_upper_bound, y_lower_bound, y_upper_bound)'], {}), '((x_lower_bound, x_upper_bound, y_lower_bound, y_upper_bound))\n', (27363, 27425), True, 'import matplotlib.pyplot as plt\n'), ((12251, 12281), 'numpy.sign', 'np.sign', (['path1_aligning_rdp2_x'], {}), '(path1_aligning_rdp2_x)\n', (12258, 12281), True, 'import numpy as np\n'), ((12285, 12315), 'numpy.sign', 'np.sign', (['path2_aligning_rdp2_x'], {}), '(path2_aligning_rdp2_x)\n', (12292, 12315), True, 'import numpy as np\n'), ((21339, 21497), 'search_matches.max_distance_between_segments', 'search_matches.max_distance_between_segments', (['path1.route', 'path2.route', 'path1_segment_start', 'path1_segment_end', 'path2_segment_start', 'path2_segment_end'], {}), '(path1.route, path2.route,\n path1_segment_start, path1_segment_end, path2_segment_start,\n path2_segment_end)\n', (21383, 21497), False, 'import search_matches\n')]
|
from matplotlib import pyplot as plt
from fastai.callback import Callback
from fastai.callbacks import hook_output
def request_lr(learn, **kwargs):
    """Run the LR finder, display the loss curve, and prompt the user to type
    in a learning rate, which is returned as a float."""
    learn.lr_find(**kwargs)
    learn.recorder.plot()
    plt.show()
    chosen = input('Select LR: ')
    return float(chosen)
def auto_lr(learn, **kwargs):
    """Run the LR finder and return the rate at the steepest loss gradient
    (``recorder.min_grad_lr``) without asking the user."""
    learn.lr_find(**kwargs)
    recorder = learn.recorder
    recorder.plot()
    return recorder.min_grad_lr
def transfer_from_dae(learn_cls, learn_dae):
    """Copy the embedding, continuous-BN and hidden-layer weights from a
    denoising-autoencoder learner into a classifier learner.

    The final layer of ``layers`` is deliberately excluded so the classifier
    keeps its own output head.
    """
    src = learn_dae.model
    dst = learn_cls.model
    dst.embeds.load_state_dict(src.embeds.state_dict())
    dst.bn_cont.load_state_dict(src.bn_cont.state_dict())
    dst.layers[:-1].load_state_dict(src.layers[:-1].state_dict())
def freeze_layer(m):
    """Disable gradient updates for every parameter of module ``m``."""
    for p in m.parameters():
        p.requires_grad = False
def freeze_but_last(learn):
    """Freeze the whole tabular model except its final layer: embeddings,
    continuous batch-norm, and every hidden layer but the last."""
    for part in (learn.model.embeds, learn.model.bn_cont, learn.model.layers[:-1]):
        freeze_layer(part)
def unfreeze_all(learn):
    """Re-enable gradient updates for every parameter of the learner's model."""
    for p in learn.model.parameters():
        p.requires_grad = True
class StoreHook(Callback):
    """fastai callback that records a module's forward outputs on every
    non-training (validation) batch via a fastai output hook."""

    def __init__(self, module):
        super().__init__()
        # hook_output registers a forward hook whose latest result is
        # exposed as ``.stored``.
        self.custom_hook = hook_output(module)
        self.outputs = []

    def on_batch_end(self, train, **kwargs):
        # Collect activations only outside of training.
        if not train:
            self.outputs.append(self.custom_hook.stored)
|
[
"fastai.callbacks.hook_output",
"matplotlib.pyplot.show"
] |
[((224, 234), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (232, 234), True, 'from matplotlib import pyplot as plt\n'), ((1131, 1150), 'fastai.callbacks.hook_output', 'hook_output', (['module'], {}), '(module)\n', (1142, 1150), False, 'from fastai.callbacks import hook_output\n')]
|
import asyncio
import discord
from discord.ext import commands
from crimsobot.bot import CrimsoBOT
from crimsobot.utils import tarot
from crimsobot.utils import tools as c
from crimsobot.utils.tarot import Deck, Suit
class Mystery(commands.Cog):
    # Cog bundling the tarot commands: the ">tarot" group, three spread
    # subcommands (one / ppf / five) and an interactive card browser.
    def __init__(self, bot: CrimsoBOT):
        # The bot reference is needed for wait_for() in the interactive prompts.
        self.bot = bot
    @commands.group(invoke_without_command=True, brief='Delve into the mysteries of tarot.')
    async def tarot(self, ctx: commands.Context) -> None:
        """Do you seek wisdom and guidance?
        Unveil the Mysteries of the past, the present, and the future with a tarot reading.
        A brief meaning of each card appears next to its name.
        Meditate deeply upon the words of wise crimsoBOT, and all shall become clear...
        You may choose to have a specific question in mind before you ask for your cards.
        However, taking a reading without a question in mind
        may help coax from you the reason you seek the tarot's guidance.
        """
        # if no subcommand provided, give a three-card reading
        await self.ppf(ctx)
    @tarot.command(name='one', brief='Get a single reading.')
    @commands.cooldown(3, 300, commands.BucketType.user)
    async def one(self, ctx: commands.Context, spread: str = 'one') -> None:
        """This single-card reading is your answer to any question you may have."""
        # NOTE: inside the method body 'tarot' resolves to the imported
        # crimsobot.utils.tarot module (the decorator above used the command
        # group of the same name, which only shadows it at class-body scope).
        fp, descriptions = await tarot.reading(spread)
        filename = 'reading.png'
        # NOTE(review): the literal duplicates `filename`; ppf/five pass the
        # variable instead -- same value, just a small inconsistency.
        f = discord.File(fp, 'reading.png')
        embed = c.crimbed(
            title="{}'s reading".format(ctx.author),
            descr=None,
            attachment=filename,
            footer='Type ">tarot card" for more on a specific card.',
        )
        # Single-card spread: only the first (name, meaning, text) tuple is shown.
        card_tuple = descriptions[0]
        embed.add_field(name=card_tuple[0], value='**{}**\n{}'.format(card_tuple[1], card_tuple[2]))
        await ctx.send(file=f, embed=embed)
    @tarot.command(name='ppf', brief='Past, present, and future.')
    @commands.cooldown(3, 300, commands.BucketType.user)
    async def ppf(self, ctx: commands.Context, spread: str = 'ppf') -> None:
        """This three-card spread is read from left to right to explore your past, present, and future."""
        fp, descriptions = await tarot.reading(spread)
        filename = 'reading.png'
        f = discord.File(fp, filename)
        embed = c.crimbed(
            title="{}'s reading".format(ctx.author),
            descr=None,
            attachment=filename,
            footer='Type ">tarot card" for more on a specific card.',
        )
        # One embed field per drawn card: name, then bolded meaning + text.
        for card_tuple in descriptions:
            embed.add_field(name=card_tuple[0], value='**{}**\n{}'.format(card_tuple[1], card_tuple[2]))
        await ctx.send(file=f, embed=embed)
    @tarot.command(name='five', brief='Look deeper into your Reason and Potential.')
    @commands.cooldown(3, 300, commands.BucketType.user)
    async def five(self, ctx: commands.Context, spread: str = 'five') -> None:
        """This spread delves deeper into the present, exploring your Reason for seeking guidance.
        The Future card speaks toward the outcome should you stay on your current path.
        The Potential card looks toward the outcome should you change paths."""
        fp, descriptions = await tarot.reading(spread)
        filename = 'reading.png'
        f = discord.File(fp, filename)
        embed = c.crimbed(
            title="{}'s reading".format(ctx.author),
            descr=None,
            attachment=filename,
            footer='Type ">tarot card" for more on a specific card.',
        )
        for card_tuple in descriptions:
            embed.add_field(name=card_tuple[0], value='**{}**\n{}'.format(card_tuple[1], card_tuple[2]))
        await ctx.send(file=f, embed=embed)
    @tarot.command(name='card', brief='Inspect an individual card.')
    async def card(self, ctx: commands.Context) -> None:
        """Inspect an individual tarot card. A longer description is given for each."""
        # the suits
        suits = [s for s in Suit]
        suit_list = []
        for idx, suit in enumerate(suits):
            suit_list.append('{}. {}'.format(idx + 1, suit))
        # prompt 1 of 2: choose suit
        embed = c.crimbed(
            title='Choose a suit:',
            descr='\n'.join(suit_list),
            thumb_name='wizard',
            footer='Type the number to choose.'
        )
        prompt_suit = await ctx.send(embed=embed)
        # define check for suit: same author, same channel, number in range
        def suit_check(msg: discord.Message) -> bool:
            try:
                valid_choice = 0 < int(msg.content) <= len(suits)
                in_channel = msg.channel == ctx.message.channel
                is_author = msg.author == ctx.message.author
                return valid_choice and in_channel and is_author
            except ValueError:
                return False
        # wait for user to specify a suit (45 s before the prompt is withdrawn)
        try:
            msg = await self.bot.wait_for('message', check=suit_check, timeout=45)
        except asyncio.TimeoutError:
            await prompt_suit.delete()
            return
        await prompt_suit.delete()
        # defensive: wait_for raises on timeout, so msg should never be None here
        if msg is None:
            return
        suit_choice = int(msg.content)
        await msg.delete()
        # prompt 2 of 2: choose card in suit
        suit = suits[suit_choice - 1]
        cards = await Deck.get_cards_in_suit(suit)
        card_list = []
        for card in cards:
            card_list.append('{}. {}'.format(card.number, card.name))
        embed = c.crimbed(
            title='Choose a card:',
            descr='\n'.join(card_list),
            thumb_name='wizard',
            footer='Type the number to choose.',
        )
        prompt_card = await ctx.send(embed=embed)
        # define check for card (note: the comprehension variable 'c' below is
        # scoped to the comprehension and does not shadow the tools module 'c')
        def card_check(msg: discord.Message) -> bool:
            try:
                card_numbers = [c.number for c in cards]
                valid_choice = int(msg.content) in card_numbers
                in_channel = msg.channel == ctx.message.channel
                is_author = msg.author == ctx.message.author
                return valid_choice and in_channel and is_author
            except ValueError:
                return False
        # wait for user to specify a card (shorter 20 s window)
        try:
            msg = await self.bot.wait_for('message', check=card_check, timeout=20)
        except asyncio.TimeoutError:
            await prompt_card.delete()
            return
        await prompt_card.delete()
        if msg is None:
            return
        card_number = int(msg.content)
        await msg.delete()
        # render the chosen card with its full description
        card = await Deck.get_card(suit, card_number)
        fp = await card.get_image_buff()
        filename = 'card.png'
        f = discord.File(fp, filename)
        embed = c.crimbed(
            title='**{}**'.format(card.name.upper()),
            descr='\n'.join([
                '**Element:** {}'.format(card.element),
                '**Upright:** {}'.format(card.description_upright),
                '**Reversed:** {}'.format(card.description_reversed),
                '\n{}'.format(card.description_long),
            ]),
            attachment=filename,
        )
        await ctx.send(file=f, embed=embed)
def setup(bot: CrimsoBOT) -> None:
    # Standard discord.py extension entry point: register this cog on load.
    bot.add_cog(Mystery(bot))
|
[
"crimsobot.utils.tarot.command",
"discord.File",
"crimsobot.utils.tarot.reading",
"crimsobot.utils.tarot.Deck.get_card",
"discord.ext.commands.cooldown",
"discord.ext.commands.group",
"crimsobot.utils.tarot.Deck.get_cards_in_suit"
] |
[((319, 411), 'discord.ext.commands.group', 'commands.group', ([], {'invoke_without_command': '(True)', 'brief': '"""Delve into the mysteries of tarot."""'}), "(invoke_without_command=True, brief=\n 'Delve into the mysteries of tarot.')\n", (333, 411), False, 'from discord.ext import commands\n'), ((1091, 1147), 'crimsobot.utils.tarot.command', 'tarot.command', ([], {'name': '"""one"""', 'brief': '"""Get a single reading."""'}), "(name='one', brief='Get a single reading.')\n", (1104, 1147), False, 'from crimsobot.utils import tarot\n'), ((1153, 1204), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(3)', '(300)', 'commands.BucketType.user'], {}), '(3, 300, commands.BucketType.user)\n', (1170, 1204), False, 'from discord.ext import commands\n'), ((1907, 1968), 'crimsobot.utils.tarot.command', 'tarot.command', ([], {'name': '"""ppf"""', 'brief': '"""Past, present, and future."""'}), "(name='ppf', brief='Past, present, and future.')\n", (1920, 1968), False, 'from crimsobot.utils import tarot\n'), ((1974, 2025), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(3)', '(300)', 'commands.BucketType.user'], {}), '(3, 300, commands.BucketType.user)\n', (1991, 2025), False, 'from discord.ext import commands\n'), ((2753, 2832), 'crimsobot.utils.tarot.command', 'tarot.command', ([], {'name': '"""five"""', 'brief': '"""Look deeper into your Reason and Potential."""'}), "(name='five', brief='Look deeper into your Reason and Potential.')\n", (2766, 2832), False, 'from crimsobot.utils import tarot\n'), ((2838, 2889), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(3)', '(300)', 'commands.BucketType.user'], {}), '(3, 300, commands.BucketType.user)\n', (2855, 2889), False, 'from discord.ext import commands\n'), ((3779, 3842), 'crimsobot.utils.tarot.command', 'tarot.command', ([], {'name': '"""card"""', 'brief': '"""Inspect an individual card."""'}), "(name='card', brief='Inspect an individual card.')\n", (3792, 3842), False, 'from crimsobot.utils import 
tarot\n'), ((1467, 1498), 'discord.File', 'discord.File', (['fp', '"""reading.png"""'], {}), "(fp, 'reading.png')\n", (1479, 1498), False, 'import discord\n'), ((2311, 2337), 'discord.File', 'discord.File', (['fp', 'filename'], {}), '(fp, filename)\n', (2323, 2337), False, 'import discord\n'), ((3337, 3363), 'discord.File', 'discord.File', (['fp', 'filename'], {}), '(fp, filename)\n', (3349, 3363), False, 'import discord\n'), ((6747, 6773), 'discord.File', 'discord.File', (['fp', 'filename'], {}), '(fp, filename)\n', (6759, 6773), False, 'import discord\n'), ((1400, 1421), 'crimsobot.utils.tarot.reading', 'tarot.reading', (['spread'], {}), '(spread)\n', (1413, 1421), False, 'from crimsobot.utils import tarot\n'), ((2244, 2265), 'crimsobot.utils.tarot.reading', 'tarot.reading', (['spread'], {}), '(spread)\n', (2257, 2265), False, 'from crimsobot.utils import tarot\n'), ((3270, 3291), 'crimsobot.utils.tarot.reading', 'tarot.reading', (['spread'], {}), '(spread)\n', (3283, 3291), False, 'from crimsobot.utils import tarot\n'), ((5358, 5386), 'crimsobot.utils.tarot.Deck.get_cards_in_suit', 'Deck.get_cards_in_suit', (['suit'], {}), '(suit)\n', (5380, 5386), False, 'from crimsobot.utils.tarot import Deck, Suit\n'), ((6630, 6662), 'crimsobot.utils.tarot.Deck.get_card', 'Deck.get_card', (['suit', 'card_number'], {}), '(suit, card_number)\n', (6643, 6662), False, 'from crimsobot.utils.tarot import Deck, Suit\n')]
|
from flask import jsonify, redirect, request, session
from . import middle
import requests, os
from urllib.parse import unquote, quote
@middle.route('/ebay/get_token', methods=['POST', 'GET'])
def ebay_auth():
    """Send the client to eBay's OAuth consent page (RuName URL taken from
    the EBAY_RUNAME environment variable)."""
    return redirect(os.environ.get('EBAY_RUNAME'))
@middle.route('/ebay/get_token/response/', methods=['GET', 'POST'])
def ebay_auth_code_response():
    """OAuth redirect target: exchange the authorization code for an eBay
    user access token and return eBay's JSON response verbatim."""
    token_url = 'https://api.ebay.com/identity/v1/oauth2/token'
    # Basic-auth credentials are kept pre-encoded in the environment.
    auth_headers = {
        'Content-Type': "application/x-www-form-urlencoded",
        'Authorization': os.environ.get('EBAY_B64_CREDENTIAL')
    }
    token_request = {
        'grant_type': 'authorization_code',
        'code': request.args['code'],
        'redirect_uri': os.environ.get('EBAY_URI')
    }
    token_response = requests.post(token_url, data=token_request, headers=auth_headers).json()
    return jsonify(token_response)
###################################################################################################################
# {
# "access_token": "v^1.1#i^1#p^3#r^1...XzMjRV4xMjg0",
# "expires_in": 7200,
# "refresh_token": "v^1.1#i^1#p^3#r^1...zYjRV4xMjg0",
# "refresh_token_expires_in": 47304000,
# "token_type": "User Access Token"
# }
# {
# "access_token": "v^1.1#i ... AjRV4yNjA=",
# "expires_in": 7200,
# "token_type":"User Access Token"
# }
@middle.route('/ebay/refresh_token', methods=['POST'])
def ebay_refresh_token():
    """Trade a refresh token (from the POST form) for a fresh access token."""
    token_url = 'https://api.ebay.com/identity/v1/oauth2/token'
    auth_headers = {
        'Content-Type': "application/x-www-form-urlencoded",
        'Authorization': os.environ.get('EBAY_B64_CREDENTIAL')
    }
    refresh_request = {
        'grant_type': 'refresh_token',
        'refresh_token': request.form['refresh_token'],
        'scope': os.environ.get('EBAY_SCOPE_LIST')
    }
    token_response = requests.post(token_url, data=refresh_request, headers=auth_headers).json()
    return jsonify(access_token=token_response['access_token'])
@middle.route('/ebay/get_report')
def ebay_get_report():
    """Return a daily eBay traffic report (views, transactions, conversion
    rate) for the requested marketplace and date range."""
    token = request.headers.get('token')
    if not token:
        # The client must forward a user access token in the 'token' header.
        return jsonify(error='token non ricevuto', error_code=401)
    marketplace = request.args['marketplace']
    start_date = request.args['start_date']
    end_date = request.args['end_date']
    api_url = 'https://api.ebay.com/sell/analytics/v1/traffic_report'
    auth_headers = {'Authorization': 'Bearer ' + token}
    query = {
        "filter": 'marketplace_ids:{' + marketplace + '},date_range:[' + start_date + '..' + end_date + ']',
        "dimension": 'DAY',
        "metric": "LISTING_VIEWS_TOTAL,TRANSACTION,SALES_CONVERSION_RATE"
    }
    payload = requests.get(api_url, headers=auth_headers, params=query).json()
    if 'errors' in payload:
        # eBay rejected the call -- typically an expired token to refresh.
        return jsonify(error='ERRORE: ' + payload.get('errors')[0]['message'], error_code=401)
    report = []
    for record in payload['records']:
        raw_date = record['dimensionValues'][0]['value']  # e.g. '20210131'
        metrics = record['metricValues']
        report.append({
            'date': raw_date[:4] + '/' + raw_date[4:6] + '/' + raw_date[6:8],
            'tot_views': metrics[0]['value'],
            'transactions': metrics[1]['value'],
            'scr': metrics[2]['value']  # SALES_CONVERSION_RATE
        })
    return jsonify(start_report=payload['startDate'][:10],
                   end_report=payload['endDate'][:10],
                   report=report)
|
[
"flask.redirect",
"flask.request.headers.get",
"os.environ.get",
"flask.jsonify",
"requests.get",
"requests.post"
] |
[((223, 252), 'os.environ.get', 'os.environ.get', (['"""EBAY_RUNAME"""'], {}), "('EBAY_RUNAME')\n", (237, 252), False, 'import requests, os\n'), ((264, 277), 'flask.redirect', 'redirect', (['url'], {}), '(url)\n', (272, 277), False, 'from flask import jsonify, redirect, request, session\n'), ((898, 908), 'flask.jsonify', 'jsonify', (['r'], {}), '(r)\n', (905, 908), False, 'from flask import jsonify, redirect, request, session\n'), ((1978, 2020), 'flask.jsonify', 'jsonify', ([], {'access_token': "resp['access_token']"}), "(access_token=resp['access_token'])\n", (1985, 2020), False, 'from flask import jsonify, redirect, request, session\n'), ((2092, 2120), 'flask.request.headers.get', 'request.headers.get', (['"""token"""'], {}), "('token')\n", (2111, 2120), False, 'from flask import jsonify, redirect, request, session\n'), ((589, 626), 'os.environ.get', 'os.environ.get', (['"""EBAY_B64_CREDENTIAL"""'], {}), "('EBAY_B64_CREDENTIAL')\n", (603, 626), False, 'import requests, os\n'), ((789, 815), 'os.environ.get', 'os.environ.get', (['"""EBAY_URI"""'], {}), "('EBAY_URI')\n", (803, 815), False, 'import requests, os\n'), ((1666, 1703), 'os.environ.get', 'os.environ.get', (['"""EBAY_B64_CREDENTIAL"""'], {}), "('EBAY_B64_CREDENTIAL')\n", (1680, 1703), False, 'import requests, os\n'), ((1857, 1890), 'os.environ.get', 'os.environ.get', (['"""EBAY_SCOPE_LIST"""'], {}), "('EBAY_SCOPE_LIST')\n", (1871, 1890), False, 'import requests, os\n'), ((2700, 2753), 'requests.get', 'requests.get', (['url_api'], {'headers': 'headers', 'params': 'params'}), '(url_api, headers=headers, params=params)\n', (2712, 2753), False, 'import requests, os\n'), ((3627, 3699), 'flask.jsonify', 'jsonify', ([], {'start_report': 'start_report', 'end_report': 'end_report', 'report': 'report'}), '(start_report=start_report, end_report=end_report, report=report)\n', (3634, 3699), False, 'from flask import jsonify, redirect, request, session\n'), ((3774, 3825), 'flask.jsonify', 'jsonify', ([], {'error': 
'"""token non ricevuto"""', 'error_code': '(401)'}), "(error='token non ricevuto', error_code=401)\n", (3781, 3825), False, 'from flask import jsonify, redirect, request, session\n'), ((830, 879), 'requests.post', 'requests.post', (['url'], {'data': 'payload', 'headers': 'headers'}), '(url, data=payload, headers=headers)\n', (843, 879), False, 'import requests, os\n'), ((1909, 1958), 'requests.post', 'requests.post', (['url'], {'data': 'payload', 'headers': 'headers'}), '(url, data=payload, headers=headers)\n', (1922, 1958), False, 'import requests, os\n')]
|
import os
import glob
from shutil import copy2
from PIL import Image
import json
import numpy as np
import argparse
import shutil
from skimage import io
from tqdm import tqdm
class NpEncoder(json.JSONEncoder):
    """JSON encoder that converts NumPy scalars and arrays to native
    Python types (int, float, list) before serialization."""

    def default(self, obj):
        # Types are mutually exclusive, so check order does not matter.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        # Anything else: defer to the base class (which raises TypeError).
        return super().default(obj)
def copy_file(src, dst):
    """Mirror directory ``src`` to ``dst``, replacing any existing copy.

    Bug fix: the original called ``os.rmdir``, which raises ``OSError`` for a
    non-empty destination directory; ``shutil.rmtree`` removes the whole tree
    so the subsequent ``copytree`` never fails on an existing directory.
    """
    if os.path.exists(dst):
        shutil.rmtree(dst)
    shutil.copytree(src, dst)
def construct_box(inst_root, label_root, dst):
    """Derive per-instance bounding boxes and class labels from paired
    instance/label maps and write one JSON summary per image into ``dst``.

    Bug fix: ``os.listdir`` returns entries in arbitrary order, so zipping two
    independent listings could pair an instance map with the wrong label map.
    Both listings are now sorted so same-named files line up deterministically.
    """
    inst_list = sorted(os.listdir(inst_root))
    cls_list = sorted(os.listdir(label_root))
    for inst, cls in zip(inst_list, cls_list):
        inst_map = Image.open(os.path.join(inst_root, inst))
        inst_map = np.array(inst_map, dtype=np.int32)
        cls_map = Image.open(os.path.join(label_root, cls))
        cls_map = np.array(cls_map, dtype=np.int32)
        H, W = inst_map.shape
        # get a list of unique instances
        inst_info = {'imgHeight': H, 'imgWidth': W, 'objects': {}}
        inst_ids = np.unique(inst_map)
        for iid in inst_ids:
            if int(iid) <= 0:  # filter out non-instance (background) masks
                continue
            # tight bounding box of all pixels carrying this instance id
            ys, xs = np.where(inst_map == iid)
            ymin, ymax, xmin, xmax = ys.min(), ys.max(), xs.min(), xs.max()
            # median class over the mask's pixels decides the instance label
            cls_label = np.median(cls_map[inst_map == iid])
            inst_info['objects'][str(iid)] = {'bbox': [xmin, ymin, xmax, ymax], 'cls': int(cls_label)}
        # write a per-image summary named after the instance map's stem
        filename = os.path.splitext(os.path.basename(inst))[0]
        savename = os.path.join(dst, filename + '.json')
        with open(savename, 'w') as f:
            json.dump(inst_info, f, cls=NpEncoder)
        print('wrote a bbox summary of %s to %s' % (inst, savename))
def copy_label(src_path, dst_path1, dst_path2):
    # Convert binary masks into class-label maps (dst_path1) and instance
    # maps (dst_path2).
    # NOTE(review): every pixel equal to 255 is remapped to class id 30; any
    # pixel already equal to 30 in the source would also be promoted to
    # 30*1000 below -- assumed not to occur in these masks (TODO confirm).
    for img_name in tqdm(os.listdir(src_path)):
        if '.png' in img_name:
            img = io.imread(os.path.join(src_path, img_name))
            # class-label map: foreground (255) -> class id 30
            img[img == 255] = 30
            io.imsave(os.path.join(dst_path1, img_name), img)
            # instance map: widen to uint16 so the id 30*1000 = 30000 fits
            img = img.astype('uint16')
            img[img == 30] = 30*1000
            io.imsave(os.path.join(dst_path2, img_name), img)
def process_files(source_base_path, target_base_pth, subset, COCO_path):
    """Build the ``<subset>_{img,label,inst,bbox}`` folders under
    ``target_base_pth`` and populate labels, instance maps, optional COCO
    image symlinks and bounding-box summaries."""
    dst_path = {}
    for kind in ('img', 'label', 'inst', 'bbox'):
        folder = os.path.join(target_base_pth, subset + '_' + kind)
        if not os.path.exists(folder):
            os.makedirs(folder)
        dst_path[kind] = folder
    print('process label and inst copy')
    copy_label(source_base_path, dst_path['label'], dst_path['inst'])
    print('process img copy')
    # Symlinking images from the COCO pool is optional (skipped when falsy).
    if COCO_path:
        copy_img_file(source_base_path, dst_path['img'], COCO_path + '/' + subset + '2017')
    construct_box(dst_path['inst'], dst_path['label'], dst_path['bbox'])
def copy_img_file(source_base_path, target_base_path, COCO_path):
    """Symlink COCO source images into ``target_base_path`` for every mask.

    Mask files are named ``<cocoid>_<index>.png``; the link created is
    ``<cocoid>_<index>.jpg`` pointing at ``<COCO_path>/<cocoid>.jpg``.

    Fixes: the original ``print({target_base_path})`` printed a one-element
    set (a leftover f-string missing its ``f``); re-running also crashed with
    ``FileExistsError`` because ``os.symlink`` never overwrites.
    """
    print(target_base_path)
    for filepath in tqdm(os.listdir(source_base_path)):
        if ('.png' in filepath) or ('.jpg' in filepath):
            basename = os.path.basename(filepath).split('.')[0]
            filename = basename.split('_')[0]
            indexid = basename.split('_')[1]
            src = COCO_path + '/' + filename + '.jpg'
            link = target_base_path + '/' + filename + '_' + indexid + '.jpg'
            if os.path.isfile(src):
                # idempotent on re-runs: keep an existing link, don't crash
                if not os.path.lexists(link):
                    os.symlink(src, link)
            else:
                print('File %s.jpg not Found. Please check mannually.' %filename)
# organize image
if __name__ == '__main__':
    # CLI entry point: build the stamp-dataset folders for one object class.
    parser = argparse.ArgumentParser(description='List the content of a folder')
    parser.add_argument('-s', '--subset', help='class for training the model', type=str)
    parser.add_argument('-d', '--datapath',default='/home/yam28/Documents/phdYoop/datasets/COCO', type=str)
    args = parser.parse_args()
    # NOTE(review): only the augmented train split is processed below; the
    # plain train/val paths and COCO_path are computed but currently unused
    # (the final call passes COCO_path=None, so no image symlinks are made).
    source_base_path_train = 'dataset/train/' + args.subset
    source_base_path_train_aug = 'dataset/train/' + args.subset+'_silvia'
    source_base_path_valid = 'dataset/val/' + args.subset
    target_base_pth = 'datasets/stamp_' + args.subset + '_aug'
    COCO_path = args.datapath
    process_files(source_base_path_train_aug, target_base_pth, 'train', None)
[
"json.dump",
"shutil.copytree",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.median",
"os.path.basename",
"os.path.exists",
"os.path.isfile",
"numpy.where",
"numpy.array",
"os.rmdir",
"os.symlink",
"os.path.join",
"os.listdir",
"numpy.unique"
] |
[((558, 577), 'os.path.exists', 'os.path.exists', (['dst'], {}), '(dst)\n', (572, 577), False, 'import os\n'), ((605, 630), 'shutil.copytree', 'shutil.copytree', (['src', 'dst'], {}), '(src, dst)\n', (620, 630), False, 'import shutil\n'), ((696, 717), 'os.listdir', 'os.listdir', (['inst_root'], {}), '(inst_root)\n', (706, 717), False, 'import os\n'), ((733, 755), 'os.listdir', 'os.listdir', (['label_root'], {}), '(label_root)\n', (743, 755), False, 'import os\n'), ((3839, 3906), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""List the content of a folder"""'}), "(description='List the content of a folder')\n", (3862, 3906), False, 'import argparse\n'), ((587, 600), 'os.rmdir', 'os.rmdir', (['dst'], {}), '(dst)\n', (595, 600), False, 'import os\n'), ((924, 958), 'numpy.array', 'np.array', (['inst_map'], {'dtype': 'np.int32'}), '(inst_map, dtype=np.int32)\n', (932, 958), True, 'import numpy as np\n'), ((1073, 1106), 'numpy.array', 'np.array', (['cls_map'], {'dtype': 'np.int32'}), '(cls_map, dtype=np.int32)\n', (1081, 1106), True, 'import numpy as np\n'), ((1261, 1280), 'numpy.unique', 'np.unique', (['inst_map'], {}), '(inst_map)\n', (1270, 1280), True, 'import numpy as np\n'), ((1812, 1849), 'os.path.join', 'os.path.join', (['dst', "(filename + '.json')"], {}), "(dst, filename + '.json')\n", (1824, 1849), False, 'import os\n'), ((2083, 2103), 'os.listdir', 'os.listdir', (['src_path'], {}), '(src_path)\n', (2093, 2103), False, 'import os\n'), ((2591, 2641), 'os.path.join', 'os.path.join', (['target_base_pth', "(subset + '_' + name)"], {}), "(target_base_pth, subset + '_' + name)\n", (2603, 2641), False, 'import os\n'), ((3247, 3275), 'os.listdir', 'os.listdir', (['source_base_path'], {}), '(source_base_path)\n', (3257, 3275), False, 'import os\n'), ((836, 865), 'os.path.join', 'os.path.join', (['inst_root', 'inst'], {}), '(inst_root, inst)\n', (848, 865), False, 'import os\n'), ((988, 1017), 'os.path.join', 'os.path.join', 
(['label_root', 'cls'], {}), '(label_root, cls)\n', (1000, 1017), False, 'import os\n'), ((1417, 1442), 'numpy.where', 'np.where', (['(inst_map == iid)'], {}), '(inst_map == iid)\n', (1425, 1442), True, 'import numpy as np\n'), ((1563, 1598), 'numpy.median', 'np.median', (['cls_map[inst_map == iid]'], {}), '(cls_map[inst_map == iid])\n', (1572, 1598), True, 'import numpy as np\n'), ((1901, 1939), 'json.dump', 'json.dump', (['inst_info', 'f'], {'cls': 'NpEncoder'}), '(inst_info, f, cls=NpEncoder)\n', (1910, 1939), False, 'import json\n'), ((2657, 2681), 'os.path.exists', 'os.path.exists', (['cur_path'], {}), '(cur_path)\n', (2671, 2681), False, 'import os\n'), ((2695, 2716), 'os.makedirs', 'os.makedirs', (['cur_path'], {}), '(cur_path)\n', (2706, 2716), False, 'import os\n'), ((3505, 3556), 'os.path.isfile', 'os.path.isfile', (["(COCO_path + '/' + filename + '.jpg')"], {}), "(COCO_path + '/' + filename + '.jpg')\n", (3519, 3556), False, 'import os\n'), ((1766, 1788), 'os.path.basename', 'os.path.basename', (['inst'], {}), '(inst)\n', (1782, 1788), False, 'import os\n'), ((2165, 2197), 'os.path.join', 'os.path.join', (['src_path', 'img_name'], {}), '(src_path, img_name)\n', (2177, 2197), False, 'import os\n'), ((2254, 2287), 'os.path.join', 'os.path.join', (['dst_path1', 'img_name'], {}), '(dst_path1, img_name)\n', (2266, 2287), False, 'import os\n'), ((2392, 2425), 'os.path.join', 'os.path.join', (['dst_path2', 'img_name'], {}), '(dst_path2, img_name)\n', (2404, 2425), False, 'import os\n'), ((3574, 3685), 'os.symlink', 'os.symlink', (["(COCO_path + '/' + filename + '.jpg')", "(target_base_path + '/' + filename + '_' + indexid + '.jpg')"], {}), "(COCO_path + '/' + filename + '.jpg', target_base_path + '/' +\n filename + '_' + indexid + '.jpg')\n", (3584, 3685), False, 'import os\n'), ((3358, 3384), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (3374, 3384), False, 'import os\n')]
|
import kfp.dsl as dsl
import json
import kfp.components as comp
from collections import OrderedDict
from kubernetes import client as k8s_client
def create_matrix(d1: int, d2: int):
    # Kale-generated pipeline step: runs the notebook cells below inside a
    # Jupyter kernel to build a random d1 x d2 matrix, report d1/d2 as KFP
    # metrics, and marshal the matrix to the shared /marshal volume.
    # NOTE: the triple-quoted strings are executable cell sources -- do not
    # edit their contents by hand; they are regenerated by Kale.
    pipeline_parameters_block = '''
    d1 = {}
    d2 = {}
    '''.format(d1, d2)
    block1 = '''
    import numpy as np
    '''
    block2 = '''
    rnd_matrix = np.random.rand(d1, d2)
    '''
    block3 = '''
    from kale.utils import kfp_utils as _kale_kfp_utils
    _kale_kfp_metrics = {
        "d1": d1,
        "d2": d2
    }
    _kale_kfp_utils.generate_mlpipeline_metrics(_kale_kfp_metrics)
    '''
    data_saving_block = '''
    # -----------------------DATA SAVING START---------------------------------
    from kale.marshal import utils as _kale_marshal_utils
    _kale_marshal_utils.set_kale_data_directory("/marshal")
    _kale_marshal_utils.save(rnd_matrix, "rnd_matrix")
    # -----------------------DATA SAVING END-----------------------------------
    '''
    # run the code blocks inside a jupyter kernel
    from kale.utils.jupyter_utils import run_code as _kale_run_code
    from kale.utils.kfp_utils import \
        update_uimetadata as _kale_update_uimetadata
    blocks = (pipeline_parameters_block,
              block1,
              block2,
              block3,
              data_saving_block)
    html_artifact = _kale_run_code(blocks)
    # Persist the executed-notebook HTML and register it as a UI artifact.
    with open("/create_matrix.html", "w") as f:
        f.write(html_artifact)
    _kale_update_uimetadata('create_matrix')
def sum_matrix():
    # Kale-generated pipeline step: loads the marshalled matrix produced by
    # create_matrix, sums its elements, and reports the sum as a KFP metric.
    # NOTE: the triple-quoted strings are executable cell sources -- do not
    # edit their contents by hand; they are regenerated by Kale.
    data_loading_block = '''
    # -----------------------DATA LOADING START--------------------------------
    from kale.marshal import utils as _kale_marshal_utils
    _kale_marshal_utils.set_kale_data_directory("/marshal")
    _kale_marshal_utils.set_kale_directory_file_names()
    rnd_matrix = _kale_marshal_utils.load("rnd_matrix")
    # -----------------------DATA LOADING END----------------------------------
    '''
    block1 = '''
    import numpy as np
    '''
    block2 = '''
    sum_result = rnd_matrix.sum()
    '''
    block3 = '''
    from kale.utils import kfp_utils as _kale_kfp_utils
    _kale_kfp_metrics = {
        "sum-result": sum_result
    }
    _kale_kfp_utils.generate_mlpipeline_metrics(_kale_kfp_metrics)
    '''
    # run the code blocks inside a jupyter kernel
    from kale.utils.jupyter_utils import run_code as _kale_run_code
    from kale.utils.kfp_utils import \
        update_uimetadata as _kale_update_uimetadata
    blocks = (data_loading_block,
              block1,
              block2,
              block3,
              )
    html_artifact = _kale_run_code(blocks)
    # Persist the executed-notebook HTML and register it as a UI artifact.
    with open("/sum_matrix.html", "w") as f:
        f.write(html_artifact)
    _kale_update_uimetadata('sum_matrix')
# Wrap each step function as a lightweight KFP container op.
create_matrix_op = comp.func_to_container_op(create_matrix)
sum_matrix_op = comp.func_to_container_op(sum_matrix)
@dsl.pipeline(
    name='hp-test-rnd',
    description=''
)
def auto_generated_pipeline(booltest='True', d1='5', d2='6', strtest='test'):
    # Kale-generated KFP pipeline: create_matrix -> sum_matrix, sharing data
    # through a dynamically provisioned /marshal volume.
    # NOTE(review): booltest and strtest are declared pipeline parameters but
    # are never used below -- presumably placeholders for HP tuning.
    pvolumes_dict = OrderedDict()
    volume_step_names = []
    volume_name_parameters = []
    # Provision the shared marshalling volume used by every step.
    marshal_vop = dsl.VolumeOp(
        name="kale_marshal_volume",
        resource_name="kale-marshal-pvc",
        modes=dsl.VOLUME_MODE_RWM,
        size="1Gi"
    )
    volume_step_names.append(marshal_vop.name)
    volume_name_parameters.append(marshal_vop.outputs["name"].full_name)
    pvolumes_dict['/marshal'] = marshal_vop.volume
    volume_step_names.sort()
    volume_name_parameters.sort()
    # First step: no upstream dependencies (.after() with no args is a no-op).
    create_matrix_task = create_matrix_op(d1, d2)\
        .add_pvolumes(pvolumes_dict)\
        .after()
    # Resource limits requested for this step (2 GPUs).
    step_limits = {'nvidia.com/gpu': '2'}
    for k, v in step_limits.items():
        create_matrix_task.container.add_resource_limit(k, v)
    create_matrix_task.container.working_dir = "/kale"
    # Run as root inside the step container.
    create_matrix_task.container.set_security_context(
        k8s_client.V1SecurityContext(run_as_user=0))
    # Register KFP metrics/metadata files plus the executed-notebook HTML.
    output_artifacts = {}
    output_artifacts.update({'mlpipeline-metrics': '/mlpipeline-metrics.json'})
    output_artifacts.update(
        {'mlpipeline-ui-metadata': '/mlpipeline-ui-metadata.json'})
    output_artifacts.update({'create_matrix': '/create_matrix.html'})
    create_matrix_task.output_artifact_paths.update(output_artifacts)
    # Annotate the pod with its template dependencies for Kale's tooling.
    dep_names = create_matrix_task.dependent_names + volume_step_names
    create_matrix_task.add_pod_annotation(
        "kubeflow-kale.org/dependent-templates", json.dumps(dep_names))
    if volume_name_parameters:
        create_matrix_task.add_pod_annotation(
            "kubeflow-kale.org/volume-name-parameters",
            json.dumps(volume_name_parameters))
    # Second step: depends on create_matrix and shares the same volume.
    sum_matrix_task = sum_matrix_op()\
        .add_pvolumes(pvolumes_dict)\
        .after(create_matrix_task)
    sum_matrix_task.container.working_dir = "/kale"
    sum_matrix_task.container.set_security_context(
        k8s_client.V1SecurityContext(run_as_user=0))
    output_artifacts = {}
    output_artifacts.update({'mlpipeline-metrics': '/mlpipeline-metrics.json'})
    output_artifacts.update(
        {'mlpipeline-ui-metadata': '/mlpipeline-ui-metadata.json'})
    output_artifacts.update({'sum_matrix': '/sum_matrix.html'})
    sum_matrix_task.output_artifact_paths.update(output_artifacts)
    dep_names = sum_matrix_task.dependent_names + volume_step_names
    sum_matrix_task.add_pod_annotation(
        "kubeflow-kale.org/dependent-templates", json.dumps(dep_names))
    if volume_name_parameters:
        sum_matrix_task.add_pod_annotation(
            "kubeflow-kale.org/volume-name-parameters",
            json.dumps(volume_name_parameters))
if __name__ == "__main__":
pipeline_func = auto_generated_pipeline
pipeline_filename = pipeline_func.__name__ + '.pipeline.tar.gz'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
# Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment('hp-tuning')
# Submit a pipeline run
from kale.utils.kfp_utils import generate_run_name
run_name = generate_run_name('hp-test-rnd')
run_result = client.run_pipeline(
experiment.id, run_name, pipeline_filename, {})
|
[
"kubernetes.client.V1SecurityContext",
"kale.utils.kfp_utils.update_uimetadata",
"kale.utils.kfp_utils.generate_run_name",
"json.dumps",
"kfp.compiler.Compiler",
"kfp.Client",
"kfp.components.func_to_container_op",
"kale.utils.jupyter_utils.run_code",
"kfp.dsl.VolumeOp",
"collections.OrderedDict",
"kfp.dsl.pipeline"
] |
[((2764, 2804), 'kfp.components.func_to_container_op', 'comp.func_to_container_op', (['create_matrix'], {}), '(create_matrix)\n', (2789, 2804), True, 'import kfp.components as comp\n'), ((2823, 2860), 'kfp.components.func_to_container_op', 'comp.func_to_container_op', (['sum_matrix'], {}), '(sum_matrix)\n', (2848, 2860), True, 'import kfp.components as comp\n'), ((2864, 2912), 'kfp.dsl.pipeline', 'dsl.pipeline', ([], {'name': '"""hp-test-rnd"""', 'description': '""""""'}), "(name='hp-test-rnd', description='')\n", (2876, 2912), True, 'import kfp.dsl as dsl\n'), ((1338, 1360), 'kale.utils.jupyter_utils.run_code', '_kale_run_code', (['blocks'], {}), '(blocks)\n', (1352, 1360), True, 'from kale.utils.jupyter_utils import run_code as _kale_run_code\n'), ((1444, 1484), 'kale.utils.kfp_utils.update_uimetadata', '_kale_update_uimetadata', (['"""create_matrix"""'], {}), "('create_matrix')\n", (1467, 1484), True, 'from kale.utils.kfp_utils import update_uimetadata as _kale_update_uimetadata\n'), ((2602, 2624), 'kale.utils.jupyter_utils.run_code', '_kale_run_code', (['blocks'], {}), '(blocks)\n', (2616, 2624), True, 'from kale.utils.jupyter_utils import run_code as _kale_run_code\n'), ((2705, 2742), 'kale.utils.kfp_utils.update_uimetadata', '_kale_update_uimetadata', (['"""sum_matrix"""'], {}), "('sum_matrix')\n", (2728, 2742), True, 'from kale.utils.kfp_utils import update_uimetadata as _kale_update_uimetadata\n'), ((3021, 3034), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3032, 3034), False, 'from collections import OrderedDict\n'), ((3113, 3230), 'kfp.dsl.VolumeOp', 'dsl.VolumeOp', ([], {'name': '"""kale_marshal_volume"""', 'resource_name': '"""kale-marshal-pvc"""', 'modes': 'dsl.VOLUME_MODE_RWM', 'size': '"""1Gi"""'}), "(name='kale_marshal_volume', resource_name='kale-marshal-pvc',\n modes=dsl.VOLUME_MODE_RWM, size='1Gi')\n", (3125, 3230), True, 'import kfp.dsl as dsl\n'), ((5917, 5929), 'kfp.Client', 'kfp.Client', ([], {}), '()\n', (5927, 5929), False, 
'import kfp\n'), ((6084, 6116), 'kale.utils.kfp_utils.generate_run_name', 'generate_run_name', (['"""hp-test-rnd"""'], {}), "('hp-test-rnd')\n", (6101, 6116), False, 'from kale.utils.kfp_utils import generate_run_name\n'), ((3866, 3909), 'kubernetes.client.V1SecurityContext', 'k8s_client.V1SecurityContext', ([], {'run_as_user': '(0)'}), '(run_as_user=0)\n', (3894, 3909), True, 'from kubernetes import client as k8s_client\n'), ((4417, 4438), 'json.dumps', 'json.dumps', (['dep_names'], {}), '(dep_names)\n', (4427, 4438), False, 'import json\n'), ((4847, 4890), 'kubernetes.client.V1SecurityContext', 'k8s_client.V1SecurityContext', ([], {'run_as_user': '(0)'}), '(run_as_user=0)\n', (4875, 4890), True, 'from kubernetes import client as k8s_client\n'), ((5383, 5404), 'json.dumps', 'json.dumps', (['dep_names'], {}), '(dep_names)\n', (5393, 5404), False, 'import json\n'), ((4586, 4620), 'json.dumps', 'json.dumps', (['volume_name_parameters'], {}), '(volume_name_parameters)\n', (4596, 4620), False, 'import json\n'), ((5549, 5583), 'json.dumps', 'json.dumps', (['volume_name_parameters'], {}), '(volume_name_parameters)\n', (5559, 5583), False, 'import json\n'), ((5766, 5785), 'kfp.compiler.Compiler', 'compiler.Compiler', ([], {}), '()\n', (5783, 5785), True, 'import kfp.compiler as compiler\n')]
|
from . import argv
import os
import sys
import json
import csv
from . import dlprof_parser
from .show_gpu_info import show_gpu
from .profiling_command import *
def to_dict(result):
tmp = dict()
tmp["summary"] = list()
for i in result:
tmp["summary"].append(
i.to_dict()
)
return tmp
def write2file(filename,fileformat,result):
if(fileformat=="json"):
result = to_dict(result)
with open(filename+'.json', 'w+', encoding='utf-8') as f:
json.dump(result, f, ensure_ascii=False, indent=4)
elif(fileformat=="csv"):
with open(filename+'.csv', 'w+', encoding='UTF8') as f:
writer = csv.writer(f)
title = ["op type","gpu duration", "api call start", "kernel name"]
writer.writerow(title)
for i in result:
writer.writerow(i.to_list())
else:
tmp = sys.stdout
sys.stdout = open(filename, "w+")
for i in result:
i.printall()
sys.stdout = tmp
def rm_log(verbose:bool):
if(verbose):
os.system("rm -rf log/")
def main():
args = argv.argparsing()
gpu = show_gpu(False)
if(args.function=="gpuinfo"):
for i in gpu:
i.print_info()
elif(args.function=="profile"):
command = generate_command(args.model_type, args.gpu_idx,args.command)
os.system(command)
log_file_name = "log/dlprof_iteration.json"
dlprof_result = dlprof_parser.parsing(log_file_name)
write2file(args.output_filename, args.format, dlprof_result)
rm_log(args.log)
if __name__ == "__main__":
main()
|
[
"csv.writer",
"json.dump",
"os.system"
] |
[((1087, 1111), 'os.system', 'os.system', (['"""rm -rf log/"""'], {}), "('rm -rf log/')\n", (1096, 1111), False, 'import os\n'), ((514, 564), 'json.dump', 'json.dump', (['result', 'f'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(result, f, ensure_ascii=False, indent=4)\n', (523, 564), False, 'import json\n'), ((1394, 1412), 'os.system', 'os.system', (['command'], {}), '(command)\n', (1403, 1412), False, 'import os\n'), ((679, 692), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (689, 692), False, 'import csv\n')]
|
"""
PHD2 guider helper
"""
from thirdparty.phd2guider import Guider as PHD2Guider
class GuiderHelper:
guider = None
def connect(self, hostname="localhost"):
if self.guider is None:
self.guider = PHD2Guider(hostname)
self.guider.Connect()
def disconnect(self):
if self.guider is not None:
self.guider.Disconnect()
self.guider = None
def start_dither(self, dither_px, settle_px, settle_time, settle_timeout):
if self.guider is not None:
self.guider.Dither(dither_px, settle_px, settle_time, settle_timeout)
else:
raise Exception("The guider is not connected")
def check_settled(self):
settling = self.guider.CheckSettling()
if settling.Done:
if settling.Error:
raise Exception(settling.Error)
return True, settling
return False, settling
|
[
"thirdparty.phd2guider.Guider"
] |
[((228, 248), 'thirdparty.phd2guider.Guider', 'PHD2Guider', (['hostname'], {}), '(hostname)\n', (238, 248), True, 'from thirdparty.phd2guider import Guider as PHD2Guider\n')]
|
from django.core import serializers
from django.db import connection
from psycopg2 import sql
def reset_primary_key_index(table, col='id'):
"""Sets the postgres primary key index to one larger than max"""
query = sql.SQL(
'''SELECT setval(
pg_get_serial_sequence(%(table)s, %(col)s),
coalesce(max({col}), 1)
) FROM {table}'''
).format(
col=sql.Identifier(col), table=sql.Identifier(table)
)
with connection.cursor() as cursor:
cursor.execute(query, dict(table=table, col=col))
def load_fixture(fixture_file):
"""load all items from fixture file"""
def _load(apps, schema_editor):
original_apps = serializers.python.apps
serializers.python.apps = apps
tables = set()
try:
with open(fixture_file) as fixture:
objects = serializers.deserialize(
'json', fixture, ignorenonexistent=True
)
for obj in objects:
obj.save()
tables.add(obj.object._meta.db_table)
finally:
serializers.python.apps = original_apps
for table in tables:
reset_primary_key_index(table)
return _load
def unload_fixture(appname, models=()):
def _unload(apps, schema_editor):
"""Brutally deleting all entries for this model..."""
for modelname in models:
model = apps.get_model(appname, modelname)
model.objects.all().delete()
reset_primary_key_index(model._meta.db_table)
return _unload
|
[
"django.core.serializers.deserialize",
"psycopg2.sql.SQL",
"django.db.connection.cursor",
"psycopg2.sql.Identifier"
] |
[((463, 482), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (480, 482), False, 'from django.db import connection\n'), ((224, 374), 'psycopg2.sql.SQL', 'sql.SQL', (['"""SELECT setval(\n pg_get_serial_sequence(%(table)s, %(col)s),\n coalesce(max({col}), 1)\n ) FROM {table}"""'], {}), '(\n """SELECT setval(\n pg_get_serial_sequence(%(table)s, %(col)s),\n coalesce(max({col}), 1)\n ) FROM {table}"""\n )\n', (231, 374), False, 'from psycopg2 import sql\n'), ((399, 418), 'psycopg2.sql.Identifier', 'sql.Identifier', (['col'], {}), '(col)\n', (413, 418), False, 'from psycopg2 import sql\n'), ((426, 447), 'psycopg2.sql.Identifier', 'sql.Identifier', (['table'], {}), '(table)\n', (440, 447), False, 'from psycopg2 import sql\n'), ((863, 927), 'django.core.serializers.deserialize', 'serializers.deserialize', (['"""json"""', 'fixture'], {'ignorenonexistent': '(True)'}), "('json', fixture, ignorenonexistent=True)\n", (886, 927), False, 'from django.core import serializers\n')]
|
# Copyright 2006-2012 <NAME>
""" Version of Binner class that works with sqlalchemy
"""
from .rangeFinder import Binner
from sqlalchemy import and_, or_
class BinnerSA(Binner):
"""generate sqlalchemy query to find overlapping ranges using bin numbers"""
@staticmethod
def getOverlappingSqlExpr(seqCol, binCol, startCol, endCol, seq, start, end):
"""General sqlalchemy experssion to select for overlaps with the specified range. Columns
are SA column objects. Returns an and__ object"""
# build bin parts
parts = []
for bins in Binner.getOverlappingBins(start, end):
if bins[0] == bins[1]:
parts.append(binCol == bins[0])
else:
parts.append(and_(binCol >= bins[0], binCol <= bins[1]))
return and_(seqCol == seq, startCol < end, endCol > start, or_(*parts))
|
[
"sqlalchemy.and_",
"sqlalchemy.or_"
] |
[((862, 873), 'sqlalchemy.or_', 'or_', (['*parts'], {}), '(*parts)\n', (865, 873), False, 'from sqlalchemy import and_, or_\n'), ((751, 793), 'sqlalchemy.and_', 'and_', (['(binCol >= bins[0])', '(binCol <= bins[1])'], {}), '(binCol >= bins[0], binCol <= bins[1])\n', (755, 793), False, 'from sqlalchemy import and_, or_\n')]
|
import torch
import scipy.fft
import numpy as np
from functools import lru_cache
@lru_cache()
def compute_dct_mat(n: int, device: str, dtype: torch.dtype) -> torch.Tensor:
m = scipy.fft.dct(np.eye(n), norm="ortho")
return torch.tensor(m, device=device, dtype=dtype)
@lru_cache()
def compute_idct_mat(n: int, device: str, dtype: torch.dtype) -> torch.Tensor:
m = scipy.fft.idct(np.eye(n), norm="ortho")
return torch.tensor(m, device=device, dtype=dtype)
def dct(t: torch.Tensor) -> torch.Tensor:
m = compute_dct_mat(t.shape[-2], device=t.device, dtype=t.dtype)
return torch.einsum("...id,ij->jd", t, m)
def idct(t: torch.Tensor) -> torch.Tensor:
m = compute_idct_mat(t.shape[-2], device=t.device, dtype=t.dtype)
return torch.einsum("...id,ij->jd", t, m)
def dct2(t: torch.Tensor) -> torch.Tensor:
h, w = t.shape[-2:]
mh = compute_dct_mat(h, device=t.device, dtype=t.dtype)
mw = compute_dct_mat(w, device=t.device, dtype=t.dtype)
return torch.einsum("...hw,hi,wj->...ij", t, mh, mw)
def idct2(t: torch.Tensor) -> torch.Tensor:
h, w = t.shape[-2:]
mh = compute_idct_mat(h, device=t.device, dtype=t.dtype)
mw = compute_idct_mat(w, device=t.device, dtype=t.dtype)
return torch.einsum("...hw,hi,wj->...ij", t, mh, mw)
def dct3(t: torch.Tensor) -> torch.Tensor:
l, h, w = t.shape[-3:]
ml = compute_dct_mat(l, device=t.device, dtype=t.dtype)
mh = compute_dct_mat(h, device=t.device, dtype=t.dtype)
mw = compute_dct_mat(w, device=t.device, dtype=t.dtype)
return torch.einsum("...lhw,li,hj,wk->...ijk", t, ml, mh, mw)
def idct3(t: torch.Tensor) -> torch.Tensor:
l, h, w = t.shape[-3:]
ml = compute_idct_mat(l, device=t.device, dtype=t.dtype)
mh = compute_idct_mat(h, device=t.device, dtype=t.dtype)
mw = compute_idct_mat(w, device=t.device, dtype=t.dtype)
return torch.einsum("...lhw,li,hj,wk->...ijk", t, ml, mh, mw)
|
[
"torch.einsum",
"functools.lru_cache",
"numpy.eye",
"torch.tensor"
] |
[((84, 95), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (93, 95), False, 'from functools import lru_cache\n'), ((279, 290), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (288, 290), False, 'from functools import lru_cache\n'), ((232, 275), 'torch.tensor', 'torch.tensor', (['m'], {'device': 'device', 'dtype': 'dtype'}), '(m, device=device, dtype=dtype)\n', (244, 275), False, 'import torch\n'), ((429, 472), 'torch.tensor', 'torch.tensor', (['m'], {'device': 'device', 'dtype': 'dtype'}), '(m, device=device, dtype=dtype)\n', (441, 472), False, 'import torch\n'), ((597, 631), 'torch.einsum', 'torch.einsum', (['"""...id,ij->jd"""', 't', 'm'], {}), "('...id,ij->jd', t, m)\n", (609, 631), False, 'import torch\n'), ((758, 792), 'torch.einsum', 'torch.einsum', (['"""...id,ij->jd"""', 't', 'm'], {}), "('...id,ij->jd', t, m)\n", (770, 792), False, 'import torch\n'), ((993, 1038), 'torch.einsum', 'torch.einsum', (['"""...hw,hi,wj->...ij"""', 't', 'mh', 'mw'], {}), "('...hw,hi,wj->...ij', t, mh, mw)\n", (1005, 1038), False, 'import torch\n'), ((1242, 1287), 'torch.einsum', 'torch.einsum', (['"""...hw,hi,wj->...ij"""', 't', 'mh', 'mw'], {}), "('...hw,hi,wj->...ij', t, mh, mw)\n", (1254, 1287), False, 'import torch\n'), ((1551, 1605), 'torch.einsum', 'torch.einsum', (['"""...lhw,li,hj,wk->...ijk"""', 't', 'ml', 'mh', 'mw'], {}), "('...lhw,li,hj,wk->...ijk', t, ml, mh, mw)\n", (1563, 1605), False, 'import torch\n'), ((1873, 1927), 'torch.einsum', 'torch.einsum', (['"""...lhw,li,hj,wk->...ijk"""', 't', 'ml', 'mh', 'mw'], {}), "('...lhw,li,hj,wk->...ijk', t, ml, mh, mw)\n", (1885, 1927), False, 'import torch\n'), ((196, 205), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (202, 205), True, 'import numpy as np\n'), ((393, 402), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (399, 402), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
'''
Source:
https://github.com/yangaound/xmlutil
Created on 2016年12月24日
@author: albin
'''
import re
import abc
import itertools
from collections import defaultdict
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import petl
try:
from lxml import etree
except ImportError:
from xml.etree import cElementTree as etree
except ImportError:
from xml.etree import ElementTree as etree
__version__ = '1.0.0'
namespace_pattern = re.compile(r"{.+}")
def parse(filename, *args, **kwargs):
"""factory method, return a new instance of XMLNode"""
element = etree.parse(filename, *args, **kwargs).getroot()
return XMLNode(element)
def get_tag(element):
"""return the element's local name"""
return re.sub(namespace_pattern, '', element.tag)
def get_namespace(element):
"""return the element's namespace"""
return re.sub(get_tag(element), '', element.tag)
class BridgeNode(object):
__metaclass__ = abc.ABCMeta
def __init__(self, element):
"""Abstract class, it wraps an element as a implementor
:type element: ``lxml.etree.Element`` or ``xml.etree.ElementTree.Element``
"""
if element is None:
raise TypeError("Argument 'element' should be an instance of lxml.etree.Element or xml.etree.ElementTree.Element")
self.element = element
@abc.abstractmethod
def to_dicts(self, **kwargs):
"""expands the wrapped element tree into a ``sequence``.
:return: ``list<dict>``"""
def to_table(self, inclusive_tags=(), exclusive_tags=(), duplicate_tags=(), with_element=False, with_attrib=False, ):
dicts = self.to_dicts(duplicate_tags=duplicate_tags, with_element=with_element, with_attrib=with_attrib, )
table = dicts2table(dicts)
if inclusive_tags:
header = [field for tag in inclusive_tags for field in table.header() if (tag == field) or field.startswith(tag + '_')]
table = table.cut(*header)
elif exclusive_tags:
header = [field for tag in exclusive_tags for field in table.header() if (tag == field) or field.startswith(tag + '_')]
table = table.cutout(*header)
return table
def findall(self, expression, **kwargs):
"""Wraps the result of executing expression into a ``GroupNode`` and return it"""
return self._execute_expression(self.element, 'findall', expression, **kwargs)
def xpath(self, expression, **kwargs):
"""Wraps the result of executing expression into a ``GroupNode`` and return it"""
return self._execute_expression(self.element, 'xpath', expression, **kwargs)
def _execute_expression(self, target_node, func_name, expression, **kwargs):
"""executes expression over target_node methods named func_name"""
func = getattr(target_node, func_name)
elements = func(expression, **kwargs)
return GroupNode((XMLNode(e) for e in elements))
def join(self, other, key=None, **petl_kwargs):
"""join this node and other node as a ``RelatedNode`` """
return self.relate(other, 'join', key=key, **petl_kwargs)
def crossjoin(self, other, **petl_kwargs):
"""crossjoin this node and other node as a ``RelatedNode`` """
return self.relate(other, 'crossjoin', **petl_kwargs)
def relate(self, other, relation, **petl_kwargs):
"""relate this node and other node as a ``RelatedNode`` over relation. relation is a function name of petl package"""
return RelatedNode(self, other, relation, **petl_kwargs)
def tag(self):
return get_tag(self.element)
def namespace(self):
return get_namespace(self.element)
def nsmap(self, sub=None):
nsmap = self.element.nsmap
return dict((k if k else sub, v) for k, v in nsmap.items())
def __repr__(self):
return "<%s %s at 0x%x>" % (self.__class__.__name__, self.tag(), id(self))
class EmptyNode(BridgeNode):
"""This class just use to relates another nodes"""
def __init__(self): pass
def to_dicts(self, **kwargs):
"""implement"""
return []
def __repr__(self):
return "<%s %s at 0x%x>" % (self.__class__.__name__, 'None', id(self))
class XMLNode(BridgeNode):
"""This class wraps an instance of ``lxml.etree.Element`` or ``xml.etree.ElementTree.Element`` as a implementor"""
def to_dicts(self, **kwargs):
"""implement"""
dicts = DFSExpansion(self.element, **kwargs).expand()
return dicts
def remove(self):
parent = self.element.getparent()
parent.remove(self.element)
def find(self, expression, **kwargs):
element = self.element.find(expression, **kwargs)
return XMLNode(element)
class GroupNode(BridgeNode, list):
def __init__(self, nodes):
"""This class wraps a not empty collection
:type nodes: ``Iterator<? extends xmlutil.BridgeNode>``
"""
self.extend(nodes)
BridgeNode.__init__(self, self[0].element)
def to_dicts(self, **kwargs):
"""implement"""
dicts = []
for node in self:
dicts.extend(node.to_dicts(**kwargs))
return dicts
def _execute_expression(self, _, func_name, expression, **kwargs):
"""overwrite"""
nodes = []
for node in self:
nodes.extend(BridgeNode._execute_expression(self, node, func_name, expression, **kwargs))
return GroupNode(nodes)
def remove(self):
for node in self:
node.remove()
class RelatedNode(BridgeNode):
def __init__(self, this, other, relation, **kwargs):
"""This class wraps 2 node over their relation
:type this: ``? extends xmlutil.BridgeNode``
:type other: ``? extends xmlutil.BridgeNode``
:type relation: a function name of package ``petl``
"""
super(RelatedNode, self).__init__(other.element)
self.this = this
self.other = other
self.relation = relation
self.kwargs = kwargs
def to_dicts(self, **kwargs):
"""implement"""
return self.to_table(**kwargs).dicts()
def to_table(self, **kwargs):
"""overwrite"""
this_table = dicts2table(self.this.to_dicts(**kwargs))
other_table = dicts2table(self.other.to_dicts(**kwargs))
if this_table.nrows() == 0: # self.this is a EmptyNode
table = other_table
else:
table = getattr(this_table, self.relation)(other_table, **self.kwargs)
return table
def _execute_expression(self, _, func_name, expression, **kwargs):
"""overwrite"""
nodes1 = BridgeNode._execute_expression(self, self.this, func_name, expression, **kwargs)
nodes2 = BridgeNode._execute_expression(self, self.other, func_name, expression, **kwargs)
return GroupNode(itertools.chain(nodes1, nodes2))
def dicts2table(dicts):
"""transform dicts into a ``petl.util.base.Table``"""
return petl.wrap(petl.fromdicts(dicts)) if dicts else petl.empty()
class DFSExpansion(object):
"""depth first search element tree and expands it into a ``sequence`` of ``dict``
E.g.,
>>> expansion = DFSExpansion(element)
>>> dicts = expansion.expand()
>>> for dic in dicts:
>>> print dic
"""
def __init__(self, element, duplicate_tags=(), with_element=False, with_attrib=False):
"""
:type element: ``lxml.etree.Element`` or ``xml.etree.cElementTree.Element``
:param duplicate_tags: elements with these tag will be renamed and added to a dictionary
:param with_element: the values of dictionaries contains of element if True otherwise contains of element's text.
:param with_attrib: element that's attribute is not empty will be added to dictionaries if with_attrib is Tr
"""
self.element = element
self.duplicate_tags = duplicate_tags
self.duplicate_tags_counter = None
self.with_element = with_element
self.with_attrib = with_attrib
self.dicts = list()
self.buffer_tags = list()
self.buffer_dict = OrderedDict()
def expand(self):
self._reset_duplicate_tags_counter()
self._expand(self.element)
return self.dicts
def _expand(self, element, level=0):
tag, text = get_tag(element), element.text.strip() if element.text else None
if text: # element.text is not empty:
self._buffer(tag, text, element)
elif element.attrib and self.with_attrib: # element.attribute is not empty and need to fetch it
self._buffer(tag, text, element, with_attrib=True)
for e in element:
self._expand(e, level + 1)
if level == 0:
self._insert(tag)
def _buffer(self, tag, text, element, with_attrib=False):
if self.buffer_dict.get(tag) is not None:
if self.duplicate_tags and tag in self.duplicate_tags:
self.duplicate_tags_counter[tag] += 1
tag = tag + '_' + str(self.duplicate_tags_counter[tag])
else:
self._insert(tag)
self.buffer_tags.append(tag)
buffering = element if self.with_element else (element.attrib if with_attrib else text)
self.buffer_dict.update({tag: buffering})
def _reset_duplicate_tags_counter(self):
self.duplicate_tags_counter = defaultdict(int)
def _insert(self, duplicate_tag):
self._reset_duplicate_tags_counter()
self.dicts.append(self.buffer_dict.copy())
try:
idx = self.buffer_tags.index(duplicate_tag)
except ValueError:
idx = len(self.buffer_tags)
for tag in self.buffer_tags[idx:]:
self.buffer_dict.has_key(tag) and self.buffer_dict.update({tag: None})
self.buffer_tags = self.buffer_tags[:idx]
|
[
"xml.etree.ElementTree.parse",
"collections.defaultdict",
"itertools.chain",
"petl.empty",
"ordereddict.OrderedDict",
"re.sub",
"petl.fromdicts",
"re.compile"
] |
[((525, 543), 're.compile', 're.compile', (['"""{.+}"""'], {}), "('{.+}')\n", (535, 543), False, 'import re\n'), ((812, 854), 're.sub', 're.sub', (['namespace_pattern', '""""""', 'element.tag'], {}), "(namespace_pattern, '', element.tag)\n", (818, 854), False, 'import re\n'), ((7159, 7171), 'petl.empty', 'petl.empty', ([], {}), '()\n', (7169, 7171), False, 'import petl\n'), ((8235, 8248), 'ordereddict.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8246, 8248), False, 'from ordereddict import OrderedDict\n'), ((9545, 9561), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (9556, 9561), False, 'from collections import defaultdict\n'), ((658, 696), 'xml.etree.ElementTree.parse', 'etree.parse', (['filename', '*args'], {}), '(filename, *args, **kwargs)\n', (669, 696), True, 'from xml.etree import ElementTree as etree\n'), ((6984, 7015), 'itertools.chain', 'itertools.chain', (['nodes1', 'nodes2'], {}), '(nodes1, nodes2)\n', (6999, 7015), False, 'import itertools\n'), ((7122, 7143), 'petl.fromdicts', 'petl.fromdicts', (['dicts'], {}), '(dicts)\n', (7136, 7143), False, 'import petl\n')]
|
import os
from os.path import join, dirname
from dotenv import load_dotenv
from pymongo import MongoClient
import pymysql.cursors
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
client = MongoClient('localhost', 27017, username='admin', password="<PASSWORD>")
sql_client = lambda db: pymysql.connect(host = 'localhost',
user='root',
password='<PASSWORD>',
database=db)
|
[
"dotenv.load_dotenv",
"os.path.dirname",
"pymongo.MongoClient"
] |
[((177, 201), 'dotenv.load_dotenv', 'load_dotenv', (['dotenv_path'], {}), '(dotenv_path)\n', (188, 201), False, 'from dotenv import load_dotenv\n'), ((212, 284), 'pymongo.MongoClient', 'MongoClient', (['"""localhost"""', '(27017)'], {'username': '"""admin"""', 'password': '"""<PASSWORD>"""'}), "('localhost', 27017, username='admin', password='<PASSWORD>')\n", (223, 284), False, 'from pymongo import MongoClient\n'), ((150, 167), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (157, 167), False, 'from os.path import join, dirname\n')]
|
import os
import errno
import sys
from pyswip import Prolog
# function for reducing a topology to a term "l(type,dir,term1,term2)"
def reduce(outputFile):
# load Prolog program "reducer"
prolog = Prolog()
prolog.consult("prolog/reducer.pl")
# check well-formedness
violating = list(prolog.query("violatingWellFormedness(L1,L2)"))
violating1 = violating[0]["L1"]
violating2 = violating[0]["L2"]
if len(violating1) > 0 or len(violating2) > 0:
# print violations (if any)
print("ERROR: topology is not well-formed because")
for n in violating1:
print("- node " + str(n) + " violates condition (i) of well-formedness")
for n in violating2:
print("- node " + str(n) + " violates condition (ii) of well-formedness")
exit(2)
# compute first possible solution (if any)
results = list(prolog.query("loop(R)", maxresult=1))
if len(results) > 0:
# if solvable, save first result
result = results[0]["R"]
else:
# otherwise, return error message for non well-formed topology
print("ERROR: something went wrong...is topology rooted?")
exit(2)
# create "output" folder (if not existing yet)
try:
os.mkdir("output")
except Exception as e:
if e.errno != errno.EEXIST:
print(e)
# output term on "outputFile"
outputFileWithFolder = "output/" + outputFile
output = open(outputFileWithFolder,"w")
output.write(result)
def main(args):
# check command line arguments
if len(args) < 1:
print("usage: reducer.py <outputFile>")
exit(2)
# reduce topology and output on file
reduce(args[0])
main(sys.argv[1:])
|
[
"os.mkdir",
"pyswip.Prolog"
] |
[((205, 213), 'pyswip.Prolog', 'Prolog', ([], {}), '()\n', (211, 213), False, 'from pyswip import Prolog\n'), ((1253, 1271), 'os.mkdir', 'os.mkdir', (['"""output"""'], {}), "('output')\n", (1261, 1271), False, 'import os\n')]
|
import os
import shutil
import datetime
from pylokit import Office
from wand.image import Image
from tempfile import NamedTemporaryFile, TemporaryDirectory
from rq import get_current_job
from docsbox import app, rq
from docsbox.docs.utils import make_zip_archive, make_thumbnails
@rq.job(timeout=app.config["REDIS_JOB_TIMEOUT"])
def remove_file(path):
"""
Just removes a file.
Used for deleting original files (uploaded by user) and result files (result of converting)
"""
return os.remove(path)
@rq.job(timeout=app.config["REDIS_JOB_TIMEOUT"])
def process_document(filename, path, options, meta):
current_task = get_current_job()
with Office(app.config["LIBREOFFICE_PATH"]) as office: # acquire libreoffice lock
with office.documentLoad(path) as original_document: # open original document
print ("PATH : ", str(path))
with TemporaryDirectory() as tmp_dir: # create temp dir where output'll be stored
for fmt in options["formats"]: # iterate over requested formats
current_format = app.config["SUPPORTED_FORMATS"][fmt]
#output_path = os.path.join(tmp_dir, current_format["path"])
output_path = os.path.join(tmp_dir, filename+"."+current_format["path"])
original_document.saveAs(output_path, fmt=current_format["fmt"])
if options.get("thumbnails", None):
is_created = False
if meta["mimetype"] == "application/pdf":
pdf_path = path
elif "pdf" in options["formats"]:
pdf_path = os.path.join(tmp_dir, filename+".pdf")
else:
pdf_tmp_file = NamedTemporaryFile()
pdf_path = pdf_tmp_file.name
original_document.saveAs(filename+".pdf", fmt="pdf")
is_created = True
image = Image(filename=pdf_path,
resolution=app.config["THUMBNAILS_DPI"])
if is_created:
pdf_tmp_file.close()
thumbnails = make_thumbnails(image, tmp_dir, options["thumbnails"]["size"])
result_path, result_url = make_zip_archive(current_task.id, tmp_dir)
remove_file.schedule(
datetime.timedelta(seconds=app.config["RESULT_FILE_TTL"]),
result_path
)
return result_url
|
[
"tempfile.NamedTemporaryFile",
"os.remove",
"tempfile.TemporaryDirectory",
"docsbox.docs.utils.make_zip_archive",
"pylokit.Office",
"wand.image.Image",
"datetime.timedelta",
"docsbox.rq.job",
"rq.get_current_job",
"docsbox.docs.utils.make_thumbnails",
"os.path.join"
] |
[((287, 334), 'docsbox.rq.job', 'rq.job', ([], {'timeout': "app.config['REDIS_JOB_TIMEOUT']"}), "(timeout=app.config['REDIS_JOB_TIMEOUT'])\n", (293, 334), False, 'from docsbox import app, rq\n'), ((525, 572), 'docsbox.rq.job', 'rq.job', ([], {'timeout': "app.config['REDIS_JOB_TIMEOUT']"}), "(timeout=app.config['REDIS_JOB_TIMEOUT'])\n", (531, 572), False, 'from docsbox import app, rq\n'), ((506, 521), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (515, 521), False, 'import os\n'), ((645, 662), 'rq.get_current_job', 'get_current_job', ([], {}), '()\n', (660, 662), False, 'from rq import get_current_job\n'), ((672, 710), 'pylokit.Office', 'Office', (["app.config['LIBREOFFICE_PATH']"], {}), "(app.config['LIBREOFFICE_PATH'])\n", (678, 710), False, 'from pylokit import Office\n'), ((2393, 2450), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': "app.config['RESULT_FILE_TTL']"}), "(seconds=app.config['RESULT_FILE_TTL'])\n", (2411, 2450), False, 'import datetime\n'), ((893, 913), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (911, 913), False, 'from tempfile import NamedTemporaryFile, TemporaryDirectory\n'), ((2308, 2350), 'docsbox.docs.utils.make_zip_archive', 'make_zip_archive', (['current_task.id', 'tmp_dir'], {}), '(current_task.id, tmp_dir)\n', (2324, 2350), False, 'from docsbox.docs.utils import make_zip_archive, make_thumbnails\n'), ((1239, 1301), 'os.path.join', 'os.path.join', (['tmp_dir', "(filename + '.' + current_format['path'])"], {}), "(tmp_dir, filename + '.' 
+ current_format['path'])\n", (1251, 1301), False, 'import os\n'), ((1990, 2055), 'wand.image.Image', 'Image', ([], {'filename': 'pdf_path', 'resolution': "app.config['THUMBNAILS_DPI']"}), "(filename=pdf_path, resolution=app.config['THUMBNAILS_DPI'])\n", (1995, 2055), False, 'from wand.image import Image\n'), ((2203, 2265), 'docsbox.docs.utils.make_thumbnails', 'make_thumbnails', (['image', 'tmp_dir', "options['thumbnails']['size']"], {}), "(image, tmp_dir, options['thumbnails']['size'])\n", (2218, 2265), False, 'from docsbox.docs.utils import make_zip_archive, make_thumbnails\n'), ((1665, 1705), 'os.path.join', 'os.path.join', (['tmp_dir', "(filename + '.pdf')"], {}), "(tmp_dir, filename + '.pdf')\n", (1677, 1705), False, 'import os\n'), ((1769, 1789), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {}), '()\n', (1787, 1789), False, 'from tempfile import NamedTemporaryFile, TemporaryDirectory\n')]
|
from django.conf.urls import patterns, include, url
urlpatterns = patterns('apps.consultants.views',
url(r'^maillist/$', 'maillist', name='maillist'),
url(r'^vcf/$', 'vcf', name='vcf'),
url(r'^maillist/(?P<team_id>[0-9]{2}|(tl))/$', 'maillist', name='maillist'),
)
|
[
"django.conf.urls.url"
] |
[((106, 153), 'django.conf.urls.url', 'url', (['"""^maillist/$"""', '"""maillist"""'], {'name': '"""maillist"""'}), "('^maillist/$', 'maillist', name='maillist')\n", (109, 153), False, 'from django.conf.urls import patterns, include, url\n'), ((160, 192), 'django.conf.urls.url', 'url', (['"""^vcf/$"""', '"""vcf"""'], {'name': '"""vcf"""'}), "('^vcf/$', 'vcf', name='vcf')\n", (163, 192), False, 'from django.conf.urls import patterns, include, url\n'), ((199, 273), 'django.conf.urls.url', 'url', (['"""^maillist/(?P<team_id>[0-9]{2}|(tl))/$"""', '"""maillist"""'], {'name': '"""maillist"""'}), "('^maillist/(?P<team_id>[0-9]{2}|(tl))/$', 'maillist', name='maillist')\n", (202, 273), False, 'from django.conf.urls import patterns, include, url\n')]
|
import asyncio
from helpers import get_db
import importlib
import json
import logging
import sys
from datetime import datetime
from os import path
from queue import Queue
import requests
from alive_progress import alive_bar
from checkEp import check_episode
dir = path.split(path.abspath(__file__))[0]
# logging setup
logging.StreamHandler()
async def main():
if not api_token or not base_url:
logging.critical('Either your api_token or base_url is blank!')
sys.exit()
api_json = {"X-Emby-Token": api_token}
headers={"user-agent": "mozilla/5.0 (windows nt 10.0; win64; x64) applewebkit/537.36 (khtml, like gecko) chrome/81.0.4044.138 safari/537.36"}
headers.update(api_json)
years = datetime.now().year if len(sys.argv)==1 else None if sys.argv[1].lower() in ['all','none'] else sys.argv[1]
logging.warning(f'Years={years}')
raw_data = {
'IncludeItemTypes': 'Episode',
'Years': years,
'Recursive': True,
'IsMissing': False,
}
def get_items(params):
res = requests.get(f'{base_url}/Items', params=params, headers=headers)
try:
data = json.loads(res.text)
except json.decoder.JSONDecodeError:
print(res.text)
return []
items = []
for item in data.get('Items'):
id = item.get("Id")
items.append(id)
return items
ids = []
if not check_thumbs:
for q in ['Episode ', 'TBA']:
raw_data.update({'NameStartsWith': q})
ids += get_items(raw_data)
if check_thumbs:
ids = get_items(raw_data)
logging.warning(f'Checking {len(ids)} ids!')
with alive_bar(len(ids), bar='blocks', spinner='dots_waves2') as bar:
db = get_db()
async def run_with_progress(id):
await check_episode(id, db)
bar()
# Optimal qsize limit might be 8
# 3 = 59.9
# 5 = 61.4
# 7 = 58.5
# 8 = 64
# 9 = 59.5
# 10 = 57.6
ps = Queue()
for id in ids:
while ps.qsize() > limit_concurrent_requests:
await ps.get()
ps.put(asyncio.create_task(run_with_progress(id)))
while not ps.empty():
await ps.get()
db.close()
if __name__ == '__main__':
conf = importlib.import_module('config')
if path.isfile(f'{dir}/config_local.py'):
conf = importlib.import_module('config_local')
api_token = conf.api_token
base_url = conf.base_url
check_thumbs = conf.check_thumbs
limit_concurrent_requests = conf.limit_concurrent_requests
asyncio.run(main())
|
[
"os.path.abspath",
"json.loads",
"importlib.import_module",
"logging.warning",
"logging.StreamHandler",
"checkEp.check_episode",
"datetime.datetime.now",
"os.path.isfile",
"requests.get",
"logging.critical",
"helpers.get_db",
"queue.Queue",
"sys.exit"
] |
[((322, 345), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (343, 345), False, 'import logging\n'), ((837, 870), 'logging.warning', 'logging.warning', (['f"""Years={years}"""'], {}), "(f'Years={years}')\n", (852, 870), False, 'import logging\n'), ((2347, 2380), 'importlib.import_module', 'importlib.import_module', (['"""config"""'], {}), "('config')\n", (2370, 2380), False, 'import importlib\n'), ((2388, 2425), 'os.path.isfile', 'path.isfile', (['f"""{dir}/config_local.py"""'], {}), "(f'{dir}/config_local.py')\n", (2399, 2425), False, 'from os import path\n'), ((278, 300), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (290, 300), False, 'from os import path\n'), ((411, 474), 'logging.critical', 'logging.critical', (['"""Either your api_token or base_url is blank!"""'], {}), "('Either your api_token or base_url is blank!')\n", (427, 474), False, 'import logging\n'), ((483, 493), 'sys.exit', 'sys.exit', ([], {}), '()\n', (491, 493), False, 'import sys\n'), ((1054, 1119), 'requests.get', 'requests.get', (['f"""{base_url}/Items"""'], {'params': 'params', 'headers': 'headers'}), "(f'{base_url}/Items', params=params, headers=headers)\n", (1066, 1119), False, 'import requests\n'), ((1768, 1776), 'helpers.get_db', 'get_db', ([], {}), '()\n', (1774, 1776), False, 'from helpers import get_db\n'), ((2046, 2053), 'queue.Queue', 'Queue', ([], {}), '()\n', (2051, 2053), False, 'from queue import Queue\n'), ((2442, 2481), 'importlib.import_module', 'importlib.import_module', (['"""config_local"""'], {}), "('config_local')\n", (2465, 2481), False, 'import importlib\n'), ((725, 739), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (737, 739), False, 'from datetime import datetime\n'), ((1152, 1172), 'json.loads', 'json.loads', (['res.text'], {}), '(res.text)\n', (1162, 1172), False, 'import json\n'), ((1837, 1858), 'checkEp.check_episode', 'check_episode', (['id', 'db'], {}), '(id, db)\n', (1850, 1858), False, 'from checkEp 
import check_episode\n')]
|
import requests
from lxml import html
import pandas as pd
import datetime
from lxml import etree
import google.cloud.storage
import re
from google.cloud import bigquery
import html as hhhh
from functools import reduce
def scrapping(article):
r1 = requests.get('https://www.semana.com{}'.format(article))
tree = html.fromstring(r1.content)
content = tree.xpath('.//div[@id="contentItem"]')
text_content = etree.tostring(content[0], pretty_print=True)
text_content = hhhh.unescape(str(text_content))
text_content = re.sub(r"<.{0,200}>","",text_content)
text_content = text_content.replace("\\n", "").replace("\r", "").replace("b'", "")
date = tree.xpath('.//span[@class="date"]/text()')[0]
date = date.replace("|", "")
date = date.strip()
list_date = date.split(" ")
list_date[1]
if list_date[2] == "PM":
hour = str(int(list_date[1].split(":")[0]) + 12) + ":" + list_date[1].split(":")[1] + ":" + list_date[1].split(":")[2]
else:
hour = list_date[1]
date = list_date[0].replace("/", "-").split("-")
date = date[-1] + "-" + date[0] + "-" + date[1]
tag = tree.xpath('.//a[@itemprop="articleSection"]/text()')[0]
tag= hhhh.unescape(str(tag))
title = tree.xpath('.//h1[@class="tittleArticuloOpinion"]/text()')[0]
title = title.strip()
title = hhhh.unescape(str(title))
item_id = int(tree.xpath('.//input[@id="itemId"]/@value')[0])
timestamp = str(datetime.datetime.utcnow())
row = [str(title), str(date), str(hour), str(tag), str(text_content), item_id, timestamp]
return row
def loop_req():
r = requests.get('https://www.semana.com')
tree = html.fromstring(r.content)
list_articles = tree.xpath('.//a[contains(@class,"article-h-link")]/@href')
## in order to create the df
dflist = []
for n,article in enumerate(list_articles):
try:
row = scrapping(article)
upload_to_bq(row)
row_l = [x for x in row]
dflist.append(row_l)
print(n)
except:
print(n,'fail')
pass
df=pd.DataFrame(dflist,columns=["title", "date", "hour", "tag", "text_content", "item_id", "timestamp"])
return df
def upload_to_bq(row):
# Instantiates a client
bigquery_client = bigquery.Client()
dataset_ref = bigquery_client.dataset('news_scrapping')
table_ref = dataset_ref.table('semana')
table = bigquery_client.get_table(table_ref)
rows_to_insert = [row]
errors = bigquery_client.insert_rows(table, rows_to_insert)
print(errors)
assert errors == []
def upload_bucket(csv):
client = google.cloud.storage.Client()
bucket = client.get_bucket('newscrapp')
now = datetime.datetime.now()
y = now.year
m = now.month
d = now.day
h = now.hour
blob = bucket.blob('semana/{}-{}-{}-{}.csv'.format(y, m, d, h))
blob.upload_from_string(csv)
def scrapper(request):
"""Responds to any HTTP request.
Args:
request (flask.Request): HTTP request object.
Returns:
The response text or any set of values that can be turned into a
Response object using
`make_response <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>`.
"""
request_json = request.get_json()
if request.args and 'message' in request.args:
return request.args.get('message')
elif request_json and 'message' in request_json:
return request_json['message']
else:
df = loop_req()
df.drop_duplicates(inplace = True)
csv = df.to_csv()
upload_bucket(csv)
# pandas_gbq.to_gbq(df, 'news_scrapping.semana', project_id="servisentimen-servipolitics", if_exists='append')
return csv
|
[
"pandas.DataFrame",
"google.cloud.bigquery.Client",
"datetime.datetime.now",
"lxml.html.fromstring",
"datetime.datetime.utcnow",
"requests.get",
"lxml.etree.tostring",
"re.sub"
] |
[((337, 364), 'lxml.html.fromstring', 'html.fromstring', (['r1.content'], {}), '(r1.content)\n', (352, 364), False, 'from lxml import html\n'), ((444, 489), 'lxml.etree.tostring', 'etree.tostring', (['content[0]'], {'pretty_print': '(True)'}), '(content[0], pretty_print=True)\n', (458, 489), False, 'from lxml import etree\n'), ((567, 605), 're.sub', 're.sub', (['"""<.{0,200}>"""', '""""""', 'text_content'], {}), "('<.{0,200}>', '', text_content)\n", (573, 605), False, 'import re\n'), ((1732, 1770), 'requests.get', 'requests.get', (['"""https://www.semana.com"""'], {}), "('https://www.semana.com')\n", (1744, 1770), False, 'import requests\n'), ((1787, 1813), 'lxml.html.fromstring', 'html.fromstring', (['r.content'], {}), '(r.content)\n', (1802, 1813), False, 'from lxml import html\n'), ((2466, 2483), 'google.cloud.bigquery.Client', 'bigquery.Client', ([], {}), '()\n', (2481, 2483), False, 'from google.cloud import bigquery\n'), ((2901, 2924), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2922, 2924), False, 'import datetime\n'), ((1551, 1577), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1575, 1577), False, 'import datetime\n'), ((2266, 2372), 'pandas.DataFrame', 'pd.DataFrame', (['dflist'], {'columns': "['title', 'date', 'hour', 'tag', 'text_content', 'item_id', 'timestamp']"}), "(dflist, columns=['title', 'date', 'hour', 'tag',\n 'text_content', 'item_id', 'timestamp'])\n", (2278, 2372), True, 'import pandas as pd\n')]
|