| hexsha (string, 40 chars) | size (int64, 2–1.02M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, 4–245 chars) | max_stars_repo_name (string, 6–130 chars) | max_stars_repo_head_hexsha (string, 40 chars) | max_stars_repo_licenses (list, 1–10 items) | max_stars_count (int64, 1–191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24 chars, nullable) | max_stars_repo_stars_event_max_datetime (string, 24 chars, nullable) | max_issues_repo_path (string, 4–245 chars) | max_issues_repo_name (string, 6–130 chars) | max_issues_repo_head_hexsha (string, 40 chars) | max_issues_repo_licenses (list, 1–10 items) | max_issues_count (int64, 1–67k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24 chars, nullable) | max_issues_repo_issues_event_max_datetime (string, 24 chars, nullable) | max_forks_repo_path (string, 4–245 chars) | max_forks_repo_name (string, 6–130 chars) | max_forks_repo_head_hexsha (string, 40 chars) | max_forks_repo_licenses (list, 1–10 items) | max_forks_count (int64, 1–105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24 chars, nullable) | max_forks_repo_forks_event_max_datetime (string, 24 chars, nullable) | content (string, 2–1.02M chars) | avg_line_length (float64, 1–417k) | max_line_length (int64, 1–987k) | alphanum_fraction (float64, 0–1) | content_no_comment (string, 0–1.01M chars) | is_comment_constant_removed (bool) | is_sharp_comment_removed (bool) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 1c48ba21837c94aee68af6465f3d992eba966cf7 | 10,513 | py | Python | static_precompiler/compilers/base.py | BarnabasSzabolcs/django-static-precompiler | 5fb7390896d725825a688afd3caa54bb642a08b0 | ["MIT"] | null | null | null | static_precompiler/compilers/base.py | BarnabasSzabolcs/django-static-precompiler | 5fb7390896d725825a688afd3caa54bb642a08b0 | ["MIT"] | null | null | null | static_precompiler/compilers/base.py | BarnabasSzabolcs/django-static-precompiler | 5fb7390896d725825a688afd3caa54bb642a08b0 | ["MIT"] | null | null | null |
import logging
import os
import posixpath
import django.core.exceptions
from django.contrib.staticfiles import finders
from django.utils import encoding, functional
try:
from django.utils import six
uses_six = True
except ImportError:
uses_six = False
from .. import models, mtime, settings, utils
logger = logging.getLogger("static_precompiler")
__all__ = (
"BaseCompiler",
)
class BaseCompiler(object):
name = None
supports_dependencies = False
input_extension = None
output_extension = None
def is_supported(self, source_path):
""" Return True iff provided source file type is supported by this precompiler.
:param source_path: relative path to a source file
:type source_path: str
:returns: bool
"""
return os.path.splitext(source_path)[1].lstrip(".") == self.input_extension
# noinspection PyMethodMayBeStatic
def get_full_source_path(self, source_path):
""" Return the full path to the given source file.
Check if the source file exists.
The returned path is OS-dependent.
:param source_path: relative path to a source file
:type source_path: str
:returns: str
:raises: ValueError
"""
norm_source_path = utils.normalize_path(source_path.lstrip("/"))
if settings.STATIC_ROOT:
full_path = os.path.join(settings.STATIC_ROOT, norm_source_path)
if os.path.exists(full_path):
return full_path
try:
full_path = finders.find(norm_source_path)
except django.core.exceptions.SuspiciousOperation:
full_path = None
if full_path is None:
raise ValueError("Can't find staticfile named: {0}".format(source_path))
return full_path
def get_output_filename(self, source_filename):
""" Return the name of compiled file based on the name of source file.
:param source_filename: name of a source file
:type source_filename: str
:returns: str
"""
return "{0}.{1}".format(os.path.splitext(source_filename)[0], self.output_extension)
def get_output_path(self, source_path):
""" Get relative path to compiled file based for the given source file.
The returned path is in posix format.
:param source_path: relative path to a source file
:type source_path: str
:returns: str
"""
source_dir = os.path.dirname(source_path.lstrip("/"))
source_filename = os.path.basename(source_path)
output_filename = self.get_output_filename(source_filename)
return posixpath.join(settings.OUTPUT_DIR, source_dir, output_filename)
def get_full_output_path(self, source_path):
""" Get full path to compiled file based for the given source file.
The returned path is OS-dependent.
:param source_path: relative path to a source file
:type source_path: str
:returns: str
"""
return os.path.join(settings.ROOT, utils.normalize_path(self.get_output_path(source_path)))
def get_source_mtime(self, source_path):
""" Get the modification time of the source file.
:param source_path: relative path to a source file
:type source_path: str
:returns: int
"""
return mtime.get_mtime(self.get_full_source_path(source_path))
def get_output_mtime(self, source_path):
""" Get the modification time of the compiled file.
Return None if the compiled file does not exist.
:param source_path: relative path to a source file
:type source_path: str
:returns: int, None
"""
full_output_path = self.get_full_output_path(source_path)
if not os.path.exists(full_output_path):
return None
return mtime.get_mtime(full_output_path)
def should_compile(self, source_path, from_management=False):
""" Return True iff provided source file should be compiled.
:param source_path: relative path to a source file
:type source_path: str
:param from_management: whether the method was invoked from management command
:type from_management: bool
:returns: bool
"""
if settings.DISABLE_AUTO_COMPILE and not from_management:
return False
compiled_mtime = self.get_output_mtime(source_path)
if compiled_mtime is None:
return True
if compiled_mtime <= self.get_source_mtime(source_path):
return True
if self.supports_dependencies:
for dependency in self.get_dependencies(source_path):
dependency_mtime = self.get_source_mtime(dependency)
if compiled_mtime <= dependency_mtime:
return True
return False
def get_source(self, source_path):
""" Get the source code to be compiled.
:param source_path: relative path to a source file
:type source_path: str
:returns: str
"""
return utils.read_file(self.get_full_source_path(source_path))
def compile(self, source_path, from_management=False, verbosity=0):
""" Compile the given source path and return relative path to the compiled file.
Raise ValueError if the source file type is not supported.
May raise a StaticCompilationError if something goes wrong with compilation.
:param source_path: relative path to a source file
:type source_path: str
:param from_management: whether the method was invoked from management command
:type from_management: bool
:type verbosity: int
:rtype: str
"""
if not self.is_supported(source_path):
raise ValueError("'{0}' file type is not supported by '{1}'".format(
source_path, self.__class__.__name__
))
compiled_path = self.get_output_path(source_path)
if self.should_compile(source_path, from_management=from_management):
compiled_path = self.compile_file(source_path)
if self.supports_dependencies:
self.update_dependencies(source_path, self.find_dependencies(source_path))
message = "Compiled '{0}' to '{1}'".format(source_path, compiled_path)
if from_management and verbosity >= 1:
print(message)
else:
logger.info(message)
return compiled_path
def compile_lazy(self, source_path):
""" Return a lazy object which, when translated to string, compiles the specified source path and returns
the path to the compiled file.
Raise ValueError if the source file type is not supported.
May raise a StaticCompilationError if something goes wrong with compilation.
:param source_path: relative path to a source file
:type source_path: str
:returns: str
"""
return encoding.force_text(self.compile(source_path))
compile_lazy = functional.lazy(compile_lazy, six.text_type if uses_six else str)
def compile_file(self, source_path):
""" Compile the source file. Return the relative path to compiled file.
May raise a StaticCompilationError if something goes wrong with compilation.
:param source_path: path to the source file
:type source_path: str
:returns: str
"""
raise NotImplementedError
def compile_source(self, source):
""" Compile the source code. May raise a StaticCompilationError
if something goes wrong with compilation.
:param source: source code
:type source: str
:returns: str
"""
raise NotImplementedError
def find_dependencies(self, source_path):
""" Find the dependencies for the given source file.
:param source_path: relative path to a source file
:type source_path: str
:returns: list
"""
return []
# noinspection PyMethodMayBeStatic
def get_dependencies(self, source_path):
""" Get the saved dependencies for the given source file.
:param source_path: relative path to a source file
:type source_path: str
:returns: list of str
"""
dependencies = []
for dependency in models.Dependency.objects.filter(source=source_path).order_by("depends_on"):
try:
self.get_full_source_path(dependency.depends_on)
except ValueError:
# File referenced in Dependency can't be located. Remove the Dependency object.
dependency.delete()
else:
dependencies.append(dependency.depends_on)
return dependencies
# noinspection PyMethodMayBeStatic
def get_dependents(self, source_path):
""" Get a list of files that depends on the given source file.
:param source_path: relative path to a source file
:type source_path: str
:returns: list of str
"""
dependents = []
for dependency in models.Dependency.objects.filter(depends_on=source_path).order_by("source"):
try:
self.get_full_source_path(dependency.source)
except ValueError:
# File referenced in Dependency can't be located. Remove the Dependency object.
dependency.delete()
else:
dependents.append(dependency.source)
return dependents
# noinspection PyMethodMayBeStatic
def update_dependencies(self, source_path, dependencies):
""" Updates the saved dependencies for the given source file.
:param source_path: relative path to a source file
:type source_path: str
:param dependencies: list of files that source file depends on
:type dependencies: list of str
"""
if not dependencies:
models.Dependency.objects.filter(source=source_path).delete()
else:
models.Dependency.objects.filter(
source=source_path
).exclude(
depends_on__in=dependencies,
).delete()
for dependency in dependencies:
models.Dependency.objects.get_or_create(
source=source_path,
depends_on=dependency,
)
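# --- Illustrative sketch, not part of the original file ---
# BaseCompiler leaves compile_file/compile_source abstract; a minimal
# subclass only needs the two extensions plus those two hooks. The class
# below is hypothetical (its "compiler" is a no-op) and exists purely to
# show the wiring; a real subclass would invoke an actual tool.
class PassThroughCompiler(BaseCompiler):
    name = "passthrough"
    input_extension = "txt"
    output_extension = "out"

    def compile_source(self, source):
        # Stand-in for a real compiler invocation.
        return source

    def compile_file(self, source_path):
        compiled = self.compile_source(self.get_source(source_path))
        full_output_path = self.get_full_output_path(source_path)
        output_dir = os.path.dirname(full_output_path)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        with open(full_output_path, "w") as f:
            f.write(compiled)
        return self.get_output_path(source_path)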
| 33.912903 | 113 | 0.642348 |
import logging
import os
import posixpath
import django.core.exceptions
from django.contrib.staticfiles import finders
from django.utils import encoding, functional
try:
from django.utils import six
uses_six = True
except ImportError:
uses_six = False
from .. import models, mtime, settings, utils
logger = logging.getLogger("static_precompiler")
__all__ = (
"BaseCompiler",
)
class BaseCompiler(object):
name = None
supports_dependencies = False
input_extension = None
output_extension = None
def is_supported(self, source_path):
return os.path.splitext(source_path)[1].lstrip(".") == self.input_extension
def get_full_source_path(self, source_path):
norm_source_path = utils.normalize_path(source_path.lstrip("/"))
if settings.STATIC_ROOT:
full_path = os.path.join(settings.STATIC_ROOT, norm_source_path)
if os.path.exists(full_path):
return full_path
try:
full_path = finders.find(norm_source_path)
except django.core.exceptions.SuspiciousOperation:
full_path = None
if full_path is None:
raise ValueError("Can't find staticfile named: {0}".format(source_path))
return full_path
def get_output_filename(self, source_filename):
return "{0}.{1}".format(os.path.splitext(source_filename)[0], self.output_extension)
def get_output_path(self, source_path):
source_dir = os.path.dirname(source_path.lstrip("/"))
source_filename = os.path.basename(source_path)
output_filename = self.get_output_filename(source_filename)
return posixpath.join(settings.OUTPUT_DIR, source_dir, output_filename)
def get_full_output_path(self, source_path):
return os.path.join(settings.ROOT, utils.normalize_path(self.get_output_path(source_path)))
def get_source_mtime(self, source_path):
return mtime.get_mtime(self.get_full_source_path(source_path))
def get_output_mtime(self, source_path):
full_output_path = self.get_full_output_path(source_path)
if not os.path.exists(full_output_path):
return None
return mtime.get_mtime(full_output_path)
def should_compile(self, source_path, from_management=False):
if settings.DISABLE_AUTO_COMPILE and not from_management:
return False
compiled_mtime = self.get_output_mtime(source_path)
if compiled_mtime is None:
return True
if compiled_mtime <= self.get_source_mtime(source_path):
return True
if self.supports_dependencies:
for dependency in self.get_dependencies(source_path):
dependency_mtime = self.get_source_mtime(dependency)
if compiled_mtime <= dependency_mtime:
return True
return False
def get_source(self, source_path):
return utils.read_file(self.get_full_source_path(source_path))
def compile(self, source_path, from_management=False, verbosity=0):
if not self.is_supported(source_path):
raise ValueError("'{0}' file type is not supported by '{1}'".format(
source_path, self.__class__.__name__
))
compiled_path = self.get_output_path(source_path)
if self.should_compile(source_path, from_management=from_management):
compiled_path = self.compile_file(source_path)
if self.supports_dependencies:
self.update_dependencies(source_path, self.find_dependencies(source_path))
message = "Compiled '{0}' to '{1}'".format(source_path, compiled_path)
if from_management and verbosity >= 1:
print(message)
else:
logger.info(message)
return compiled_path
def compile_lazy(self, source_path):
return encoding.force_text(self.compile(source_path))
compile_lazy = functional.lazy(compile_lazy, six.text_type if uses_six else str)
def compile_file(self, source_path):
raise NotImplementedError
def compile_source(self, source):
raise NotImplementedError
def find_dependencies(self, source_path):
return []
# noinspection PyMethodMayBeStatic
def get_dependencies(self, source_path):
dependencies = []
for dependency in models.Dependency.objects.filter(source=source_path).order_by("depends_on"):
try:
self.get_full_source_path(dependency.depends_on)
except ValueError:
# File referenced in Dependency can't be located. Remove the Dependency object.
dependency.delete()
else:
dependencies.append(dependency.depends_on)
return dependencies
def get_dependents(self, source_path):
dependents = []
for dependency in models.Dependency.objects.filter(depends_on=source_path).order_by("source"):
try:
self.get_full_source_path(dependency.source)
except ValueError:
dependency.delete()
else:
dependents.append(dependency.source)
return dependents
# noinspection PyMethodMayBeStatic
def update_dependencies(self, source_path, dependencies):
if not dependencies:
models.Dependency.objects.filter(source=source_path).delete()
else:
models.Dependency.objects.filter(
source=source_path
).exclude(
depends_on__in=dependencies,
).delete()
for dependency in dependencies:
models.Dependency.objects.get_or_create(
source=source_path,
depends_on=dependency,
)
| true | true |
| 1c48ba419e9dc47c3799b86acf638abebcbe5ba2 | 10,227 | py | Python | dolo/algos/value_iteration.py | gkbharathy/econ_model_02 | d91ddf148b009bf79852d9aec70f3a1877e0f79a | ["BSD-2-Clause"] | null | null | null | dolo/algos/value_iteration.py | gkbharathy/econ_model_02 | d91ddf148b009bf79852d9aec70f3a1877e0f79a | ["BSD-2-Clause"] | null | null | null | dolo/algos/value_iteration.py | gkbharathy/econ_model_02 | d91ddf148b009bf79852d9aec70f3a1877e0f79a | ["BSD-2-Clause"] | null | null | null |
import time
import numpy as np
import numpy
import scipy.optimize
from dolo.numeric.processes import DiscretizedIIDProcess
# from dolo.numeric.decision_rules_markov import MarkovDecisionRule, IIDDecisionRule
from dolo.numeric.decision_rule import DecisionRule, ConstantDecisionRule
from dolo.numeric.grids import Grid, CartesianGrid, SmolyakGrid, UnstructuredGrid
from dolo.misc.itprinter import IterationsPrinter
def constant_policy(model):
return ConstantDecisionRule(model.calibration["controls"])
from .results import AlgoResult, ValueIterationResult
def value_iteration(model,
grid={},
tol=1e-6,
maxit=500,
maxit_howard=20,
verbose=False,
details=True):
"""
Solve for the value function and associated Markov decision rule by iterating over
the value function.
Parameters:
-----------
model :
"dtmscc" model. Must contain a 'felicity' function.
grid :
grid options
dr :
decision rule to evaluate
Returns:
--------
mdr : Markov decision rule
The solved decision rule/policy function
mdrv: decision rule
The solved value function
"""
transition = model.functions['transition']
felicity = model.functions['felicity']
controls_lb = model.functions['controls_lb']
controls_ub = model.functions['controls_ub']
parms = model.calibration['parameters']
discount = model.calibration['beta']
x0 = model.calibration['controls']
m0 = model.calibration['exogenous']
s0 = model.calibration['states']
r0 = felicity(m0, s0, x0, parms)
process = model.exogenous
dprocess = process.discretize()
n_ms = dprocess.n_nodes()  # number of exogenous states
n_mv = dprocess.n_inodes(0)  # this assumes the number of integration nodes is constant
endo_grid = model.get_grid(**grid)
exo_grid = dprocess.grid
mdrv = DecisionRule(exo_grid, endo_grid)
grid = mdrv.endo_grid.nodes()
N = grid.shape[0]
n_x = len(x0)
mdr = constant_policy(model)
controls_0 = np.zeros((n_ms, N, n_x))
for i_ms in range(n_ms):
controls_0[i_ms, :, :] = mdr.eval_is(i_ms, grid)
values_0 = np.zeros((n_ms, N, 1))
# for i_ms in range(n_ms):
# values_0[i_ms, :, :] = mdrv(i_ms, grid)
mdr = DecisionRule(exo_grid, endo_grid)
# mdr.set_values(controls_0)
# THIRD: value function iterations until convergence
it = 0
err_v = 100
err_v_0 = 0
gain_v = 0.0
err_x = 100
err_x_0 = 0
tol_x = 1e-5
tol_v = 1e-7
itprint = IterationsPrinter(
('N', int), ('Error_V', float), ('Gain_V', float), ('Error_x', float),
('Gain_x', float), ('Eval_n', int), ('Time', float),
verbose=verbose)
itprint.print_header('Start value function iterations.')
while (it < maxit) and (err_v > tol or err_x > tol_x):
t_start = time.time()
it += 1
mdr.set_values(controls_0)
if it > 2:
ev = evaluate_policy(
model, mdr, initial_guess=mdrv, verbose=False, details=True)
else:
ev = evaluate_policy(model, mdr, verbose=False, details=True)
mdrv = ev.solution
for i_ms in range(n_ms):
values_0[i_ms, :, :] = mdrv.eval_is(i_ms, grid)
values = values_0.copy()
controls = controls_0.copy()
for i_m in range(n_ms):
m = dprocess.node(i_m)
for n in range(N):
s = grid[n, :]
x = controls[i_m, n, :]
lb = controls_lb(m, s, parms)
ub = controls_ub(m, s, parms)
bnds = [e for e in zip(lb, ub)]
def valfun(xx):
return -choice_value(transition, felicity, i_m, s, xx,
mdrv, dprocess, parms, discount)[0]
res = scipy.optimize.minimize(valfun, x, bounds=bnds)
controls[i_m, n, :] = res.x
values[i_m, n, 0] = -valfun(x)
# compute error, update value and dr
err_x = abs(controls - controls_0).max()
err_v = abs(values - values_0).max()
t_end = time.time()
elapsed = t_end - t_start
values_0 = values
controls_0 = controls
gain_x = err_x / err_x_0
gain_v = err_v / err_v_0
err_x_0 = err_x
err_v_0 = err_v
itprint.print_iteration(
N=it,
Error_V=err_v,
Gain_V=gain_v,
Error_x=err_x,
Gain_x=gain_x,
Eval_n=ev.iterations,
Time=elapsed)
itprint.print_finished()
mdr = DecisionRule(exo_grid, endo_grid)
mdr.set_values(controls)
mdrv.set_values(values_0)
if not details:
return mdr, mdrv
else:
return ValueIterationResult(
mdr, #:AbstractDecisionRule
mdrv, #:AbstractDecisionRule
it, #:Int
dprocess, #:AbstractDiscretizedProcess
err_x<tol_x, #:Bool
tol_x, #:Float64
err_x, #:Float64
err_v<tol_v, #:Bool
tol_v, #:Float64
err_v, #:Float64
None, #log: #:ValueIterationLog
None #trace: #:Union{Nothing,IterationTrace
)
def choice_value(transition, felicity, i_ms, s, x, drv, dprocess, parms, beta):
m = dprocess.node(i_ms)
cont_v = 0.0
for I_ms in range(dprocess.n_inodes(i_ms)):
M = dprocess.inode(i_ms, I_ms)
prob = dprocess.iweight(i_ms, I_ms)
S = transition(m, s, x, M, parms)
V = drv.eval_is(I_ms, S)[0]
cont_v += prob * V
return felicity(m, s, x, parms) + beta * cont_v
class EvaluationResult:
def __init__(self, solution, iterations, tol, error):
self.solution = solution
self.iterations = iterations
self.tol = tol
self.error = error
def evaluate_policy(model,
mdr,
tol=1e-8,
maxit=2000,
grid={},
verbose=True,
initial_guess=None,
hook=None,
integration_orders=None,
details=False,
interp_type='cubic'):
"""Compute value function corresponding to policy ``dr``
Parameters:
-----------
model:
"dtcscc" model. Must contain a 'value' function.
mdr:
decision rule to evaluate
Returns:
--------
decision rule:
value function (a function of the space similar to a decision rule
object)
"""
process = model.exogenous
dprocess = process.discretize()
n_ms = dprocess.n_nodes()  # number of exogenous states
n_mv = dprocess.n_inodes(0)  # this assumes the number of integration nodes is constant
x0 = model.calibration['controls']
v0 = model.calibration['values']
parms = model.calibration['parameters']
n_x = len(x0)
n_v = len(v0)
n_s = len(model.symbols['states'])
endo_grid = model.get_grid(**grid)
exo_grid = dprocess.grid
if initial_guess is not None:
mdrv = initial_guess
else:
mdrv = DecisionRule(exo_grid, endo_grid, interp_type=interp_type)
grid = mdrv.endo_grid.nodes()
N = grid.shape[0]
if isinstance(mdr, np.ndarray):
controls = mdr
else:
controls = np.zeros((n_ms, N, n_x))
for i_m in range(n_ms):
controls[i_m, :, :] = mdr.eval_is(i_m, grid)
values_0 = np.zeros((n_ms, N, n_v))
if initial_guess is None:
for i_m in range(n_ms):
values_0[i_m, :, :] = v0[None, :]
else:
for i_m in range(n_ms):
values_0[i_m, :, :] = initial_guess.eval_is(i_m, grid)
val = model.functions['value']
g = model.functions['transition']
sh_v = values_0.shape
err = 10
inner_maxit = 50
it = 0
if verbose:
headline = '|{0:^4} | {1:10} | {2:8} | {3:8} |'.format(
'N', ' Error', 'Gain', 'Time')
stars = '-' * len(headline)
print(stars)
print(headline)
print(stars)
t1 = time.time()
err_0 = np.nan
verbit = (verbose == 'full')
while err > tol and it < maxit:
it += 1
t_start = time.time()
mdrv.set_values(values_0.reshape(sh_v))
values = update_value(val, g, grid, controls, values_0, mdr, mdrv,
dprocess, parms).reshape((-1, n_v))
err = abs(values.reshape(sh_v) - values_0).max()
err_SA = err / err_0
err_0 = err
values_0 = values.reshape(sh_v)
t_finish = time.time()
elapsed = t_finish - t_start
if verbose:
print('|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |'.format(
it, err, err_SA, elapsed))
# values_0 = values.reshape(sh_v)
t2 = time.time()
if verbose:
print(stars)
print("Elapsed: {} seconds.".format(t2 - t1))
print(stars)
if not details:
return mdrv
else:
return EvaluationResult(mdrv, it, tol, err)
def update_value(val, g, s, x, v, dr, drv, dprocess, parms):
N = s.shape[0]
n_s = s.shape[1]
n_ms = dprocess.n_nodes()  # number of exogenous states
n_mv = dprocess.n_inodes(0)  # this assumes the number of integration nodes is constant
res = np.zeros_like(v)
for i_ms in range(n_ms):
m = dprocess.node(i_ms)[None, :].repeat(N, axis=0)
xm = x[i_ms, :, :]
vm = v[i_ms, :, :]
for I_ms in range(n_mv):
# M = P[I_ms,:][None,:]
M = dprocess.inode(i_ms, I_ms)[None, :].repeat(N, axis=0)
prob = dprocess.iweight(i_ms, I_ms)
S = g(m, s, xm, M, parms)
XM = dr.eval_ijs(i_ms, I_ms, S)
VM = drv.eval_ijs(i_ms, I_ms, S)
rr = val(m, s, xm, vm, M, S, XM, VM, parms)
res[i_ms, :, :] += prob * rr
return res
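# --- Illustrative sketch, not part of the original module ---
# The solver above is a Bellman iteration: maximize current felicity plus the
# discounted expected continuation value, and repeat until the sup-norm error
# drops below tolerance. The same contraction on a toy discrete problem
# (shapes and numbers invented for illustration):
def toy_value_iteration(rewards, transitions, beta=0.95, tol=1e-8):
    # rewards[s, a] is the flow reward; transitions[s, a, s2] the probabilities.
    v = np.zeros(rewards.shape[0])
    while True:
        q = rewards + beta * transitions @ v  # Q(s, a) = r(s, a) + beta * E[v(s')]
        v_new = q.max(axis=1)                 # optimal action in each state
        if np.abs(v_new - v).max() < tol:
            return v_new
        v = v_new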
| 27.34492 | 86 | 0.553633 |
import time
import numpy as np
import numpy
import scipy.optimize
from dolo.numeric.processes import DiscretizedIIDProcess
from dolo.numeric.decision_rule import DecisionRule, ConstantDecisionRule
from dolo.numeric.grids import Grid, CartesianGrid, SmolyakGrid, UnstructuredGrid
from dolo.misc.itprinter import IterationsPrinter
def constant_policy(model):
return ConstantDecisionRule(model.calibration["controls"])
from .results import AlgoResult, ValueIterationResult
def value_iteration(model,
grid={},
tol=1e-6,
maxit=500,
maxit_howard=20,
verbose=False,
details=True):
transition = model.functions['transition']
felicity = model.functions['felicity']
controls_lb = model.functions['controls_lb']
controls_ub = model.functions['controls_ub']
parms = model.calibration['parameters']
discount = model.calibration['beta']
x0 = model.calibration['controls']
m0 = model.calibration['exogenous']
s0 = model.calibration['states']
r0 = felicity(m0, s0, x0, parms)
process = model.exogenous
dprocess = process.discretize()
n_ms = dprocess.n_nodes()
n_mv = dprocess.n_inodes(0)
endo_grid = model.get_grid(**grid)
exo_grid = dprocess.grid
mdrv = DecisionRule(exo_grid, endo_grid)
grid = mdrv.endo_grid.nodes()
N = grid.shape[0]
n_x = len(x0)
mdr = constant_policy(model)
controls_0 = np.zeros((n_ms, N, n_x))
for i_ms in range(n_ms):
controls_0[i_ms, :, :] = mdr.eval_is(i_ms, grid)
values_0 = np.zeros((n_ms, N, 1))
mdr = DecisionRule(exo_grid, endo_grid)
it = 0
err_v = 100
err_v_0 = 0
gain_v = 0.0
err_x = 100
err_x_0 = 0
tol_x = 1e-5
tol_v = 1e-7
itprint = IterationsPrinter(
('N', int), ('Error_V', float), ('Gain_V', float), ('Error_x', float),
('Gain_x', float), ('Eval_n', int), ('Time', float),
verbose=verbose)
itprint.print_header('Start value function iterations.')
while (it < maxit) and (err_v > tol or err_x > tol_x):
t_start = time.time()
it += 1
mdr.set_values(controls_0)
if it > 2:
ev = evaluate_policy(
model, mdr, initial_guess=mdrv, verbose=False, details=True)
else:
ev = evaluate_policy(model, mdr, verbose=False, details=True)
mdrv = ev.solution
for i_ms in range(n_ms):
values_0[i_ms, :, :] = mdrv.eval_is(i_ms, grid)
values = values_0.copy()
controls = controls_0.copy()
for i_m in range(n_ms):
m = dprocess.node(i_m)
for n in range(N):
s = grid[n, :]
x = controls[i_m, n, :]
lb = controls_lb(m, s, parms)
ub = controls_ub(m, s, parms)
bnds = [e for e in zip(lb, ub)]
def valfun(xx):
return -choice_value(transition, felicity, i_m, s, xx,
mdrv, dprocess, parms, discount)[0]
res = scipy.optimize.minimize(valfun, x, bounds=bnds)
controls[i_m, n, :] = res.x
values[i_m, n, 0] = -valfun(x)
err_x = abs(controls - controls_0).max()
err_v = abs(values - values_0).max()
t_end = time.time()
elapsed = t_end - t_start
values_0 = values
controls_0 = controls
gain_x = err_x / err_x_0
gain_v = err_v / err_v_0
err_x_0 = err_x
err_v_0 = err_v
itprint.print_iteration(
N=it,
Error_V=err_v,
Gain_V=gain_v,
Error_x=err_x,
Gain_x=gain_x,
Eval_n=ev.iterations,
Time=elapsed)
itprint.print_finished()
mdr = DecisionRule(exo_grid, endo_grid)
mdr.set_values(controls)
mdrv.set_values(values_0)
if not details:
return mdr, mdrv
else:
return ValueIterationResult(
mdr, mdrv, it, dprocess,
err_x < tol_x, tol_x, err_x,
err_v < tol_v, tol_v, err_v,
None, None)
def choice_value(transition, felicity, i_ms, s, x, drv, dprocess, parms, beta):
m = dprocess.node(i_ms)
cont_v = 0.0
for I_ms in range(dprocess.n_inodes(i_ms)):
M = dprocess.inode(i_ms, I_ms)
prob = dprocess.iweight(i_ms, I_ms)
S = transition(m, s, x, M, parms)
V = drv.eval_is(I_ms, S)[0]
cont_v += prob * V
return felicity(m, s, x, parms) + beta * cont_v
class EvaluationResult:
def __init__(self, solution, iterations, tol, error):
self.solution = solution
self.iterations = iterations
self.tol = tol
self.error = error
def evaluate_policy(model,
mdr,
tol=1e-8,
maxit=2000,
grid={},
verbose=True,
initial_guess=None,
hook=None,
integration_orders=None,
details=False,
interp_type='cubic'):
process = model.exogenous
dprocess = process.discretize()
n_ms = dprocess.n_nodes()
n_mv = dprocess.n_inodes(0)
x0 = model.calibration['controls']
v0 = model.calibration['values']
parms = model.calibration['parameters']
n_x = len(x0)
n_v = len(v0)
n_s = len(model.symbols['states'])
endo_grid = model.get_grid(**grid)
exo_grid = dprocess.grid
if initial_guess is not None:
mdrv = initial_guess
else:
mdrv = DecisionRule(exo_grid, endo_grid, interp_type=interp_type)
grid = mdrv.endo_grid.nodes()
N = grid.shape[0]
if isinstance(mdr, np.ndarray):
controls = mdr
else:
controls = np.zeros((n_ms, N, n_x))
for i_m in range(n_ms):
controls[i_m, :, :] = mdr.eval_is(i_m, grid)
values_0 = np.zeros((n_ms, N, n_v))
if initial_guess is None:
for i_m in range(n_ms):
values_0[i_m, :, :] = v0[None, :]
else:
for i_m in range(n_ms):
values_0[i_m, :, :] = initial_guess.eval_is(i_m, grid)
val = model.functions['value']
g = model.functions['transition']
sh_v = values_0.shape
err = 10
inner_maxit = 50
it = 0
if verbose:
headline = '|{0:^4} | {1:10} | {2:8} | {3:8} |'.format(
'N', ' Error', 'Gain', 'Time')
stars = '-' * len(headline)
print(stars)
print(headline)
print(stars)
t1 = time.time()
err_0 = np.nan
verbit = (verbose == 'full')
while err > tol and it < maxit:
it += 1
t_start = time.time()
mdrv.set_values(values_0.reshape(sh_v))
values = update_value(val, g, grid, controls, values_0, mdr, mdrv,
dprocess, parms).reshape((-1, n_v))
err = abs(values.reshape(sh_v) - values_0).max()
err_SA = err / err_0
err_0 = err
values_0 = values.reshape(sh_v)
t_finish = time.time()
elapsed = t_finish - t_start
if verbose:
print('|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |'.format(
it, err, err_SA, elapsed))
t2 = time.time()
if verbose:
print(stars)
print("Elapsed: {} seconds.".format(t2 - t1))
print(stars)
if not details:
return mdrv
else:
return EvaluationResult(mdrv, it, tol, err)
def update_value(val, g, s, x, v, dr, drv, dprocess, parms):
N = s.shape[0]
n_s = s.shape[1]
n_ms = dprocess.n_nodes()
n_mv = dprocess.n_inodes(0)
res = np.zeros_like(v)
for i_ms in range(n_ms):
m = dprocess.node(i_ms)[None, :].repeat(N, axis=0)
xm = x[i_ms, :, :]
vm = v[i_ms, :, :]
for I_ms in range(n_mv):
M = dprocess.inode(i_ms, I_ms)[None, :].repeat(N, axis=0)
prob = dprocess.iweight(i_ms, I_ms)
S = g(m, s, xm, M, parms)
XM = dr.eval_ijs(i_ms, I_ms, S)
VM = drv.eval_ijs(i_ms, I_ms, S)
rr = val(m, s, xm, vm, M, S, XM, VM, parms)
res[i_ms, :, :] += prob * rr
return res
| true | true |
| 1c48baaa5f215b10e5b5cd0d792c038d273b33da | 1,441 | py | Python | gallery/tests.py | Kips-alih/my-gallery | f48c0dd71e84102560d095fef4da223d11d7c606 | ["MIT"] | null | null | null | gallery/tests.py | Kips-alih/my-gallery | f48c0dd71e84102560d095fef4da223d11d7c606 | ["MIT"] | null | null | null | gallery/tests.py | Kips-alih/my-gallery | f48c0dd71e84102560d095fef4da223d11d7c606 | ["MIT"] | null | null | null |
from django.test import TestCase
from .models import Image, Location, category
# Create your tests here.
# Testing Save Method
class ImageTestClass(TestCase):
# Set up method
def setUp(self):
self.image = Image(
    title='Nature',
    description='Our work to conserve biodiversity focuses on Key Biodiversity Areas.',
    image='http://image.com/image.jpg',
    category=category.objects.create(name="nature"),
    location=Location.objects.create(name='Kenya'))
# Testing instance
def test_instance(self):
self.assertTrue(isinstance(self.image,Image))
def test_save_method(self):
self.image.save_image()
images = Image.objects.all()
self.assertTrue(len(images) > 0)
def test_delete_image(self):
self.image.save_image()
self.image.delete_image()
images = Image.objects.all()
self.assertTrue(len(images) == 0)
def tearDown(self):
Image.objects.all().delete()
category.objects.all().delete()
#Category test cases
class categoryTestCase(TestCase):
def setUp(self):
category.objects.create(name="Category_test")
def test_category_name(self):
Category = category.objects.get(name="Category_test")
self.assertEqual(Category.name, "Category_test")
def test_category_str(self):
Category = category.objects.get(name="Category_test")
self.assertEqual(str(Category), "Category_test")
| 31.326087 | 259 | 0.684247 |
from django.test import TestCase
from .models import Image, Location, category
class ImageTestClass(TestCase):
def setUp(self):
self.image = Image(
    title='Nature',
    description='Our work to conserve biodiversity focuses on Key Biodiversity Areas.',
    image='http://image.com/image.jpg',
    category=category.objects.create(name="nature"),
    location=Location.objects.create(name='Kenya'))
def test_instance(self):
self.assertTrue(isinstance(self.image,Image))
def test_save_method(self):
self.image.save_image()
images = Image.objects.all()
self.assertTrue(len(images) > 0)
def test_delete_image(self):
self.image.save_image()
self.image.delete_image()
images = Image.objects.all()
self.assertTrue(len(images) == 0)
def tearDown(self):
Image.objects.all().delete()
category.objects.all().delete()
class categoryTestCase(TestCase):
def setUp(self):
category.objects.create(name="Category_test")
def test_category_name(self):
Category = category.objects.get(name="Category_test")
self.assertEqual(Category.name, "Category_test")
def test_category_str(self):
Category = category.objects.get(name="Category_test")
self.assertEqual(str(Category), "Category_test")
| true | true |
| 1c48bb3e561f10b44690e00435e248a10f1ad318 | 2,592 | py | Python | tests/test_priceranges.py | ahrenberg/marketxtradermodel | 0907191dfe444da5e407cc9723c3485d278d2952 | ["Apache-2.0"] | null | null | null | tests/test_priceranges.py | ahrenberg/marketxtradermodel | 0907191dfe444da5e407cc9723c3485d278d2952 | ["Apache-2.0"] | 1 | 2017-12-14T10:18:52.000Z | 2017-12-22T09:33:22.000Z | tests/test_priceranges.py | ahrenberg/marketxtradermodel | 0907191dfe444da5e407cc9723c3485d278d2952 | ["Apache-2.0"] | null | null | null |
"""
Test functions for priceranges.
"""
# Copyright 2017 Lukas Ahrenberg <lukas@ahrenberg.se>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import networkx as nx
import marketxtradermodel as mxtm
from marketxtradermodel.priceranges import *
import numpy as np
import math
def test_creation():
pr = PriceRanges()
# Calling without any prices should raise an exception.
with pytest.raises(Exception):
pr.compute_price()
def test_insert_one():
pr = PriceRanges()
# Sell at 1, buy at -1
pr.insert(p_s = 1, p_b = -1)
# Finally, we should get the solution at zero.
assert(0 == pr.compute_price())
def test_insert_one_translated():
pr = PriceRanges()
# Sell at 4, buy at 2
pr.insert(p_s = 4, p_b = 2)
# Finally, we should get the solution at 3.
assert(3 == pr.compute_price())
def test_insert_one_flipped():
pr = PriceRanges()
# Sell at -4, buy at -2
pr.insert(p_s = -4, p_b = -2)
# Should find a solution at -3
assert(-3 == pr.compute_price())
def test_clear_prices():
pr = PriceRanges()
pr.insert(-1,1)
pr.clear_prices()
with pytest.raises(Exception):
pr.compute_price()
# Insert new prices and try.
pr.insert(-1,1)
assert(0 == pr.compute_price())
def test_insert_multiple():
# Coming up with a sequence of three buy/sell pairs so that
# there are two solutions.
# Call them a, b, and c, with associated buy and sell prices
# a_b, a_s, b_b, b_s, c_b, c_s.
# Letting a be 'inverted' with a_s < a_b and b and c 'normal'.
# Choosing values so that b_b < a_s < b_s < c_b < c_s < a_b
# should produce two zero-areas, in b_b < p < a_s and b_s < p < c_b.
b_b = 1
a_s = 2
b_s = 3
c_b = 4
c_s = 5
a_b = 6
pr = PriceRanges()
pr.insert(a_s,a_b)
pr.insert(b_s,b_b)
pr.insert(c_s,c_b)
# Now test.
# The two solutions.
s1 = (b_b + a_s)/2.0
s2 = (b_s + c_b)/2.0
# The default solution should be the one closest to zero.
assert(min(abs(s1),abs(s2)) == pr.compute_price())
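# Worked check of the construction above (added for clarity): with
# b_b = 1, a_s = 2, b_s = 3 and c_b = 4, the two candidate prices are the
# interval midpoints s1 = (1 + 2) / 2 = 1.5 and s2 = (3 + 4) / 2 = 3.5,
# so the solution closest to zero, and hence the asserted price, is 1.5.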
| 29.454545 | 74 | 0.657407 |
import pytest
import networkx as nx
import marketxtradermodel as mxtm
from marketxtradermodel.priceranges import *
import numpy as np
import math
def test_creation():
pr = PriceRanges()
with pytest.raises(Exception):
pr.compute_price()
def test_insert_one():
pr = PriceRanges()
pr.insert(p_s = 1, p_b = -1)
assert(0 == pr.compute_price())
def test_insert_one_translated():
pr = PriceRanges()
pr.insert(p_s = 4, p_b = 2)
assert(3 == pr.compute_price())
def test_insert_one_flipped():
pr = PriceRanges()
pr.insert(p_s = -4, p_b = -2)
assert(-3 == pr.compute_price())
def test_clear_prices():
pr = PriceRanges()
pr.insert(-1,1)
pr.clear_prices()
with pytest.raises(Exception):
pr.compute_price()
pr.insert(-1,1)
assert(0 == pr.compute_price())
def test_insert_multiple():
b_b = 1
a_s = 2
b_s = 3
c_b = 4
c_s = 5
a_b = 6
pr = PriceRanges()
pr.insert(a_s,a_b)
pr.insert(b_s,b_b)
pr.insert(c_s,c_b)
s1 = (b_b + a_s)/2.0
s2 = (b_s + c_b)/2.0
assert(min(abs(s1),abs(s2)) == pr.compute_price())
| true | true |
| 1c48bb7cfd46f213761e5949e5e1a8bdda9040fe | 2,441 | py | Python | spec/unit/database_spec.py | sourcery-ai-bot/ipodio | e32ab2d1928a2b47500dd0ce0cbd17f71102dbe2 | ["BSD-3-Clause"] | 9 | 2015-06-02T23:31:20.000Z | 2021-05-17T17:26:32.000Z | spec/unit/database_spec.py | sourcery-ai-bot/ipodio | e32ab2d1928a2b47500dd0ce0cbd17f71102dbe2 | ["BSD-3-Clause"] | null | null | null | spec/unit/database_spec.py | sourcery-ai-bot/ipodio | e32ab2d1928a2b47500dd0ce0cbd17f71102dbe2 | ["BSD-3-Clause"] | 3 | 2015-10-07T21:51:38.000Z | 2021-01-23T12:22:58.000Z |
#-*- coding: utf-8 -*-
from spec.unit.fixtures import Internal, patch_gpod_module
gpod = patch_gpod_module()
from ipodio.track import Track
from ipodio.database import Database
from expects import expect
from mamba import describe, context, before
with describe(Database) as _:
with context('when fabricated'):
def should_have_an_internal_database():
expect(_.fabricated.internal).to.be.an(_.internal_class)
with context('when constructed'):
def should_have_an_empty_index():
expect(_.database.index).to.be.empty
def should_be_marked_as_not_updated():
expect(_.database.updated).to.be.false
with context('when calling find_by_hash'):
def should_return_an_empty_collection():
expect(_.database.find_by_hash(_.hash)).to.be.empty
with context('when calling get'):
def should_return_None():
expect(_.database.get_by_hash(_.hash)).to.be.none
with context('when accessing tracks'):
def should_return_a_list_with_tracks():
expect(_.database.tracks).not_to.be.empty
with context('when updating index'):
def should_populate_index():
expect(_.database.index).not_to.be.empty
with context('when calling find_by_hash'):
def should_return_a_collection():
expect(_.database.find_by_hash(_.hash)).not_to.be.empty
with context('when calling get_by_hash'):
def should_return_a_Track():
expect(_.database.get_by_hash(_.hash)).to.be.a(Track)
def should_return_a_track_with_the_given_hash():
expect(_.database.get_by_hash(_.hash)).to.have.property('hash', _.hash)
with context('when accessing tracks'):
def should_be_a_collection():
expect(_.database.tracks).not_to.be.empty
@before.all
def fixture():
_.database.update_index()
with context('the playlists property'):
def should_be_a_list():
expect(_.database.playlists).to.be.a(list)
@before.all
def fixtures():
_.internal_class = Internal
_.hash = '204939024023840234'
_.internal_track = Internal({'userdata': {'mp3hash': _.hash}})
_.database = Database(Internal([_.internal_track]))
_.fabricated = Database.create('', internal_class=_.internal_class)
| 32.986486 | 87 | 0.645637 |
from spec.unit.fixtures import Internal, patch_gpod_module
gpod = patch_gpod_module()
from ipodio.track import Track
from ipodio.database import Database
from expects import expect
from mamba import describe, context, before
with describe(Database) as _:
with context('when fabricated'):
def should_have_an_internal_database():
expect(_.fabricated.internal).to.be.an(_.internal_class)
with context('when constructed'):
def should_have_an_empty_index():
expect(_.database.index).to.be.empty
def should_be_marked_as_not_updated():
expect(_.database.updated).to.be.false
with context('when calling find_by_hash'):
def should_return_an_empty_collection():
expect(_.database.find_by_hash(_.hash)).to.be.empty
with context('when calling get'):
def should_return_None():
expect(_.database.get_by_hash(_.hash)).to.be.none
with context('when accessing tracks'):
def should_return_a_list_with_tracks():
expect(_.database.tracks).not_to.be.empty
with context('when updating index'):
def should_populate_index():
expect(_.database.index).not_to.be.empty
with context('when calling find_by_hash'):
def should_return_a_collection():
expect(_.database.find_by_hash(_.hash)).not_to.be.empty
with context('when calling get_by_hash'):
def should_return_a_Track():
expect(_.database.get_by_hash(_.hash)).to.be.a(Track)
def should_return_a_track_with_the_given_hash():
expect(_.database.get_by_hash(_.hash)).to.have.property('hash', _.hash)
with context('when accessing tracks'):
def should_be_a_collection():
expect(_.database.tracks).not_to.be.empty
@before.all
def fixture():
_.database.update_index()
with context('the playlists property'):
def should_be_a_list():
expect(_.database.playlists).to.be.a(list)
@before.all
def fixtures():
_.internal_class = Internal
_.hash = '204939024023840234'
_.internal_track = Internal({'userdata': {'mp3hash': _.hash}})
_.database = Database(Internal([_.internal_track]))
_.fabricated = Database.create('', internal_class=_.internal_class)
| true | true |
| 1c48bb95496680dbac66b2e5ec105326dc33b0f6 | 380 | py | Python | srcipts/requests/friends_check_get.py | GerasimovRM/Where-I-Am | 58f6f0d1533421890f199dacabe523a447486b9f | ["MIT"] | null | null | null | srcipts/requests/friends_check_get.py | GerasimovRM/Where-I-Am | 58f6f0d1533421890f199dacabe523a447486b9f | ["MIT"] | null | null | null | srcipts/requests/friends_check_get.py | GerasimovRM/Where-I-Am | 58f6f0d1533421890f199dacabe523a447486b9f | ["MIT"] | null | null | null |
from requests import get, post
from pprint import pprint
from srcipts.requests.common import URL
tokens = post(f'{URL}/signin', json={'nickname': 'Roman',
'unhashed_password': 'сильныйпароль'}).json()
pprint(tokens)
headers = {'Authorization': f'Bearer {tokens["access_token"]}'}
pprint(get(f'{URL}/friends_check', headers=headers).json())
| 31.666667 | 82 | 0.665789 |
from requests import get, post
from pprint import pprint
from srcipts.requests.common import URL
tokens = post(f'{URL}/signin', json={'nickname': 'Roman',
'unhashed_password': 'сильныйпароль'}).json()
pprint(tokens)
headers = {'Authorization': f'Bearer {tokens["access_token"]}'}
pprint(get(f'{URL}/friends_check', headers=headers).json())
| true | true |
| 1c48bca115e0da7844c6b39dde5af63e4b379173 | 140 | py | Python | mamba/mamba/__init__.py | wulmer/mamba | 5961d76afdd8b0f070bf0f2da396ef25289c965c | ["BSD-3-Clause"] | 2,262 | 2020-09-08T07:46:35.000Z | 2022-03-31T21:11:35.000Z | mamba/mamba/__init__.py | wulmer/mamba | 5961d76afdd8b0f070bf0f2da396ef25289c965c | ["BSD-3-Clause"] | 841 | 2020-09-07T15:22:43.000Z | 2022-03-31T18:18:43.000Z | mamba/mamba/__init__.py | wulmer/mamba | 5961d76afdd8b0f070bf0f2da396ef25289c965c | ["BSD-3-Clause"] | 132 | 2020-09-10T03:05:45.000Z | 2022-03-29T12:32:47.000Z |
from __future__ import absolute_import, division, print_function, unicode_literals
from ._version import __version__, version_info # noqa
| 35 | 82 | 0.842857 |
from __future__ import absolute_import, division, print_function, unicode_literals
from ._version import __version__, version_info
| true | true |
| 1c48bdf5afb88f7044b760c3718f3f56ec6148ee | 3,807 | py | Python | aiida/orm/convert.py | borellim/aiida_core | eebef392c81e8b130834a92e1d7abf5e2e30b3ce | ["BSD-2-Clause"] | 1 | 2019-03-15T10:37:53.000Z | 2019-03-15T10:37:53.000Z | aiida/orm/convert.py | odarbelaeze/aiida_core | 934b4ccdc73a993f2a6656caf516500470e3da08 | ["BSD-2-Clause"] | null | null | null | aiida/orm/convert.py | odarbelaeze/aiida_core | 934b4ccdc73a993f2a6656caf516500470e3da08 | ["BSD-2-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=cyclic-import,ungrouped-imports
"""Module for converting backend entities into frontend, ORM, entities"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Mapping
try: # Python3
from functools import singledispatch
except ImportError: # Python2
from singledispatch import singledispatch
try:
from collections.abc import Iterator, Sized # only works on python 3.3+
except ImportError:
from collections import Iterator, Sized
from aiida.orm.implementation import BackendComputer, BackendGroup, BackendUser, BackendAuthInfo, BackendComment, \
BackendLog, BackendNode
@singledispatch
def get_orm_entity(backend_entity):
raise TypeError("No corresponding AiiDA ORM class exists for backend instance {}".format(
backend_entity.__class__.__name__))
@get_orm_entity.register(Mapping)
def _(backend_entity):
return {key: get_orm_entity(value) for key, value in backend_entity.items()}
@get_orm_entity.register(BackendGroup)
def _(backend_entity):
from . import groups
return groups.Group.from_backend_entity(backend_entity)
@get_orm_entity.register(BackendComputer)
def _(backend_entity):
from . import computers
return computers.Computer.from_backend_entity(backend_entity)
@get_orm_entity.register(BackendUser)
def _(backend_entity):
from . import users
return users.User.from_backend_entity(backend_entity)
@get_orm_entity.register(BackendAuthInfo)
def _(backend_entity):
from . import authinfos
return authinfos.AuthInfo.from_backend_entity(backend_entity)
@get_orm_entity.register(BackendLog)
def _(backend_entity):
from . import logs
return logs.Log.from_backend_entity(backend_entity)
@get_orm_entity.register(BackendComment)
def _(backend_entity):
from . import comments
return comments.Comment.from_backend_entity(backend_entity)
@get_orm_entity.register(BackendNode)
def _(backend_entity):
from .utils.node import load_node_class
node_class = load_node_class(backend_entity.node_type)
return node_class.from_backend_entity(backend_entity)
class ConvertIterator(Iterator, Sized):
"""
Iterator that converts backend entities into frontend ORM entities as needed
See :func:`aiida.orm.Group.nodes` for an example.
"""
def __init__(self, backend_iterator):
super(ConvertIterator, self).__init__()
self._backend_iterator = backend_iterator
self.generator = self._genfunction()
def _genfunction(self):
for backend_node in self._backend_iterator:
yield get_orm_entity(backend_node)
def __iter__(self):
return self
def __len__(self):
return len(self._backend_iterator)
def __getitem__(self, value):
if isinstance(value, slice):
return [get_orm_entity(backend_node) for backend_node in self._backend_iterator[value]]
return get_orm_entity(self._backend_iterator[value])
# For future python-3 compatibility
def __next__(self):
return next(self.generator)
def next(self):
return next(self.generator)
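# --- Illustrative sketch, not part of the original module ---
# The converter registry above is plain functools.singledispatch: the
# decorated function is the fallback, and each register(Type) call adds a
# type-specific branch. A self-contained miniature of the same pattern
# (names invented for illustration):
@singledispatch
def describe(obj):
    raise TypeError("no converter for {}".format(type(obj).__name__))

@describe.register(int)
def _(obj):
    return "int: {}".format(obj)

@describe.register(list)
def _(obj):
    # Recurses, mirroring how get_orm_entity handles Mapping containers.
    return [describe(item) for item in obj]

# describe([1, 2]) -> ['int: 1', 'int: 2']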
| 31.725 | 115 | 0.695298 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Mapping
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
try:
from collections.abc import Iterator, Sized
except ImportError:
from collections import Iterator, Sized
from aiida.orm.implementation import BackendComputer, BackendGroup, BackendUser, BackendAuthInfo, BackendComment, \
BackendLog, BackendNode
@singledispatch
def get_orm_entity(backend_entity):
raise TypeError("No corresponding AiiDA ORM class exists for backend instance {}".format(
backend_entity.__class__.__name__))
@get_orm_entity.register(Mapping)
def _(backend_entity):
return {key: get_orm_entity(value) for key, value in backend_entity.items()}
@get_orm_entity.register(BackendGroup)
def _(backend_entity):
from . import groups
return groups.Group.from_backend_entity(backend_entity)
@get_orm_entity.register(BackendComputer)
def _(backend_entity):
from . import computers
return computers.Computer.from_backend_entity(backend_entity)
@get_orm_entity.register(BackendUser)
def _(backend_entity):
from . import users
return users.User.from_backend_entity(backend_entity)
@get_orm_entity.register(BackendAuthInfo)
def _(backend_entity):
from . import authinfos
return authinfos.AuthInfo.from_backend_entity(backend_entity)
@get_orm_entity.register(BackendLog)
def _(backend_entity):
from . import logs
return logs.Log.from_backend_entity(backend_entity)
@get_orm_entity.register(BackendComment)
def _(backend_entity):
from . import comments
return comments.Comment.from_backend_entity(backend_entity)
@get_orm_entity.register(BackendNode)
def _(backend_entity):
from .utils.node import load_node_class
node_class = load_node_class(backend_entity.node_type)
return node_class.from_backend_entity(backend_entity)
class ConvertIterator(Iterator, Sized):
def __init__(self, backend_iterator):
super(ConvertIterator, self).__init__()
self._backend_iterator = backend_iterator
self.generator = self._genfunction()
def _genfunction(self):
for backend_node in self._backend_iterator:
yield get_orm_entity(backend_node)
def __iter__(self):
return self
def __len__(self):
return len(self._backend_iterator)
def __getitem__(self, value):
if isinstance(value, slice):
return [get_orm_entity(backend_node) for backend_node in self._backend_iterator[value]]
return get_orm_entity(self._backend_iterator[value])
def __next__(self):
return next(self.generator)
def next(self):
return next(self.generator)
| true | true |
| 1c48bfcb4049b286061ece2031b4d355497489ab | 2,534 | py | Python | src/DeePyMoD_SBL/deepymod_torch/network.py | GJBoth/DeePyMoD_torch | b4b90080f4f9fea8fdf4426e0708e807b193242f | ["MIT"] | 1 | 2021-11-06T18:02:18.000Z | 2021-11-06T18:02:18.000Z | src/DeePyMoD_SBL/deepymod_torch/network.py | GJBoth/DeePyMoD_torch | b4b90080f4f9fea8fdf4426e0708e807b193242f | ["MIT"] | null | null | null | src/DeePyMoD_SBL/deepymod_torch/network.py | GJBoth/DeePyMoD_torch | b4b90080f4f9fea8fdf4426e0708e807b193242f | ["MIT"] | null | null | null |
import torch
import torch.nn as nn
class Library(nn.Module):
def __init__(self, library_func, library_args={}):
super().__init__()
self.library_func = library_func
self.library_args = library_args
def forward(self, input):
time_deriv_list, theta = self.library_func(input, **self.library_args)
return time_deriv_list, theta
class Fitting(nn.Module):
def __init__(self, n_terms, n_out):
super().__init__()
self.coeff_vector = nn.ParameterList([torch.nn.Parameter(torch.rand((n_terms, 1), dtype=torch.float32)) for _ in torch.arange(n_out)])
self.sparsity_mask = [torch.ones(n_terms, dtype=torch.bool) for _ in torch.arange(n_out)]
def forward(self, input):
thetas, time_derivs = input
sparse_thetas = self.apply_mask(thetas)
self.coeff_vector = self.fit_coefficient(sparse_thetas, time_derivs)
return sparse_thetas, self.coeff_vector
def apply_mask(self, theta):
sparse_theta = [theta[:, sparsity_mask] for sparsity_mask in self.sparsity_mask]
return sparse_theta
def fit_coefficient(self, thetas, time_derivs):
return self.coeff_vector
class FittingDynamic(nn.Module):
def __init__(self, n_terms, n_out):
super().__init__()
self.coeff_vector = [torch.rand((n_terms, 1), dtype=torch.float32) for _ in torch.arange(n_out)]  # initialize randomly because otherwise tensorboard will complain
self.sparsity_mask = [torch.ones(n_terms, dtype=torch.bool) for _ in torch.arange(n_out)]
def forward(self, input):
thetas, time_derivs = input
sparse_thetas = self.apply_mask(thetas)
self.coeff_vector = self.fit_coefficient(sparse_thetas, time_derivs)
return sparse_thetas, self.coeff_vector
def apply_mask(self, theta):
sparse_theta = [theta[:, sparsity_mask] for sparsity_mask in self.sparsity_mask]
return sparse_theta
def fit_coefficient(self, thetas, time_derivs):
#opt_coeff = [torch.inverse(theta.T @ theta) @ (theta.T @ dt) for theta, dt in zip(thetas, time_derivs)] # normal equation for least squares
opt_coeff = []
for theta, dt in zip(thetas, time_derivs):
norm = torch.norm(theta, dim=0, keepdim=True)
Q, R = torch.qr(theta / norm)
opt_coeff.append(torch.inverse(R) @ Q.T @ dt / norm.T)
#U, S, V = torch.svd(R)
#print(torch.max(S) / torch.min(S))
return opt_coeff
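# --- Illustrative sketch, not part of the original module ---
# fit_coefficient above solves the least-squares problem min ||theta c - dt||
# via a QR factorization of the column-normalized theta: with theta / norm
# = Q R, the solution is c = R^{-1} Q^T dt, rescaled by the norms. The same
# identity as a standalone helper (shapes in the usage note are invented):
def qr_least_squares(theta, dt):
    norm = torch.norm(theta, dim=0, keepdim=True)
    Q, R = torch.qr(theta / norm)
    return torch.inverse(R) @ Q.T @ dt / norm.T

# For theta = torch.rand(100, 5) and dt = torch.rand(100, 1), this agrees
# with the normal-equation solution
# torch.inverse(theta.T @ theta) @ theta.T @ dt up to floating-point error.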
| 39.59375 | 168 | 0.662589 |
import torch
import torch.nn as nn
class Library(nn.Module):
def __init__(self, library_func, library_args={}):
super().__init__()
self.library_func = library_func
self.library_args = library_args
def forward(self, input):
time_deriv_list, theta = self.library_func(input, **self.library_args)
return time_deriv_list, theta
class Fitting(nn.Module):
def __init__(self, n_terms, n_out):
super().__init__()
self.coeff_vector = nn.ParameterList([torch.nn.Parameter(torch.rand((n_terms, 1), dtype=torch.float32)) for _ in torch.arange(n_out)])
self.sparsity_mask = [torch.ones(n_terms, dtype=torch.bool) for _ in torch.arange(n_out)]
def forward(self, input):
thetas, time_derivs = input
sparse_thetas = self.apply_mask(thetas)
self.coeff_vector = self.fit_coefficient(sparse_thetas, time_derivs)
return sparse_thetas, self.coeff_vector
def apply_mask(self, theta):
sparse_theta = [theta[:, sparsity_mask] for sparsity_mask in self.sparsity_mask]
return sparse_theta
def fit_coefficient(self, thetas, time_derivs):
return self.coeff_vector
class FittingDynamic(nn.Module):
def __init__(self, n_terms, n_out):
super().__init__()
self.coeff_vector = [torch.rand((n_terms, 1), dtype=torch.float32) for _ in torch.arange(n_out)]
self.sparsity_mask = [torch.ones(n_terms, dtype=torch.bool) for _ in torch.arange(n_out)]
def forward(self, input):
thetas, time_derivs = input
sparse_thetas = self.apply_mask(thetas)
self.coeff_vector = self.fit_coefficient(sparse_thetas, time_derivs)
return sparse_thetas, self.coeff_vector
def apply_mask(self, theta):
sparse_theta = [theta[:, sparsity_mask] for sparsity_mask in self.sparsity_mask]
return sparse_theta
def fit_coefficient(self, thetas, time_derivs):
opt_coeff = []
for theta, dt in zip(thetas, time_derivs):
norm = torch.norm(theta, dim=0, keepdim=True)
Q, R = torch.qr(theta / norm)
opt_coeff.append(torch.inverse(R) @ Q.T @ dt / norm.T)
return opt_coeff
| true | true |
| 1c48c02f74550a04bdeb6ad17a47a68d67a957c7 | 259 | py | Python | whitehats/urls.py | stephanpoetschner/demo-whitehats | 0bd8ccd75f37129ac3ad82949a6899aa7b706b90 | ["MIT"] | null | null | null | whitehats/urls.py | stephanpoetschner/demo-whitehats | 0bd8ccd75f37129ac3ad82949a6899aa7b706b90 | ["MIT"] | null | null | null | whitehats/urls.py | stephanpoetschner/demo-whitehats | 0bd8ccd75f37129ac3ad82949a6899aa7b706b90 | ["MIT"] | null | null | null |
from django.contrib import admin
from django.conf.urls import include, url
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('landingpage.urls')),
url(r'^', include('signups.urls')),
url(r'^api/', include('api.urls')),
]
| 19.923077 | 43 | 0.633205 |
from django.contrib import admin
from django.conf.urls import include, url
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('landingpage.urls')),
url(r'^', include('signups.urls')),
url(r'^api/', include('api.urls')),
]
| true | true |
| 1c48c033d3e9037fadefc39f479136487a3dd057 | 575 | py | Python | tbonlineproject/faq/urls.py | nathangeffen/tbonline3 | 1b8a3af8d2dc1ee8083ca6638d025e94bd98f253 | ["MIT"] | null | null | null | tbonlineproject/faq/urls.py | nathangeffen/tbonline3 | 1b8a3af8d2dc1ee8083ca6638d025e94bd98f253 | ["MIT"] | 3 | 2021-06-08T23:57:13.000Z | 2022-01-13T03:42:01.000Z | tbonlineproject/faq/urls.py | nathangeffen/tbonline-2 | 0d5869197e66a0057fa07cb99f21dde7f5b47c30 | ["MIT"] | null | null | null |
from django.conf.urls.defaults import *
from django.views.generic import ListView, DetailView
from faq.models import QuestionCategory, QuestionAndAnswer
urlpatterns = patterns('faq.views',
url(r'^$', ListView.as_view(model=QuestionCategory,
context_object_name="questioncategory_list",),
name='list_faq'),
url(r'category/(?P<pk>\d+)/$', DetailView.as_view(model=QuestionCategory,
context_object_name="questioncategory",),
name='detail_faq_category'),
)
| 41.071429 | 77 | 0.627826 |
from django.conf.urls.defaults import *
from django.views.generic import ListView, DetailView
from faq.models import QuestionCategory, QuestionAndAnswer
urlpatterns = patterns('faq.views',
url(r'^$', ListView.as_view(model=QuestionCategory,
context_object_name="questioncategory_list",),
name='list_faq'),
url(r'category/(?P<pk>\d+)/$', DetailView.as_view(model=QuestionCategory,
context_object_name="questioncategory",),
name='detail_faq_category'),
)
| true
| true
|
1c48c096ccbd2a7376ec2748329779a91cf42457
| 1,250
|
py
|
Python
|
myapp/models/query.py
|
miguelgrinberg/circular-dependencies-webcast
|
741754b956787c88de8bba99d9257a58212b41e7
|
[
"MIT"
] | 19
|
2018-05-26T07:25:56.000Z
|
2021-06-05T07:45:22.000Z
|
myapp/models/query.py
|
miguelgrinberg/circular-dependencies-webcast
|
741754b956787c88de8bba99d9257a58212b41e7
|
[
"MIT"
] | null | null | null |
myapp/models/query.py
|
miguelgrinberg/circular-dependencies-webcast
|
741754b956787c88de8bba99d9257a58212b41e7
|
[
"MIT"
] | 2
|
2020-03-12T11:31:15.000Z
|
2020-08-10T16:07:58.000Z
|
from flask_sqlalchemy import BaseQuery
from myapp import db
class QueryWithSoftDelete(BaseQuery):
_with_deleted = False
def __new__(cls, *args, **kwargs):
obj = super(QueryWithSoftDelete, cls).__new__(cls)
obj._with_deleted = kwargs.pop('_with_deleted', False)
if len(args) > 0:
super(QueryWithSoftDelete, obj).__init__(*args, **kwargs)
return obj.filter_by(deleted=False) if not obj._with_deleted \
else obj
return obj
def __init__(self, *args, **kwargs):
pass
def with_deleted(self):
return self.__class__(db.class_mapper(self._mapper_zero().class_),
session=db.session(), _with_deleted=True)
def _get(self, *args, **kwargs):
# this calls the original query.get function from the base class
return super(QueryWithSoftDelete, self).get(*args, **kwargs)
def get(self, *args, **kwargs):
# the query.get method does not like it if there is a filter clause
# pre-loaded, so we need to implement it using a workaround
obj = self.with_deleted()._get(*args, **kwargs)
return obj if obj is None or self._with_deleted or not obj.deleted \
else None
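A hedged usage sketch (the User model and its deleted column are assumptions for illustration, not part of the file above): a model opts into soft deletion by setting query_class, after which default queries hide deleted rows and with_deleted() restores them.

# Hypothetical model wired to the query class above ('db' is the
# Flask-SQLAlchemy instance imported at the top of the module).
class User(db.Model):
    query_class = QueryWithSoftDelete
    id = db.Column(db.Integer, primary_key=True)
    deleted = db.Column(db.Boolean, default=False)  # soft-delete flag

# User.query.all()                 -> only rows with deleted == False
# User.query.with_deleted().all()  -> every row, deleted or not
# User.query.get(1)                -> None if row 1 is soft-deleted,
#                                     courtesy of the get() workaround above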
| 36.764706
| 76
| 0.64
|
from flask_sqlalchemy import BaseQuery
from myapp import db
class QueryWithSoftDelete(BaseQuery):
_with_deleted = False
def __new__(cls, *args, **kwargs):
obj = super(QueryWithSoftDelete, cls).__new__(cls)
obj._with_deleted = kwargs.pop('_with_deleted', False)
if len(args) > 0:
super(QueryWithSoftDelete, obj).__init__(*args, **kwargs)
return obj.filter_by(deleted=False) if not obj._with_deleted \
else obj
return obj
def __init__(self, *args, **kwargs):
pass
def with_deleted(self):
return self.__class__(db.class_mapper(self._mapper_zero().class_),
session=db.session(), _with_deleted=True)
def _get(self, *args, **kwargs):
return super(QueryWithSoftDelete, self).get(*args, **kwargs)
def get(self, *args, **kwargs):
obj = self.with_deleted()._get(*args, **kwargs)
return obj if obj is None or self._with_deleted or not obj.deleted \
else None
| true
| true
|
1c48c0b17c2b24240920a52e3fd57992cd6b0a17
| 10,952
|
py
|
Python
|
src/hyde/driver/configuration/nwp/gfs/drv_configuration_time_gfs.py
|
c-hydro/hyde
|
3a3ff92d442077ce353b071d5afe726fc5465201
|
[
"MIT"
] | null | null | null |
src/hyde/driver/configuration/nwp/gfs/drv_configuration_time_gfs.py
|
c-hydro/hyde
|
3a3ff92d442077ce353b071d5afe726fc5465201
|
[
"MIT"
] | 18
|
2020-04-07T16:34:59.000Z
|
2021-07-02T07:32:39.000Z
|
src/hyde/driver/configuration/nwp/gfs/drv_configuration_time_gfs.py
|
c-hydro/fp-hyde
|
b0728397522aceebec3e7ff115aff160a10efede
|
[
"MIT"
] | null | null | null |
"""
Class Features
Name: drv_configuration_time_gfs
Author(s): Fabio Delogu (fabio.delogu@cimafoundation.org)
Date: '20200228'
Version: '1.0.0'
"""
#######################################################################################
# Library
import logging
import time
import pandas as pd
from src.hyde.algorithm.settings.nwp.gfs.lib_gfs_args import logger_name, time_format
# Log
log_stream = logging.getLogger(logger_name)
# Debug
# import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
class DataObject(dict):
pass
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Class Time
class DataTime:
# -------------------------------------------------------------------------------------
# Global Variable(s)
time_now = None
time_settings = None
time_run = None
time_from = None
time_to = None
time_frequency = None
time_period = None
time_rounding = None
time_steps = {}
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method class initialization
def __init__(self, time_arg=time.strftime(time_format, time.gmtime()),
time_settings=None,
time_now=None,
time_period_past=0, time_period_future=0, time_frequency='H',
time_rounding='H'):
# -------------------------------------------------------------------------------------
# Store information in global workspace
self.time_arg = time_arg
self.time_settings = time_settings
self.time_now = time_now
self.time_period_past = int(time_period_past)
self.time_period_future = int(time_period_future)
self.time_frequency = time_frequency
self.time_rounding = time_rounding
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to set times
def getDataTime(self, time_reverse=False):
# -------------------------------------------------------------------------------------
# Info start
log_stream.info(' ---> Configure time ... ')
# Get time now
self.time_now = self.__getTimeNow()
# Get time argument
self.time_arg = self.__getTimeArg()
# Set time run
self.time_run = self.__setTimeRun(self.time_now, self.time_arg)
# Round time to reference
self.time_run = self.__computeTimeRound(self.time_rounding)
# Get initial time step (taking care restart time condition)
self.time_from = self.__getTimeFrom(self.time_run,
time_period=self.time_period_past,
time_frequency=self.time_frequency)
# Get ending time step
self.time_to = self.__getTimeTo(self.time_run,
time_period=self.time_period_future,
time_frequency=self.time_frequency)
# Compute period time steps
self.time_steps = self.__computeTimePeriod(self.time_from, self.time_to,
time_frequency=self.time_frequency,
time_reverse=time_reverse)
# Info end
log_stream.info(' ---> Configure time ... OK')
return DataObject(self.__dict__)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to round time to reference
def __computeTimeRound(self, time_rounding):
log_stream.info(' ----> Round time run ... ')
time_round = self.time_run.round(time_rounding)
if time_round > self.time_run:
time_round = pd.date_range(end=time_round, periods=2, freq=time_rounding)[0]
log_stream.info(' -----> Algorithm time run: [' + time_round.strftime(time_format) + ']')
log_stream.info(' ----> Round time run ... DONE')
return time_round
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to get time now
def __getTimeNow(self):
log_stream.info(' ----> Configure time now ... ')
try:
if self.time_now is None:
log_stream.info(' -----> Time now is not set. Time will be taken using time library.')
self.time_now = time.strftime(time_format, time.gmtime())
else:
log_stream.info(' -----> Time argument is set using script configuration file')
            if pd.isna(pd.to_datetime(self.time_now, format=time_format, errors='coerce')):
log_stream.warning(' ===> Mismatch in input time now format. '
'Expected format is: ' + time_format + '. Try to recover using automatic parser.')
time_now = pd.to_datetime(self.time_now)
else:
time_now = pd.to_datetime(self.time_now, format=time_format)
time_now = time_now.floor('min')
time_now = time_now.replace(minute=0)
self.time_now = time_now.strftime(time_format)
log_stream.info(' ----> Configure time now ... DONE [' + self.time_now + ']')
except BaseException:
log_stream.error(' -----> Time now definition failed! Check your data and settings!')
raise BaseException('Error in time now definition!')
return time_now
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to get time set in argument(s)
def __getTimeArg(self):
log_stream.info(' ----> Configure time argument ... ')
try:
if self.time_arg is None:
if self.time_settings is not None:
self.time_arg = self.time_settings
log_stream.info(' -----> Time argument is not set. Time will be taken using time in settings file.')
else:
log_stream.info(' -----> Time argument is not set. Time will be taken using time library.')
self.time_arg = time.strftime(time_format, time.gmtime())
else:
log_stream.info(' -----> Time argument is set using script arg(s)')
            if pd.isna(pd.to_datetime(self.time_arg, format=time_format, errors='coerce')):
log_stream.warning(' ===> Mismatch in input time argument format. '
'Expected format is: ' + time_format + '. Try to recover using automatic parser.')
time_arg = pd.to_datetime(self.time_arg)
else:
time_arg = pd.to_datetime(self.time_arg, format=time_format)
time_arg = time_arg.floor('min')
time_arg = time_arg.replace(minute=0)
self.time_arg = time_arg.strftime(time_format)
log_stream.info(' ----> Configure time argument ... DONE [' + self.time_arg + ']')
except BaseException:
            log_stream.error(' -----> Time argument definition failed! Check your data and settings!')
            raise BaseException('Error in time argument definition!')
return time_arg
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to set time run
@staticmethod
def __setTimeRun(time_now, time_arg):
log_stream.info(' ----> Set time run ... ')
if time_arg is not None:
log_stream.info(' -----> Time argument is used as time run [' + time_arg.strftime(time_format) + ']')
log_stream.info(' ----> Set time run ... DONE')
return time_arg
else:
log_stream.info(' -----> Time now is used as time run [' + time_now.strftime(time_format) + ']')
log_stream.info(' ----> Set time run ... DONE')
return time_now
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define time restart
def __parserTimeFrm(self):
pass
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define time restart
@staticmethod
def __getTimeFrom(time_run, time_period=0, time_frequency='H'):
if time_period == 0:
time_from = time_run
else:
time_period = time_period + 1
time_from = pd.date_range(end=time_run, periods=time_period, freq=time_frequency)[0]
return time_from
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to get time to
@staticmethod
def __getTimeTo(time_run, time_period=0, time_frequency='H'):
if time_period == 0:
time_to = time_run
else:
time_period = time_period + 1
time_to = pd.date_range(start=time_run, periods=time_period, freq=time_frequency)[-1]
return time_to
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to compute period time steps
@staticmethod
def __computeTimePeriod(time_from, time_to, time_frequency='H', time_reverse=False):
time_range = pd.date_range(time_from, time_to, freq=time_frequency)
time_range = time_range.floor(time_frequency)
if time_reverse:
time_range = time_range.sort_values(return_indexer=False, ascending=False)
return time_range
# -------------------------------------------------------------------------------------
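A hedged sketch of driving DataTime (argument values are invented; the exact string accepted by time_arg depends on the time_format constant imported above):

# Hypothetical invocation: build an hourly window around a given run time.
driver_time = DataTime(time_arg='202002281200',   # must match time_format
                       time_period_past=3,        # 3 hourly steps back
                       time_period_future=6,      # 6 hourly steps forward
                       time_frequency='H',
                       time_rounding='H')
time_info = driver_time.getDataTime(time_reverse=False)

# getDataTime returns a DataObject (a dict) exposing, among others, time_run,
# time_from, time_to and the pandas DatetimeIndex stored in time_steps.
print(time_info['time_from'], time_info['time_to'])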
| 41.172932
| 120
| 0.442111
|
import logging
import time
import pandas as pd
from src.hyde.algorithm.settings.nwp.gfs.lib_gfs_args import logger_name, time_format
log_stream = logging.getLogger(logger_name)
class DataObject(dict):
pass
class DataTime:
time_now = None
time_settings = None
time_run = None
time_from = None
time_to = None
time_frequency = None
time_period = None
time_rounding = None
time_steps = {}
def __init__(self, time_arg=time.strftime(time_format, time.gmtime()),
time_settings=None,
time_now=None,
time_period_past=0, time_period_future=0, time_frequency='H',
time_rounding='H'):
self.time_arg = time_arg
self.time_settings = time_settings
self.time_now = time_now
self.time_period_past = int(time_period_past)
self.time_period_future = int(time_period_future)
self.time_frequency = time_frequency
self.time_rounding = time_rounding
def getDataTime(self, time_reverse=False):
log_stream.info(' ---> Configure time ... ')
self.time_now = self.__getTimeNow()
self.time_arg = self.__getTimeArg()
self.time_run = self.__setTimeRun(self.time_now, self.time_arg)
self.time_run = self.__computeTimeRound(self.time_rounding)
self.time_from = self.__getTimeFrom(self.time_run,
time_period=self.time_period_past,
time_frequency=self.time_frequency)
self.time_to = self.__getTimeTo(self.time_run,
time_period=self.time_period_future,
time_frequency=self.time_frequency)
self.time_steps = self.__computeTimePeriod(self.time_from, self.time_to,
time_frequency=self.time_frequency,
time_reverse=time_reverse)
log_stream.info(' ---> Configure time ... OK')
return DataObject(self.__dict__)
def __computeTimeRound(self, time_rounding):
log_stream.info(' ----> Round time run ... ')
time_round = self.time_run.round(time_rounding)
if time_round > self.time_run:
time_round = pd.date_range(end=time_round, periods=2, freq=time_rounding)[0]
log_stream.info(' -----> Algorithm time run: [' + time_round.strftime(time_format) + ']')
log_stream.info(' ----> Round time run ... DONE')
return time_round
def __getTimeNow(self):
log_stream.info(' ----> Configure time now ... ')
try:
if self.time_now is None:
log_stream.info(' -----> Time now is not set. Time will be taken using time library.')
self.time_now = time.strftime(time_format, time.gmtime())
else:
log_stream.info(' -----> Time argument is set using script configuration file')
            if pd.isna(pd.to_datetime(self.time_now, format=time_format, errors='coerce')):
log_stream.warning(' ===> Mismatch in input time now format. '
'Expected format is: ' + time_format + '. Try to recover using automatic parser.')
time_now = pd.to_datetime(self.time_now)
else:
time_now = pd.to_datetime(self.time_now, format=time_format)
time_now = time_now.floor('min')
time_now = time_now.replace(minute=0)
self.time_now = time_now.strftime(time_format)
log_stream.info(' ----> Configure time now ... DONE [' + self.time_now + ']')
except BaseException:
log_stream.error(' -----> Time now definition failed! Check your data and settings!')
raise BaseException('Error in time now definition!')
return time_now
def __getTimeArg(self):
log_stream.info(' ----> Configure time argument ... ')
try:
if self.time_arg is None:
if self.time_settings is not None:
self.time_arg = self.time_settings
log_stream.info(' -----> Time argument is not set. Time will be taken using time in settings file.')
else:
log_stream.info(' -----> Time argument is not set. Time will be taken using time library.')
self.time_arg = time.strftime(time_format, time.gmtime())
else:
log_stream.info(' -----> Time argument is set using script arg(s)')
            if pd.isna(pd.to_datetime(self.time_arg, format=time_format, errors='coerce')):
log_stream.warning(' ===> Mismatch in input time argument format. '
'Expected format is: ' + time_format + '. Try to recover using automatic parser.')
time_arg = pd.to_datetime(self.time_arg)
else:
time_arg = pd.to_datetime(self.time_arg, format=time_format)
time_arg = time_arg.floor('min')
time_arg = time_arg.replace(minute=0)
self.time_arg = time_arg.strftime(time_format)
log_stream.info(' ----> Configure time argument ... DONE [' + self.time_arg + ']')
except BaseException:
            log_stream.error(' -----> Time argument definition failed! Check your data and settings!')
            raise BaseException('Error in time argument definition!')
return time_arg
@staticmethod
def __setTimeRun(time_now, time_arg):
log_stream.info(' ----> Set time run ... ')
if time_arg is not None:
log_stream.info(' -----> Time argument is used as time run [' + time_arg.strftime(time_format) + ']')
log_stream.info(' ----> Set time run ... DONE')
return time_arg
else:
log_stream.info(' -----> Time now is used as time run [' + time_now.strftime(time_format) + ']')
log_stream.info(' ----> Set time run ... DONE')
return time_now
def __parserTimeFrm(self):
pass
@staticmethod
def __getTimeFrom(time_run, time_period=0, time_frequency='H'):
if time_period == 0:
time_from = time_run
else:
time_period = time_period + 1
time_from = pd.date_range(end=time_run, periods=time_period, freq=time_frequency)[0]
return time_from
@staticmethod
def __getTimeTo(time_run, time_period=0, time_frequency='H'):
if time_period == 0:
time_to = time_run
else:
time_period = time_period + 1
time_to = pd.date_range(start=time_run, periods=time_period, freq=time_frequency)[-1]
return time_to
@staticmethod
def __computeTimePeriod(time_from, time_to, time_frequency='H', time_reverse=False):
time_range = pd.date_range(time_from, time_to, freq=time_frequency)
time_range = time_range.floor(time_frequency)
if time_reverse:
time_range = time_range.sort_values(return_indexer=False, ascending=False)
return time_range
| true
| true
|
1c48c0fb0a50d5dc326448a85db2b46c3fbabf2a
| 439
|
py
|
Python
|
test/jsontesturls.py
|
JohnJorgensen19/json-rpc
|
ab96aa8654e4ddcc968cfefa1a27fd10459045dc
|
[
"MIT"
] | 165
|
2015-01-04T15:00:45.000Z
|
2022-03-12T11:36:41.000Z
|
test/jsontesturls.py
|
JohnJorgensen19/json-rpc
|
ab96aa8654e4ddcc968cfefa1a27fd10459045dc
|
[
"MIT"
] | 30
|
2015-03-02T21:49:56.000Z
|
2021-07-15T11:56:23.000Z
|
test/jsontesturls.py
|
JohnJorgensen19/json-rpc
|
ab96aa8654e4ddcc968cfefa1a27fd10459045dc
|
[
"MIT"
] | 47
|
2015-01-24T17:50:57.000Z
|
2022-03-30T09:40:22.000Z
|
try:
from django.conf.urls import patterns, url
except ImportError:
# Compatibility with Django <= 1.3
from django.conf.urls.defaults import patterns, url
from jsonrpc.site import jsonrpc_site
urlpatterns = patterns('',
url(r'^json/browse/$', 'jsonrpc.views.browse', name='jsonrpc_browser'),
url(r'^json/$', jsonrpc_site.dispatch, name='jsonrpc_mountpoint'),
(r'^json/(?P<method>[a-zA-Z0-9.-_]+)$', jsonrpc_site.dispatch),
)
| 31.357143
| 73
| 0.71754
|
try:
from django.conf.urls import patterns, url
except ImportError:
from django.conf.urls.defaults import patterns, url
from jsonrpc.site import jsonrpc_site
urlpatterns = patterns('',
url(r'^json/browse/$', 'jsonrpc.views.browse', name='jsonrpc_browser'),
url(r'^json/$', jsonrpc_site.dispatch, name='jsonrpc_mountpoint'),
(r'^json/(?P<method>[a-zA-Z0-9.-_]+)$', jsonrpc_site.dispatch),
)
| true
| true
|
1c48c26137216ab70c99b8c6f1171a552774911f
| 2,661
|
py
|
Python
|
tests/components/recorder/test_migrate.py
|
atemon/home-assistant
|
dbd0763f83d0857fceb00e2c973a4ec91663ddcf
|
[
"Apache-2.0"
] | 1
|
2018-08-25T06:08:21.000Z
|
2018-08-25T06:08:21.000Z
|
tests/components/recorder/test_migrate.py
|
atemon/home-assistant
|
dbd0763f83d0857fceb00e2c973a4ec91663ddcf
|
[
"Apache-2.0"
] | 2
|
2018-08-25T06:13:22.000Z
|
2018-08-25T07:00:54.000Z
|
tests/components/recorder/test_migrate.py
|
sara0871/desktop
|
e1b2e00cf67452828c021e3d73c76e00b72bd3ad
|
[
"Apache-2.0"
] | null | null | null |
"""The tests for the Recorder component."""
# pylint: disable=protected-access
import asyncio
from unittest.mock import patch, call
import pytest
from sqlalchemy import create_engine
from sqlalchemy.pool import StaticPool
from homeassistant.bootstrap import async_setup_component
from homeassistant.components.recorder import (
wait_connection_ready, migration, const, models)
from tests.components.recorder import models_original
def create_engine_test(*args, **kwargs):
"""Test version of create_engine that initializes with old schema.
This simulates an existing db with the old schema.
"""
engine = create_engine(*args, **kwargs)
models_original.Base.metadata.create_all(engine)
return engine
@asyncio.coroutine
def test_schema_update_calls(hass):
"""Test that schema migrations occur in correct order."""
with patch('sqlalchemy.create_engine', new=create_engine_test), \
patch('homeassistant.components.recorder.migration._apply_update') as \
update:
yield from async_setup_component(hass, 'recorder', {
'recorder': {
'db_url': 'sqlite://'
}
})
yield from wait_connection_ready(hass)
update.assert_has_calls([
call(hass.data[const.DATA_INSTANCE].engine, version+1, 0) for version
in range(0, models.SCHEMA_VERSION)])
@asyncio.coroutine
def test_schema_migrate(hass):
"""Test the full schema migration logic.
We're just testing that the logic can execute successfully here without
throwing exceptions. Maintaining a set of assertions based on schema
inspection could quickly become quite cumbersome.
"""
with patch('sqlalchemy.create_engine', new=create_engine_test), \
patch('homeassistant.components.recorder.Recorder._setup_run') as \
setup_run:
yield from async_setup_component(hass, 'recorder', {
'recorder': {
'db_url': 'sqlite://'
}
})
yield from wait_connection_ready(hass)
assert setup_run.called
def test_invalid_update():
"""Test that an invalid new version raises an exception."""
with pytest.raises(ValueError):
migration._apply_update(None, -1, 0)
def test_forgiving_add_column():
"""Test that add column will continue if column exists."""
engine = create_engine(
'sqlite://',
poolclass=StaticPool
)
engine.execute('CREATE TABLE hello (id int)')
migration._add_columns(engine, 'hello', [
'context_id CHARACTER(36)',
])
migration._add_columns(engine, 'hello', [
'context_id CHARACTER(36)',
])
| 32.060241
| 79
| 0.687336
|
import asyncio
from unittest.mock import patch, call
import pytest
from sqlalchemy import create_engine
from sqlalchemy.pool import StaticPool
from homeassistant.bootstrap import async_setup_component
from homeassistant.components.recorder import (
wait_connection_ready, migration, const, models)
from tests.components.recorder import models_original
def create_engine_test(*args, **kwargs):
engine = create_engine(*args, **kwargs)
models_original.Base.metadata.create_all(engine)
return engine
@asyncio.coroutine
def test_schema_update_calls(hass):
with patch('sqlalchemy.create_engine', new=create_engine_test), \
patch('homeassistant.components.recorder.migration._apply_update') as \
update:
yield from async_setup_component(hass, 'recorder', {
'recorder': {
'db_url': 'sqlite://'
}
})
yield from wait_connection_ready(hass)
update.assert_has_calls([
call(hass.data[const.DATA_INSTANCE].engine, version+1, 0) for version
in range(0, models.SCHEMA_VERSION)])
@asyncio.coroutine
def test_schema_migrate(hass):
with patch('sqlalchemy.create_engine', new=create_engine_test), \
patch('homeassistant.components.recorder.Recorder._setup_run') as \
setup_run:
yield from async_setup_component(hass, 'recorder', {
'recorder': {
'db_url': 'sqlite://'
}
})
yield from wait_connection_ready(hass)
assert setup_run.called
def test_invalid_update():
with pytest.raises(ValueError):
migration._apply_update(None, -1, 0)
def test_forgiving_add_column():
engine = create_engine(
'sqlite://',
poolclass=StaticPool
)
engine.execute('CREATE TABLE hello (id int)')
migration._add_columns(engine, 'hello', [
'context_id CHARACTER(36)',
])
migration._add_columns(engine, 'hello', [
'context_id CHARACTER(36)',
])
| true
| true
|
1c48c281c263bc634f99ad847437ff2fe1b8daa5
| 31,849
|
py
|
Python
|
mne/decoding/transformer.py
|
dokato/mne-python
|
a188859b57044fa158af05852bcce2870fabde91
|
[
"BSD-3-Clause"
] | null | null | null |
mne/decoding/transformer.py
|
dokato/mne-python
|
a188859b57044fa158af05852bcce2870fabde91
|
[
"BSD-3-Clause"
] | null | null | null |
mne/decoding/transformer.py
|
dokato/mne-python
|
a188859b57044fa158af05852bcce2870fabde91
|
[
"BSD-3-Clause"
] | 1
|
2021-04-12T12:45:31.000Z
|
2021-04-12T12:45:31.000Z
|
# -*- coding: utf-8 -*-
# Authors: Mainak Jas <mainak@neuro.hut.fi>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Romain Trachel <trachelr@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from .mixin import TransformerMixin
from .base import BaseEstimator
from .. import pick_types
from ..filter import filter_data, _triage_filter_params
from ..time_frequency.psd import psd_array_multitaper
from ..externals.six import string_types
from ..utils import _check_type_picks, check_version
from ..io.pick import pick_info, _pick_data_channels, _picks_by_type
from ..cov import _check_scalings_user
class _ConstantScaler():
"""Scale channel types using constant values."""
def __init__(self, info, scalings, do_scaling=True):
self._scalings = scalings
self._info = info
self._do_scaling = do_scaling
def fit(self, X, y=None):
scalings = _check_scalings_user(self._scalings)
picks_by_type = _picks_by_type(pick_info(
self._info, _pick_data_channels(self._info, exclude=())))
std = np.ones(sum(len(p[1]) for p in picks_by_type))
if X.shape[1] != len(std):
raise ValueError('info had %d data channels but X has %d channels'
% (len(std), len(X)))
if self._do_scaling: # this is silly, but necessary for completeness
for kind, picks in picks_by_type:
std[picks] = 1. / scalings[kind]
self.std_ = std
self.mean_ = np.zeros_like(std)
return self
def transform(self, X):
return X / self.std_
def inverse_transform(self, X, y=None):
return X * self.std_
def fit_transform(self, X, y=None):
return self.fit(X, y).transform(X)
def _sklearn_reshape_apply(func, return_result, X, *args, **kwargs):
"""Reshape epochs and apply function."""
if not isinstance(X, np.ndarray):
raise ValueError("data should be an np.ndarray, got %s." % type(X))
X = np.atleast_3d(X)
orig_shape = X.shape
X = np.reshape(X.transpose(0, 2, 1), (-1, orig_shape[1]))
X = func(X, *args, **kwargs)
if return_result:
X.shape = (orig_shape[0], orig_shape[2], orig_shape[1])
X = X.transpose(0, 2, 1)
return X
class Scaler(TransformerMixin, BaseEstimator):
u"""Standardize channel data.
This class scales data for each channel. It differs from scikit-learn
classes (e.g., :class:`sklearn.preprocessing.StandardScaler`) in that
it scales each *channel* by estimating μ and σ using data from all
time points and epochs, as opposed to standardizing each *feature*
    (i.e., each time point for each channel) by estimating μ and σ
    using data from all epochs.
Parameters
----------
info : instance of Info | None
The measurement info. Only necessary if ``scalings`` is a dict or
None.
scalings : dict, string, defaults to None.
Scaling method to be applied to data channel wise.
* if scalings is None (default), scales mag by 1e15, grad by 1e13,
and eeg by 1e6.
* if scalings is :class:`dict`, keys are channel types and values
are scale factors.
* if ``scalings=='median'``,
:class:`sklearn.preprocessing.RobustScaler`
is used (requires sklearn version 0.17+).
* if ``scalings=='mean'``,
:class:`sklearn.preprocessing.StandardScaler`
is used.
with_mean : boolean, True by default
If True, center the data using mean (or median) before scaling.
Ignored for channel-type scaling.
with_std : boolean, True by default
        If True, scale the data to unit variance (``scalings='mean'``),
        quantile range (``scalings='median'``), or channel-type scalings
        (if ``scalings`` is a dict or None).
"""
def __init__(self, info=None, scalings=None, with_mean=True,
with_std=True): # noqa: D102
self.info = info
self.with_mean = with_mean
self.with_std = with_std
self.scalings = scalings
if not (scalings is None or isinstance(scalings, (dict, str))):
raise ValueError('scalings type should be dict, str, or None, '
'got %s' % type(scalings))
if isinstance(scalings, string_types) and \
scalings not in ('mean', 'median'):
raise ValueError('Invalid method for scaling, must be "mean" or '
'"median" but got %s' % scalings)
if scalings is None or isinstance(scalings, dict):
self._scaler = _ConstantScaler(info, scalings, self.with_std)
elif scalings == 'mean':
from sklearn.preprocessing import StandardScaler
            self._scaler = StandardScaler(with_mean=self.with_mean,
                                          with_std=self.with_std)
else: # scalings == 'median':
if not check_version('sklearn', '0.17'):
raise ValueError("median requires version 0.17 of "
"sklearn library")
from sklearn.preprocessing import RobustScaler
            self._scaler = RobustScaler(with_centering=self.with_mean,
                                        with_scaling=self.with_std)
def fit(self, epochs_data, y=None):
"""Standardize data across channels.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data to concatenate channels.
y : array, shape (n_epochs,)
The label for each epoch.
Returns
-------
self : instance of Scaler
Returns the modified instance.
"""
_sklearn_reshape_apply(self._scaler.fit, False, epochs_data, y=y)
return self
def transform(self, epochs_data):
"""Standardize data across channels.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
Returns
-------
X : array, shape (n_epochs, n_channels, n_times)
The data concatenated over channels.
Notes
-----
This function makes a copy of the data before the operations and the
memory usage may be large with big data.
"""
return _sklearn_reshape_apply(self._scaler.transform, True,
epochs_data)
def fit_transform(self, epochs_data, y=None):
"""Fit to data, then transform it.
Fits transformer to epochs_data and y and returns a transformed version
of epochs_data.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
y : None | array, shape (n_epochs,)
The label for each epoch.
Defaults to None.
Returns
-------
X : array, shape (n_epochs, n_channels, n_times)
The data concatenated over channels.
Notes
-----
This function makes a copy of the data before the operations and the
memory usage may be large with big data.
"""
return self.fit(epochs_data, y).transform(epochs_data)
def inverse_transform(self, epochs_data):
"""Invert standardization of data across channels.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
Returns
-------
X : array, shape (n_epochs, n_channels, n_times)
The data concatenated over channels.
Notes
-----
This function makes a copy of the data before the operations and the
memory usage may be large with big data.
"""
return _sklearn_reshape_apply(self._scaler.inverse_transform, True,
epochs_data)
class Vectorizer(TransformerMixin):
"""Transform n-dimensional array into 2D array of n_samples by n_features.
This class reshapes an n-dimensional array into an n_samples * n_features
array, usable by the estimators and transformers of scikit-learn.
Examples
--------
clf = make_pipeline(SpatialFilter(), _XdawnTransformer(), Vectorizer(),
LogisticRegression())
Attributes
----------
``features_shape_`` : tuple
Stores the original shape of data.
"""
def fit(self, X, y=None):
"""Store the shape of the features of X.
Parameters
----------
X : array-like
The data to fit. Can be, for example a list, or an array of at
least 2d. The first dimension must be of length n_samples, where
samples are the independent samples used by the estimator
(e.g. n_epochs for epoched data).
y : None | array, shape (n_samples,)
Used for scikit-learn compatibility.
Returns
-------
self : Instance of Vectorizer
Return the modified instance.
"""
X = np.asarray(X)
self.features_shape_ = X.shape[1:]
return self
def transform(self, X):
"""Convert given array into two dimensions.
Parameters
----------
X : array-like
The data to fit. Can be, for example a list, or an array of at
least 2d. The first dimension must be of length n_samples, where
samples are the independent samples used by the estimator
(e.g. n_epochs for epoched data).
Returns
-------
X : array, shape (n_samples, n_features)
The transformed data.
"""
X = np.asarray(X)
if X.shape[1:] != self.features_shape_:
raise ValueError("Shape of X used in fit and transform must be "
"same")
return X.reshape(len(X), -1)
def fit_transform(self, X, y=None):
"""Fit the data, then transform in one step.
Parameters
----------
X : array-like
The data to fit. Can be, for example a list, or an array of at
least 2d. The first dimension must be of length n_samples, where
samples are the independent samples used by the estimator
(e.g. n_epochs for epoched data).
y : None | array, shape (n_samples,)
Used for scikit-learn compatibility.
Returns
-------
X : array, shape (n_samples, -1)
The transformed data.
"""
return self.fit(X).transform(X)
def inverse_transform(self, X):
"""Transform 2D data back to its original feature shape.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to be transformed back to original shape.
Returns
-------
X : array
The data transformed into shape as used in fit. The first
dimension is of length n_samples.
"""
X = np.asarray(X)
if X.ndim != 2:
raise ValueError("X should be of 2 dimensions but given has %s "
"dimension(s)" % X.ndim)
return X.reshape((len(X),) + self.features_shape_)
class PSDEstimator(TransformerMixin):
"""Compute power spectrum density (PSD) using a multi-taper method.
Parameters
----------
sfreq : float
The sampling frequency.
fmin : float
The lower frequency of interest.
fmax : float
The upper frequency of interest.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
n_jobs : int
Number of parallel jobs to use (only used if adaptive=True).
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
See Also
--------
mne.time_frequency.psd_multitaper
"""
def __init__(self, sfreq=2 * np.pi, fmin=0, fmax=np.inf, bandwidth=None,
adaptive=False, low_bias=True, n_jobs=1,
normalization='length', verbose=None): # noqa: D102
self.sfreq = sfreq
self.fmin = fmin
self.fmax = fmax
self.bandwidth = bandwidth
self.adaptive = adaptive
self.low_bias = low_bias
self.n_jobs = n_jobs
self.verbose = verbose
self.normalization = normalization
def fit(self, epochs_data, y):
"""Compute power spectrum density (PSD) using a multi-taper method.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
y : array, shape (n_epochs,)
The label for each epoch
Returns
-------
self : instance of PSDEstimator
returns the modified instance
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
return self
def transform(self, epochs_data):
"""Compute power spectrum density (PSD) using a multi-taper method.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data
Returns
-------
psd : array, shape (n_signals, len(freqs)) or (len(freqs),)
The computed PSD.
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
psd, _ = psd_array_multitaper(
epochs_data, sfreq=self.sfreq, fmin=self.fmin, fmax=self.fmax,
bandwidth=self.bandwidth, adaptive=self.adaptive,
low_bias=self.low_bias, normalization=self.normalization,
n_jobs=self.n_jobs)
return psd
class FilterEstimator(TransformerMixin):
"""Estimator to filter RtEpochs.
Applies a zero-phase low-pass, high-pass, band-pass, or band-stop
filter to the channels selected by "picks".
l_freq and h_freq are the frequencies below which and above which,
respectively, to filter out of the data. Thus the uses are:
- l_freq < h_freq: band-pass filter
- l_freq > h_freq: band-stop filter
- l_freq is not None, h_freq is None: low-pass filter
- l_freq is None, h_freq is not None: high-pass filter
If n_jobs > 1, more memory is required as "len(picks) * n_times"
additional time points need to be temporarily stored in memory.
Parameters
----------
info : instance of Info
Measurement info.
l_freq : float | None
Low cut-off frequency in Hz. If None the data are only low-passed.
h_freq : float | None
High cut-off frequency in Hz. If None the data are only
high-passed.
picks : array-like of int | None
Indices of channels to filter. If None only the data (MEG/EEG)
channels will be filtered.
filter_length : str (Default: '10s') | int | None
Length of the filter to use. If None or "len(x) < filter_length",
the filter length used is len(x). Otherwise, if int, overlap-add
filtering with a filter of the specified length in samples) is
used (faster for long signals). If str, a human-readable time in
units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
to the shortest power-of-two length at least that duration.
l_trans_bandwidth : float
Width of the transition band at the low cut-off frequency in Hz.
h_trans_bandwidth : float
Width of the transition band at the high cut-off frequency in Hz.
n_jobs : int | str
Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
is installed properly, CUDA is initialized, and method='fft'.
method : str
'fft' will use overlap-add FIR filtering, 'iir' will use IIR
forward-backward filtering (via filtfilt).
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
fir_design : str
Can be "firwin" (default in 0.16) to use
:func:`scipy.signal.firwin`, or "firwin2" (default in 0.15 and
before) to use :func:`scipy.signal.firwin2`. "firwin" uses a
time-domain design technique that generally gives improved
attenuation using fewer samples than "firwin2".
        .. versionadded:: 0.15
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more). Defaults to
self.verbose.
See Also
--------
TemporalFilter
"""
def __init__(self, info, l_freq, h_freq, picks=None, filter_length='auto',
l_trans_bandwidth='auto', h_trans_bandwidth='auto', n_jobs=1,
method='fft', iir_params=None, fir_design='firwin',
verbose=None): # noqa: D102
self.info = info
self.l_freq = l_freq
self.h_freq = h_freq
self.picks = _check_type_picks(picks)
self.filter_length = filter_length
self.l_trans_bandwidth = l_trans_bandwidth
self.h_trans_bandwidth = h_trans_bandwidth
self.n_jobs = n_jobs
self.method = method
self.iir_params = iir_params
self.fir_design = fir_design
def fit(self, epochs_data, y):
"""Filter data.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
y : array, shape (n_epochs,)
The label for each epoch.
Returns
-------
self : instance of FilterEstimator
Returns the modified instance
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
if self.picks is None:
self.picks = pick_types(self.info, meg=True, eeg=True,
ref_meg=False, exclude=[])
if self.l_freq == 0:
self.l_freq = None
if self.h_freq is not None and self.h_freq > (self.info['sfreq'] / 2.):
self.h_freq = None
if self.l_freq is not None and not isinstance(self.l_freq, float):
self.l_freq = float(self.l_freq)
if self.h_freq is not None and not isinstance(self.h_freq, float):
self.h_freq = float(self.h_freq)
if self.info['lowpass'] is None or (self.h_freq is not None and
(self.l_freq is None or
self.l_freq < self.h_freq) and
self.h_freq <
self.info['lowpass']):
self.info['lowpass'] = self.h_freq
if self.info['highpass'] is None or (self.l_freq is not None and
(self.h_freq is None or
self.l_freq < self.h_freq) and
self.l_freq >
self.info['highpass']):
self.info['highpass'] = self.l_freq
return self
def transform(self, epochs_data):
"""Filter data.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
Returns
-------
X : array, shape (n_epochs, n_channels, n_times)
The data after filtering
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
epochs_data = np.atleast_3d(epochs_data)
return filter_data(
epochs_data, self.info['sfreq'], self.l_freq, self.h_freq,
self.picks, self.filter_length, self.l_trans_bandwidth,
self.h_trans_bandwidth, method=self.method,
iir_params=self.iir_params, n_jobs=self.n_jobs, copy=False,
fir_design=self.fir_design, verbose=False)
class UnsupervisedSpatialFilter(TransformerMixin, BaseEstimator):
"""Use unsupervised spatial filtering across time and samples.
Parameters
----------
estimator : scikit-learn estimator
Estimator using some decomposition algorithm.
average : bool, defaults to False
If True, the estimator is fitted on the average across samples
(e.g. epochs).
"""
def __init__(self, estimator, average=False): # noqa: D102
# XXX: Use _check_estimator #3381
for attr in ('fit', 'transform', 'fit_transform'):
if not hasattr(estimator, attr):
raise ValueError('estimator must be a scikit-learn '
'transformer, missing %s method' % attr)
if not isinstance(average, bool):
raise ValueError("average parameter must be of bool type, got "
"%s instead" % type(bool))
self.estimator = estimator
self.average = average
def fit(self, X, y=None):
"""Fit the spatial filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The data to be filtered.
y : None | array, shape (n_samples,)
Used for scikit-learn compatibility.
Returns
-------
self : Instance of UnsupervisedSpatialFilter
Return the modified instance.
"""
if self.average:
X = np.mean(X, axis=0).T
else:
n_epochs, n_channels, n_times = X.shape
# trial as time samples
X = np.transpose(X, (1, 0, 2)).reshape((n_channels, n_epochs *
n_times)).T
self.estimator.fit(X)
return self
def fit_transform(self, X, y=None):
"""Transform the data to its filtered components after fitting.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The data to be filtered.
y : None | array, shape (n_samples,)
Used for scikit-learn compatibility.
Returns
-------
X : array, shape (n_epochs, n_channels, n_times)
The transformed data.
"""
return self.fit(X).transform(X)
def transform(self, X):
"""Transform the data to its spatial filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The data to be filtered.
Returns
-------
X : array, shape (n_epochs, n_channels, n_times)
The transformed data.
"""
return self._apply_method(X, 'transform')
def inverse_transform(self, X):
"""Inverse transform the data to its original space.
Parameters
----------
X : array, shape (n_epochs, n_components, n_times)
The data to be inverted.
Returns
-------
X : array, shape (n_epochs, n_channels, n_times)
The transformed data.
"""
return self._apply_method(X, 'inverse_transform')
def _apply_method(self, X, method):
"""Vectorize time samples as trials, apply method and reshape back.
Parameters
----------
X : array, shape (n_epochs, n_dims, n_times)
The data to be inverted.
Returns
-------
X : array, shape (n_epochs, n_dims, n_times)
The transformed data.
"""
n_epochs, n_channels, n_times = X.shape
# trial as time samples
X = np.transpose(X, [1, 0, 2])
X = np.reshape(X, [n_channels, n_epochs * n_times]).T
# apply method
method = getattr(self.estimator, method)
X = method(X)
# put it back to n_epochs, n_dimensions
X = np.reshape(X.T, [-1, n_epochs, n_times]).transpose([1, 0, 2])
return X
class TemporalFilter(TransformerMixin):
"""Estimator to filter data array along the last dimension.
Applies a zero-phase low-pass, high-pass, band-pass, or band-stop
filter to the channels.
l_freq and h_freq are the frequencies below which and above which,
respectively, to filter out of the data. Thus the uses are:
- l_freq < h_freq: band-pass filter
- l_freq > h_freq: band-stop filter
- l_freq is not None, h_freq is None: low-pass filter
- l_freq is None, h_freq is not None: high-pass filter
See :func:`mne.filter.filter_data`.
Parameters
----------
l_freq : float | None
Low cut-off frequency in Hz. If None the data are only low-passed.
h_freq : float | None
High cut-off frequency in Hz. If None the data are only
high-passed.
sfreq : float, defaults to 1.0
Sampling frequency in Hz.
filter_length : str | int, defaults to 'auto'
Length of the FIR filter to use (if applicable):
* int: specified length in samples.
* 'auto' (default in 0.14): the filter length is chosen based
on the size of the transition regions (7 times the reciprocal
of the shortest transition band).
* str: (default in 0.13 is "10s") a human-readable time in
units of "s" or "ms" (e.g., "10s" or "5500ms") will be
converted to that number of samples if ``phase="zero"``, or
the shortest power-of-two length at least that duration for
``phase="zero-double"``.
l_trans_bandwidth : float | str
Width of the transition band at the low cut-off frequency in Hz
(high pass or cutoff 1 in bandpass). Can be "auto"
(default in 0.14) to use a multiple of ``l_freq``::
min(max(l_freq * 0.25, 2), l_freq)
Only used for ``method='fir'``.
h_trans_bandwidth : float | str
Width of the transition band at the high cut-off frequency in Hz
(low pass or cutoff 2 in bandpass). Can be "auto"
(default in 0.14) to use a multiple of ``h_freq``::
min(max(h_freq * 0.25, 2.), info['sfreq'] / 2. - h_freq)
Only used for ``method='fir'``.
n_jobs : int | str, defaults to 1
Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
is installed properly, CUDA is initialized, and method='fft'.
method : str, defaults to 'fir'
'fir' will use overlap-add FIR filtering, 'iir' will use IIR
forward-backward filtering (via filtfilt).
iir_params : dict | None, defaults to None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
fir_window : str, defaults to 'hamming'
The window to use in FIR design, can be "hamming", "hann",
or "blackman".
fir_design : str
Can be "firwin" (default) to use :func:`scipy.signal.firwin`,
or "firwin2" to use :func:`scipy.signal.firwin2`. "firwin" uses
a time-domain design technique that generally gives improved
attenuation using fewer samples than "firwin2".
        .. versionadded:: 0.15
verbose : bool, str, int, or None, defaults to None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more). Defaults to
self.verbose.
See Also
--------
FilterEstimator
Vectorizer
mne.filter.filter_data
"""
def __init__(self, l_freq=None, h_freq=None, sfreq=1.0,
filter_length='auto', l_trans_bandwidth='auto',
h_trans_bandwidth='auto', n_jobs=1, method='fir',
iir_params=None, fir_window='hamming', fir_design='firwin',
verbose=None): # noqa: D102
self.l_freq = l_freq
self.h_freq = h_freq
self.sfreq = sfreq
self.filter_length = filter_length
self.l_trans_bandwidth = l_trans_bandwidth
self.h_trans_bandwidth = h_trans_bandwidth
self.n_jobs = n_jobs
self.method = method
self.iir_params = iir_params
self.fir_window = fir_window
self.fir_design = fir_design
self.verbose = verbose
        if not isinstance(self.n_jobs, int) and self.n_jobs != 'cuda':
raise ValueError('n_jobs must be int or "cuda", got %s instead.'
% type(self.n_jobs))
def fit(self, X, y=None):
"""Do nothing (for scikit-learn compatibility purposes).
Parameters
----------
        X : array, shape (n_epochs, n_channels, n_times) or shape (n_channels, n_times) # noqa
The data to be filtered over the last dimension. The channels
dimension can be zero when passing a 2D array.
y : None
Not used, for scikit-learn compatibility issues.
Returns
-------
self : instance of Filterer
Returns the modified instance.
"""
return self
def transform(self, X):
"""Filter data along the last dimension.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times) or shape (n_channels, n_times) # noqa
The data to be filtered over the last dimension. The channels
dimension can be zero when passing a 2D array.
Returns
-------
X : array, shape is same as used in input.
The data after filtering.
"""
X = np.atleast_2d(X)
if X.ndim > 3:
raise ValueError("Array must be of at max 3 dimensions instead "
"got %s dimensional matrix" % (X.ndim))
shape = X.shape
X = X.reshape(-1, shape[-1])
(X, self.sfreq, self.l_freq, self.h_freq, self.l_trans_bandwidth,
self.h_trans_bandwidth, self.filter_length, _, self.fir_window,
self.fir_design) = \
_triage_filter_params(X, self.sfreq, self.l_freq, self.h_freq,
self.l_trans_bandwidth,
self.h_trans_bandwidth, self.filter_length,
self.method, phase='zero',
fir_window=self.fir_window,
fir_design=self.fir_design)
X = filter_data(X, self.sfreq, self.l_freq, self.h_freq,
filter_length=self.filter_length,
l_trans_bandwidth=self.l_trans_bandwidth,
h_trans_bandwidth=self.h_trans_bandwidth,
n_jobs=self.n_jobs, method=self.method,
iir_params=self.iir_params, copy=False,
fir_window=self.fir_window, fir_design=self.fir_design,
verbose=self.verbose)
return X.reshape(shape)
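A hedged end-to-end sketch (synthetic data; the pipeline composition is an illustrative assumption, not taken from the file) of how Scaler and Vectorizer plug into scikit-learn:

import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression

# Synthetic epochs: 40 epochs, 8 channels, 50 time points, binary labels.
rng = np.random.RandomState(0)
X = rng.randn(40, 8, 50)
y = rng.randint(0, 2, 40)

# Scaler standardizes each channel; Vectorizer flattens each epoch to a
# feature vector so downstream sklearn estimators can consume the data.
clf = make_pipeline(Scaler(scalings='mean'), Vectorizer(), LogisticRegression())
clf.fit(X, y)
print(clf.score(X, y))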
| 37.033721
| 97
| 0.586392
|
import numpy as np
from .mixin import TransformerMixin
from .base import BaseEstimator
from .. import pick_types
from ..filter import filter_data, _triage_filter_params
from ..time_frequency.psd import psd_array_multitaper
from ..externals.six import string_types
from ..utils import _check_type_picks, check_version
from ..io.pick import pick_info, _pick_data_channels, _picks_by_type
from ..cov import _check_scalings_user
class _ConstantScaler():
def __init__(self, info, scalings, do_scaling=True):
self._scalings = scalings
self._info = info
self._do_scaling = do_scaling
def fit(self, X, y=None):
scalings = _check_scalings_user(self._scalings)
picks_by_type = _picks_by_type(pick_info(
self._info, _pick_data_channels(self._info, exclude=())))
std = np.ones(sum(len(p[1]) for p in picks_by_type))
if X.shape[1] != len(std):
raise ValueError('info had %d data channels but X has %d channels'
% (len(std), len(X)))
        if self._do_scaling:
            for kind, picks in picks_by_type:
std[picks] = 1. / scalings[kind]
self.std_ = std
self.mean_ = np.zeros_like(std)
return self
def transform(self, X):
return X / self.std_
def inverse_transform(self, X, y=None):
return X * self.std_
def fit_transform(self, X, y=None):
return self.fit(X, y).transform(X)
def _sklearn_reshape_apply(func, return_result, X, *args, **kwargs):
if not isinstance(X, np.ndarray):
raise ValueError("data should be an np.ndarray, got %s." % type(X))
X = np.atleast_3d(X)
orig_shape = X.shape
X = np.reshape(X.transpose(0, 2, 1), (-1, orig_shape[1]))
X = func(X, *args, **kwargs)
if return_result:
X.shape = (orig_shape[0], orig_shape[2], orig_shape[1])
X = X.transpose(0, 2, 1)
return X
class Scaler(TransformerMixin, BaseEstimator):
def __init__(self, info=None, scalings=None, with_mean=True,
                 with_std=True):
        self.info = info
self.with_mean = with_mean
self.with_std = with_std
self.scalings = scalings
if not (scalings is None or isinstance(scalings, (dict, str))):
raise ValueError('scalings type should be dict, str, or None, '
'got %s' % type(scalings))
if isinstance(scalings, string_types) and \
scalings not in ('mean', 'median'):
raise ValueError('Invalid method for scaling, must be "mean" or '
'"median" but got %s' % scalings)
if scalings is None or isinstance(scalings, dict):
self._scaler = _ConstantScaler(info, scalings, self.with_std)
elif scalings == 'mean':
from sklearn.preprocessing import StandardScaler
            self._scaler = StandardScaler(with_mean=self.with_mean,
                                          with_std=self.with_std)
        else:
            if not check_version('sklearn', '0.17'):
raise ValueError("median requires version 0.17 of "
"sklearn library")
from sklearn.preprocessing import RobustScaler
            self._scaler = RobustScaler(with_centering=self.with_mean,
                                        with_scaling=self.with_std)
def fit(self, epochs_data, y=None):
_sklearn_reshape_apply(self._scaler.fit, False, epochs_data, y=y)
return self
def transform(self, epochs_data):
return _sklearn_reshape_apply(self._scaler.transform, True,
epochs_data)
def fit_transform(self, epochs_data, y=None):
return self.fit(epochs_data, y).transform(epochs_data)
def inverse_transform(self, epochs_data):
return _sklearn_reshape_apply(self._scaler.inverse_transform, True,
epochs_data)
class Vectorizer(TransformerMixin):
def fit(self, X, y=None):
X = np.asarray(X)
self.features_shape_ = X.shape[1:]
return self
def transform(self, X):
X = np.asarray(X)
if X.shape[1:] != self.features_shape_:
raise ValueError("Shape of X used in fit and transform must be "
"same")
return X.reshape(len(X), -1)
def fit_transform(self, X, y=None):
return self.fit(X).transform(X)
def inverse_transform(self, X):
X = np.asarray(X)
if X.ndim != 2:
raise ValueError("X should be of 2 dimensions but given has %s "
"dimension(s)" % X.ndim)
return X.reshape((len(X),) + self.features_shape_)
class PSDEstimator(TransformerMixin):
def __init__(self, sfreq=2 * np.pi, fmin=0, fmax=np.inf, bandwidth=None,
adaptive=False, low_bias=True, n_jobs=1,
                 normalization='length', verbose=None):
        self.sfreq = sfreq
self.fmin = fmin
self.fmax = fmax
self.bandwidth = bandwidth
self.adaptive = adaptive
self.low_bias = low_bias
self.n_jobs = n_jobs
self.verbose = verbose
self.normalization = normalization
def fit(self, epochs_data, y):
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
return self
def transform(self, epochs_data):
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
psd, _ = psd_array_multitaper(
epochs_data, sfreq=self.sfreq, fmin=self.fmin, fmax=self.fmax,
bandwidth=self.bandwidth, adaptive=self.adaptive,
low_bias=self.low_bias, normalization=self.normalization,
n_jobs=self.n_jobs)
return psd
class FilterEstimator(TransformerMixin):
def __init__(self, info, l_freq, h_freq, picks=None, filter_length='auto',
l_trans_bandwidth='auto', h_trans_bandwidth='auto', n_jobs=1,
method='fft', iir_params=None, fir_design='firwin',
                 verbose=None):
        self.info = info
self.l_freq = l_freq
self.h_freq = h_freq
self.picks = _check_type_picks(picks)
self.filter_length = filter_length
self.l_trans_bandwidth = l_trans_bandwidth
self.h_trans_bandwidth = h_trans_bandwidth
self.n_jobs = n_jobs
self.method = method
self.iir_params = iir_params
self.fir_design = fir_design
def fit(self, epochs_data, y):
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
if self.picks is None:
self.picks = pick_types(self.info, meg=True, eeg=True,
ref_meg=False, exclude=[])
if self.l_freq == 0:
self.l_freq = None
if self.h_freq is not None and self.h_freq > (self.info['sfreq'] / 2.):
self.h_freq = None
if self.l_freq is not None and not isinstance(self.l_freq, float):
self.l_freq = float(self.l_freq)
if self.h_freq is not None and not isinstance(self.h_freq, float):
self.h_freq = float(self.h_freq)
if self.info['lowpass'] is None or (self.h_freq is not None and
(self.l_freq is None or
self.l_freq < self.h_freq) and
self.h_freq <
self.info['lowpass']):
self.info['lowpass'] = self.h_freq
if self.info['highpass'] is None or (self.l_freq is not None and
(self.h_freq is None or
self.l_freq < self.h_freq) and
self.l_freq >
self.info['highpass']):
self.info['highpass'] = self.l_freq
return self
def transform(self, epochs_data):
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
epochs_data = np.atleast_3d(epochs_data)
return filter_data(
epochs_data, self.info['sfreq'], self.l_freq, self.h_freq,
self.picks, self.filter_length, self.l_trans_bandwidth,
self.h_trans_bandwidth, method=self.method,
iir_params=self.iir_params, n_jobs=self.n_jobs, copy=False,
fir_design=self.fir_design, verbose=False)
class UnsupervisedSpatialFilter(TransformerMixin, BaseEstimator):
    def __init__(self, estimator, average=False):
        for attr in ('fit', 'transform', 'fit_transform'):
if not hasattr(estimator, attr):
raise ValueError('estimator must be a scikit-learn '
'transformer, missing %s method' % attr)
if not isinstance(average, bool):
raise ValueError("average parameter must be of bool type, got "
"%s instead" % type(bool))
self.estimator = estimator
self.average = average
def fit(self, X, y=None):
if self.average:
X = np.mean(X, axis=0).T
else:
n_epochs, n_channels, n_times = X.shape
X = np.transpose(X, (1, 0, 2)).reshape((n_channels, n_epochs *
n_times)).T
self.estimator.fit(X)
return self
def fit_transform(self, X, y=None):
return self.fit(X).transform(X)
def transform(self, X):
return self._apply_method(X, 'transform')
def inverse_transform(self, X):
return self._apply_method(X, 'inverse_transform')
def _apply_method(self, X, method):
n_epochs, n_channels, n_times = X.shape
X = np.transpose(X, [1, 0, 2])
X = np.reshape(X, [n_channels, n_epochs * n_times]).T
method = getattr(self.estimator, method)
X = method(X)
X = np.reshape(X.T, [-1, n_epochs, n_times]).transpose([1, 0, 2])
return X
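# Editor-added usage sketch: a scikit-learn PCA used as the unsupervised
# spatial filter. The shapes and n_components are assumptions for
# illustration.
def _unsupervised_spatial_filter_example():
    import numpy as np
    from sklearn.decomposition import PCA
    X = np.random.randn(10, 32, 500)  # (n_epochs, n_channels, n_times)
    usf = UnsupervisedSpatialFilter(PCA(n_components=4), average=False)
    return usf.fit_transform(X)  # shape (n_epochs, 4, n_times)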
class TemporalFilter(TransformerMixin):
def __init__(self, l_freq=None, h_freq=None, sfreq=1.0,
filter_length='auto', l_trans_bandwidth='auto',
h_trans_bandwidth='auto', n_jobs=1, method='fir',
iir_params=None, fir_window='hamming', fir_design='firwin',
verbose=None):
self.l_freq = l_freq
self.h_freq = h_freq
self.sfreq = sfreq
self.filter_length = filter_length
self.l_trans_bandwidth = l_trans_bandwidth
self.h_trans_bandwidth = h_trans_bandwidth
self.n_jobs = n_jobs
self.method = method
self.iir_params = iir_params
self.fir_window = fir_window
self.fir_design = fir_design
self.verbose = verbose
if not isinstance(self.n_jobs, int) and self.n_jobs != 'cuda':
raise ValueError('n_jobs must be int or "cuda", got %s instead.'
% type(self.n_jobs))
def fit(self, X, y=None):
return self
def transform(self, X):
X = np.atleast_2d(X)
if X.ndim > 3:
raise ValueError("Array must be of at max 3 dimensions instead "
"got %s dimensional matrix" % (X.ndim))
shape = X.shape
X = X.reshape(-1, shape[-1])
(X, self.sfreq, self.l_freq, self.h_freq, self.l_trans_bandwidth,
self.h_trans_bandwidth, self.filter_length, _, self.fir_window,
self.fir_design) = \
_triage_filter_params(X, self.sfreq, self.l_freq, self.h_freq,
self.l_trans_bandwidth,
self.h_trans_bandwidth, self.filter_length,
self.method, phase='zero',
fir_window=self.fir_window,
fir_design=self.fir_design)
X = filter_data(X, self.sfreq, self.l_freq, self.h_freq,
filter_length=self.filter_length,
l_trans_bandwidth=self.l_trans_bandwidth,
h_trans_bandwidth=self.h_trans_bandwidth,
n_jobs=self.n_jobs, method=self.method,
iir_params=self.iir_params, copy=False,
fir_window=self.fir_window, fir_design=self.fir_design,
verbose=self.verbose)
return X.reshape(shape)
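# Editor-added usage sketch: band-pass filtering continuous signals. The
# band edges and sampling rate are example values, not defaults.
def _temporal_filter_example():
    import numpy as np
    X = np.random.randn(8, 1000)  # (n_signals, n_times)
    band_pass = TemporalFilter(l_freq=1.0, h_freq=30.0, sfreq=100.0)
    return band_pass.fit(X).transform(X)  # same shape, band-passed 1-30 Hz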
| true
| true
|
1c48c2b07f578561a5bd3b8cc4d3319e4282e76e
| 2,291
|
py
|
Python
|
steampipe_alchemy/models/aws_codebuild_project.py
|
RyanJarv/steampipe_alchemy
|
c8a31303252c1bd8d83d0f9c429d7d0ef7e1690f
|
[
"BSD-3-Clause"
] | 9
|
2021-04-21T04:21:01.000Z
|
2021-06-19T19:33:36.000Z
|
steampipe_alchemy/models/aws_codebuild_project.py
|
RyanJarv/steampipe_alchemy
|
c8a31303252c1bd8d83d0f9c429d7d0ef7e1690f
|
[
"BSD-3-Clause"
] | null | null | null |
steampipe_alchemy/models/aws_codebuild_project.py
|
RyanJarv/steampipe_alchemy
|
c8a31303252c1bd8d83d0f9c429d7d0ef7e1690f
|
[
"BSD-3-Clause"
] | 1
|
2021-04-26T21:08:20.000Z
|
2021-04-26T21:08:20.000Z
|
from sqlalchemy import Column
from sqlalchemy.types import JSON, Text, Boolean, TIMESTAMP, BigInteger
from sqlalchemy.dialects import postgresql as psql
from steampipe_alchemy.mixins import FormatMixins
from steampipe_alchemy import Base
class AwsCodebuildProject(Base, FormatMixins):
__tablename__ = 'aws_codebuild_project'
source = Column('source', JSON, nullable=True)
vpc_config = Column('vpc_config', JSON, nullable=True)
webhook = Column('webhook', JSON, nullable=True)
tags_src = Column('tags_src', JSON, nullable=True)
concurrent_build_limit = Column('concurrent_build_limit', BigInteger, nullable=True)
tags = Column('tags', JSON, nullable=True)
akas = Column('akas', JSON, nullable=True)
queued_timeout_in_minutes = Column('queued_timeout_in_minutes', BigInteger, nullable=True)
created = Column('created', TIMESTAMP, nullable=True)
last_modified = Column('last_modified', TIMESTAMP, nullable=True)
timeout_in_minutes = Column('timeout_in_minutes', BigInteger, nullable=True)
artifacts = Column('artifacts', JSON, nullable=True)
badge = Column('badge', JSON, nullable=True)
build_batch_config = Column('build_batch_config', JSON, nullable=True)
cache = Column('cache', JSON, nullable=True)
environment = Column('environment', JSON, nullable=True)
file_system_locations = Column('file_system_locations', JSON, nullable=True)
logs_config = Column('logs_config', JSON, nullable=True)
secondary_artifacts = Column('secondary_artifacts', JSON, nullable=True)
secondary_source_versions = Column('secondary_source_versions', JSON, nullable=True)
secondary_sources = Column('secondary_sources', JSON, nullable=True)
account_id = Column('account_id', Text, nullable=True)
arn = Column('arn', Text, primary_key=True, nullable=True)
description = Column('description', Text, nullable=True)
encryption_key = Column('encryption_key', Text, nullable=True)
service_role = Column('service_role', Text, nullable=True)
source_version = Column('source_version', Text, nullable=True)
title = Column('title', Text, nullable=True)
partition = Column('partition', Text, nullable=True)
region = Column('region', Text, nullable=True)
name = Column('name', Text, nullable=True)
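# Editor-added query sketch, not part of the generated model: `session` is
# assumed to be a sqlalchemy.orm.Session bound to the Steampipe Postgres
# endpoint that serves this table.
def _example_list_codebuild_projects(session):
    return session.query(AwsCodebuildProject.name,
                         AwsCodebuildProject.region,
                         AwsCodebuildProject.created).all()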
| 57.275
| 94
| 0.746399
|
| true
| true
|
1c48c3c2ebc8d2ba708e1b653f7bc8c9eea2bfcb
| 36,395
|
py
|
Python
|
deutschland/polizei_brandenburg/api_client.py
|
kiranmusze/deutschland
|
86d8ead3f38ad88ad66bb338b9f5a8db06992344
|
[
"Apache-2.0"
] | null | null | null |
deutschland/polizei_brandenburg/api_client.py
|
kiranmusze/deutschland
|
86d8ead3f38ad88ad66bb338b9f5a8db06992344
|
[
"Apache-2.0"
] | null | null | null |
deutschland/polizei_brandenburg/api_client.py
|
kiranmusze/deutschland
|
86d8ead3f38ad88ad66bb338b9f5a8db06992344
|
[
"Apache-2.0"
] | null | null | null |
"""
Polizei Brandenburg: App
Polizei Brandenburg news plus flood, traffic, and forest fire warnings # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import json
import atexit
import mimetypes
from multiprocessing.pool import ThreadPool
import io
import os
import re
import typing
from urllib.parse import quote
from urllib3.fields import RequestField
from deutschland.polizei_brandenburg import rest
from deutschland.polizei_brandenburg.configuration import Configuration
from deutschland.polizei_brandenburg.exceptions import (
ApiTypeError,
ApiValueError,
ApiException,
)
from deutschland.polizei_brandenburg.model_utils import (
ModelNormal,
ModelSimple,
ModelComposed,
check_allowed_values,
check_validations,
date,
datetime,
deserialize_file,
file_type,
model_to_dict,
none_type,
validate_and_convert_types,
)
class ApiClient(object):
"""Generic API client for OpenAPI client library builds.
OpenAPI generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the OpenAPI
templates.
NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param configuration: .Configuration object for this client
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to
the API.
:param cookie: a cookie to include in the header when making calls
to the API
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
_pool = None
def __init__(
self,
configuration=None,
header_name=None,
header_value=None,
cookie=None,
pool_threads=1,
):
if configuration is None:
configuration = Configuration.get_default_copy()
self.configuration = configuration
self.pool_threads = pool_threads
self.rest_client = rest.RESTClientObject(configuration)
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
self.cookie = cookie
# Set default User-Agent.
self.user_agent = "OpenAPI-Generator/1.0.0/python"
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
if self._pool:
self._pool.close()
self._pool.join()
self._pool = None
if hasattr(atexit, "unregister"):
atexit.unregister(self.close)
@property
def pool(self):
"""Create thread pool on first request
avoids instantiating unused threadpool for blocking clients.
"""
if self._pool is None:
atexit.register(self.close)
self._pool = ThreadPool(self.pool_threads)
return self._pool
@property
def user_agent(self):
"""User agent for this API client"""
return self.default_headers["User-Agent"]
@user_agent.setter
def user_agent(self, value):
self.default_headers["User-Agent"] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
def __call_api(
self,
resource_path: str,
method: str,
path_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
query_params: typing.Optional[
typing.List[typing.Tuple[str, typing.Any]]
] = None,
header_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
body: typing.Optional[typing.Any] = None,
post_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
files: typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None,
response_type: typing.Optional[typing.Tuple[typing.Any]] = None,
auth_settings: typing.Optional[typing.List[str]] = None,
_return_http_data_only: typing.Optional[bool] = None,
collection_formats: typing.Optional[typing.Dict[str, str]] = None,
_preload_content: bool = True,
_request_timeout: typing.Optional[
typing.Union[int, float, typing.Tuple]
] = None,
_host: typing.Optional[str] = None,
_check_type: typing.Optional[bool] = None,
):
config = self.configuration
# header parameters
header_params = header_params or {}
header_params.update(self.default_headers)
if self.cookie:
header_params["Cookie"] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
header_params = dict(
self.parameters_to_tuples(header_params, collection_formats)
)
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
path_params = self.parameters_to_tuples(path_params, collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
resource_path = resource_path.replace(
"{%s}" % k, quote(str(v), safe=config.safe_chars_for_path_param)
)
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = self.parameters_to_tuples(query_params, collection_formats)
# post parameters
if post_params or files:
post_params = post_params if post_params else []
post_params = self.sanitize_for_serialization(post_params)
post_params = self.parameters_to_tuples(post_params, collection_formats)
post_params.extend(self.files_parameters(files))
if header_params["Content-Type"].startswith("multipart"):
post_params = self.parameters_to_multipart(post_params, (dict))
# body
if body:
body = self.sanitize_for_serialization(body)
# auth setting
self.update_params_for_auth(
header_params, query_params, auth_settings, resource_path, method, body
)
# request url
if _host is None:
url = self.configuration.host + resource_path
else:
# use server/host defined in path or operation instead
url = _host + resource_path
try:
# perform request and return response
response_data = self.request(
method,
url,
query_params=query_params,
headers=header_params,
post_params=post_params,
body=body,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
)
except ApiException as e:
e.body = e.body.decode("utf-8")
raise e
self.last_response = response_data
return_data = response_data
if not _preload_content:
return return_data
# deserialize response data
if response_type:
if response_type != (file_type,):
encoding = "utf-8"
content_type = response_data.getheader("content-type")
if content_type is not None:
match = re.search(r"charset=([a-zA-Z\-\d]+)[\s\;]?", content_type)
if match:
encoding = match.group(1)
response_data.data = response_data.data.decode(encoding)
return_data = self.deserialize(response_data, response_type, _check_type)
else:
return_data = None
if _return_http_data_only:
return return_data
else:
return (return_data, response_data.status, response_data.getheaders())
def parameters_to_multipart(self, params, collection_types):
"""Get parameters as list of tuples, formatting as json if value is collection_types
:param params: Parameters as list of two-tuples
:param dict collection_types: Parameter collection types
:return: Parameters as list of tuple or urllib3.fields.RequestField
"""
new_params = []
if collection_types is None:
collection_types = dict
for k, v in (
params.items() if isinstance(params, dict) else params
): # noqa: E501
if isinstance(
v, collection_types
): # v is instance of collection_type, formatting as application/json
v = json.dumps(v, ensure_ascii=False).encode("utf-8")
field = RequestField(k, v)
field.make_multipart(content_type="application/json; charset=utf-8")
new_params.append(field)
else:
new_params.append((k, v))
return new_params
@classmethod
def sanitize_for_serialization(cls, obj):
"""Prepares data for transmission before it is sent with the rest client
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is OpenAPI model, return the properties dict.
If obj is io.IOBase, return the bytes
:param obj: The data to serialize.
:return: The serialized form of data.
"""
if isinstance(obj, (ModelNormal, ModelComposed)):
return {
key: cls.sanitize_for_serialization(val)
for key, val in model_to_dict(obj, serialize=True).items()
}
elif isinstance(obj, io.IOBase):
return cls.get_file_data_and_close_file(obj)
elif isinstance(obj, (str, int, float, none_type, bool)):
return obj
elif isinstance(obj, (datetime, date)):
return obj.isoformat()
elif isinstance(obj, ModelSimple):
return cls.sanitize_for_serialization(obj.value)
elif isinstance(obj, (list, tuple)):
return [cls.sanitize_for_serialization(item) for item in obj]
if isinstance(obj, dict):
return {
key: cls.sanitize_for_serialization(val) for key, val in obj.items()
}
raise ApiValueError(
"Unable to prepare type {} for serialization".format(obj.__class__.__name__)
)
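# Editor-added illustration of the rules implemented above (the sample
# payload is made up):
#
#   ApiClient.sanitize_for_serialization(
#       {"when": datetime(2021, 1, 1), "tags": ("a", "b"), "count": 3})
#   # -> {"when": "2021-01-01T00:00:00", "tags": ["a", "b"], "count": 3}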
def deserialize(self, response, response_type, _check_type):
"""Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: For the response, a tuple containing:
valid classes
a list containing valid classes (for list schemas)
a dict containing a tuple of valid classes as the value
Example values:
(str,)
(Pet,)
(float, none_type)
([int, none_type],)
({str: (bool, str, int, float, date, datetime, str, none_type)},)
:param _check_type: boolean, whether to check the types of the data
received from the server
:type _check_type: bool
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == (file_type,):
content_disposition = response.getheader("Content-Disposition")
return deserialize_file(
response.data,
self.configuration,
content_disposition=content_disposition,
)
# fetch data from response object
try:
received_data = json.loads(response.data)
except ValueError:
received_data = response.data
# store our data under the key of 'received_data' so users have some
# context if they are deserializing a string and the data type is wrong
deserialized_data = validate_and_convert_types(
received_data,
response_type,
["received_data"],
True,
_check_type,
configuration=self.configuration,
)
return deserialized_data
def call_api(
self,
resource_path: str,
method: str,
path_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
query_params: typing.Optional[
typing.List[typing.Tuple[str, typing.Any]]
] = None,
header_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
body: typing.Optional[typing.Any] = None,
post_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
files: typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None,
response_type: typing.Optional[typing.Tuple[typing.Any]] = None,
auth_settings: typing.Optional[typing.List[str]] = None,
async_req: typing.Optional[bool] = None,
_return_http_data_only: typing.Optional[bool] = None,
collection_formats: typing.Optional[typing.Dict[str, str]] = None,
_preload_content: bool = True,
_request_timeout: typing.Optional[
typing.Union[int, float, typing.Tuple]
] = None,
_host: typing.Optional[str] = None,
_check_type: typing.Optional[bool] = None,
):
"""Makes the HTTP request (synchronous) and returns deserialized data.
To make an async_req request, set the async_req parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response_type: For the response, a tuple containing:
valid classes
a list containing valid classes (for list schemas)
a dict containing a tuple of valid classes as the value
Example values:
(str,)
(Pet,)
(float, none_type)
([int, none_type],)
({str: (bool, str, int, float, date, datetime, str, none_type)},)
:param files: key -> field name, value -> a list of open file
objects for `multipart/form-data`.
:type files: dict
:param async_req bool: execute request asynchronously
:type async_req: bool, optional
:param _return_http_data_only: return the response data only, without
the HTTP status code and headers
:type _return_http_data_only: bool, optional
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:type collection_formats: dict, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _check_type: boolean describing if the data back from the server
should have its type checked.
:type _check_type: bool, optional
:return:
If async_req parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter async_req is False or missing,
then the method will return the response directly.
"""
if not async_req:
return self.__call_api(
resource_path,
method,
path_params,
query_params,
header_params,
body,
post_params,
files,
response_type,
auth_settings,
_return_http_data_only,
collection_formats,
_preload_content,
_request_timeout,
_host,
_check_type,
)
return self.pool.apply_async(
self.__call_api,
(
resource_path,
method,
path_params,
query_params,
header_params,
body,
post_params,
files,
response_type,
auth_settings,
_return_http_data_only,
collection_formats,
_preload_content,
_request_timeout,
_host,
_check_type,
),
)
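# Editor-added usage sketch; the resource path and response type are
# hypothetical, not taken from this API's generated endpoints:
#
#   client = ApiClient()
#   thread = client.call_api("/news", "GET", response_type=(str,),
#                            async_req=True, _return_http_data_only=True)
#   body = thread.get()  # blocks until the worker thread finishes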
def request(
self,
method,
url,
query_params=None,
headers=None,
post_params=None,
body=None,
_preload_content=True,
_request_timeout=None,
):
"""Makes the HTTP request using RESTClient."""
if method == "GET":
return self.rest_client.GET(
url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers,
)
elif method == "HEAD":
return self.rest_client.HEAD(
url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers,
)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(
url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
)
elif method == "POST":
return self.rest_client.POST(
url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
)
elif method == "PUT":
return self.rest_client.PUT(
url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
)
elif method == "PATCH":
return self.rest_client.PATCH(
url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
)
elif method == "DELETE":
return self.rest_client.DELETE(
url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body,
)
else:
raise ApiValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in (
params.items() if isinstance(params, dict) else params
): # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == "multi":
new_params.extend((k, value) for value in v)
else:
if collection_format == "ssv":
delimiter = " "
elif collection_format == "tsv":
delimiter = "\t"
elif collection_format == "pipes":
delimiter = "|"
else: # csv is the default
delimiter = ","
new_params.append((k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
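# Editor-added examples of the collection formats handled above:
#
#   client.parameters_to_tuples({"ids": [1, 2, 3]}, {"ids": "csv"})
#   # -> [("ids", "1,2,3")]
#   client.parameters_to_tuples({"ids": [1, 2, 3]}, {"ids": "multi"})
#   # -> [("ids", 1), ("ids", 2), ("ids", 3)]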
@staticmethod
def get_file_data_and_close_file(file_instance: io.IOBase) -> bytes:
file_data = file_instance.read()
file_instance.close()
return file_data
def files_parameters(
self, files: typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None
):
"""Builds form parameters.
:param files: None or a dict with key=param_name and
value is a list of open file objects
:return: List of tuples of form parameters with file data
"""
if files is None:
return []
params = []
for param_name, file_instances in files.items():
if file_instances is None:
# if the file field is nullable, skip None values
continue
for file_instance in file_instances:
if file_instance is None:
# if the file field is nullable, skip None values
continue
if file_instance.closed is True:
raise ApiValueError(
"Cannot read a closed file. The passed in file_type "
"for %s must be open." % param_name
)
filename = os.path.basename(file_instance.name)
filedata = self.get_file_data_and_close_file(file_instance)
mimetype = (
mimetypes.guess_type(filename)[0] or "application/octet-stream"
)
params.append(
tuple([param_name, tuple([filename, filedata, mimetype])])
)
return params
def select_header_accept(self, accepts):
"""Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = [x.lower() for x in accepts]
if "application/json" in accepts:
return "application/json"
else:
return ", ".join(accepts)
def select_header_content_type(self, content_types):
"""Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return "application/json"
content_types = [x.lower() for x in content_types]
if "application/json" in content_types or "*/*" in content_types:
return "application/json"
else:
return content_types[0]
def update_params_for_auth(
self, headers, queries, auth_settings, resource_path, method, body
):
"""Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param queries: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
:param resource_path: A string representation of the HTTP request resource path.
:param method: A string representation of the HTTP request method.
:param body: An object representing the body of the HTTP request.
The object type is the return value of _encoder.default().
"""
if not auth_settings:
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
if auth_setting["in"] == "cookie":
headers["Cookie"] = auth_setting["value"]
elif auth_setting["in"] == "header":
if auth_setting["type"] != "http-signature":
headers[auth_setting["key"]] = auth_setting["value"]
elif auth_setting["in"] == "query":
queries.append((auth_setting["key"], auth_setting["value"]))
else:
raise ApiValueError(
"Authentication token must be in `query` or `header`"
)
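# Editor-added usage sketch: constructing the client as a context manager so
# that the worker thread pool is closed on exit. The host URL is a
# placeholder assumption.
def _example_client_session():
    config = Configuration(host="https://example.invalid")
    with ApiClient(configuration=config) as client:
        client.set_default_header("X-Request-Id", "demo")
        return client.user_agent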
class Endpoint(object):
def __init__(
self,
settings=None,
params_map=None,
root_map=None,
headers_map=None,
api_client=None,
callable=None,
):
"""Creates an endpoint
Args:
settings (dict): see below key value pairs
'response_type' (tuple/None): response type
'auth' (list): a list of auth type keys
'endpoint_path' (str): the endpoint path
'operation_id' (str): endpoint string identifier
'http_method' (str): POST/PUT/PATCH/GET etc
'servers' (list): list of str servers that this endpoint is at
params_map (dict): see below key value pairs
'all' (list): list of str endpoint parameter names
'required' (list): list of required parameter names
'nullable' (list): list of nullable parameter names
'enum' (list): list of parameters with enum values
'validation' (list): list of parameters with validations
root_map
'validations' (dict): the dict mapping endpoint parameter tuple
paths to their validation dictionaries
'allowed_values' (dict): the dict mapping endpoint parameter
tuple paths to their allowed_values (enum) dictionaries
'openapi_types' (dict): param_name to openapi type
'attribute_map' (dict): param_name to camelCase name
'location_map' (dict): param_name to 'body', 'file', 'form',
'header', 'path', 'query'
collection_format_map (dict): param_name to `csv` etc.
headers_map (dict): see below key value pairs
'accept' (list): list of Accept header strings
'content_type' (list): list of Content-Type header strings
api_client (ApiClient) api client instance
callable (function): the function which is invoked when the
Endpoint is called
"""
self.settings = settings
self.params_map = params_map
self.params_map["all"].extend(
[
"async_req",
"_host_index",
"_preload_content",
"_request_timeout",
"_return_http_data_only",
"_check_input_type",
"_check_return_type",
]
)
self.params_map["nullable"].extend(["_request_timeout"])
self.validations = root_map["validations"]
self.allowed_values = root_map["allowed_values"]
self.openapi_types = root_map["openapi_types"]
extra_types = {
"async_req": (bool,),
"_host_index": (none_type, int),
"_preload_content": (bool,),
"_request_timeout": (
none_type,
float,
(float,),
[float],
int,
(int,),
[int],
),
"_return_http_data_only": (bool,),
"_check_input_type": (bool,),
"_check_return_type": (bool,),
}
self.openapi_types.update(extra_types)
self.attribute_map = root_map["attribute_map"]
self.location_map = root_map["location_map"]
self.collection_format_map = root_map["collection_format_map"]
self.headers_map = headers_map
self.api_client = api_client
self.callable = callable
def __validate_inputs(self, kwargs):
for param in self.params_map["enum"]:
if param in kwargs:
check_allowed_values(self.allowed_values, (param,), kwargs[param])
for param in self.params_map["validation"]:
if param in kwargs:
check_validations(
self.validations,
(param,),
kwargs[param],
configuration=self.api_client.configuration,
)
if kwargs["_check_input_type"] is False:
return
for key, value in kwargs.items():
fixed_val = validate_and_convert_types(
value,
self.openapi_types[key],
[key],
False,
kwargs["_check_input_type"],
configuration=self.api_client.configuration,
)
kwargs[key] = fixed_val
def __gather_params(self, kwargs):
params = {
"body": None,
"collection_format": {},
"file": {},
"form": [],
"header": {},
"path": {},
"query": [],
}
for param_name, param_value in kwargs.items():
param_location = self.location_map.get(param_name)
if param_location is None:
continue
if param_location:
if param_location == "body":
params["body"] = param_value
continue
base_name = self.attribute_map[param_name]
if param_location == "form" and self.openapi_types[param_name] == (
file_type,
):
params["file"][param_name] = [param_value]
elif param_location == "form" and self.openapi_types[param_name] == (
[file_type],
):
# param_value is already a list
params["file"][param_name] = param_value
elif param_location in {"form", "query"}:
param_value_full = (base_name, param_value)
params[param_location].append(param_value_full)
if param_location not in {"form", "query"}:
params[param_location][base_name] = param_value
collection_format = self.collection_format_map.get(param_name)
if collection_format:
params["collection_format"][base_name] = collection_format
return params
def __call__(self, *args, **kwargs):
"""This method is invoked when endpoints are called
Example:
api_instance = DefaultApi()
api_instance.news_version1_get # this is an instance of the class Endpoint
api_instance.news_version1_get() # this invokes api_instance.news_version1_get.__call__()
which then invokes the callable functions stored in that endpoint at
api_instance.news_version1_get.callable or self.callable in this class
"""
return self.callable(self, *args, **kwargs)
def call_with_http_info(self, **kwargs):
try:
index = (
self.api_client.configuration.server_operation_index.get(
self.settings["operation_id"],
self.api_client.configuration.server_index,
)
if kwargs["_host_index"] is None
else kwargs["_host_index"]
)
server_variables = (
self.api_client.configuration.server_operation_variables.get(
self.settings["operation_id"],
self.api_client.configuration.server_variables,
)
)
_host = self.api_client.configuration.get_host_from_settings(
index, variables=server_variables, servers=self.settings["servers"]
)
except IndexError:
if self.settings["servers"]:
raise ApiValueError(
"Invalid host index. Must be 0 <= index < %s"
% len(self.settings["servers"])
)
_host = None
for key, value in kwargs.items():
if key not in self.params_map["all"]:
raise ApiTypeError(
"Got an unexpected parameter '%s'"
" to method `%s`" % (key, self.settings["operation_id"])
)
# only throw this nullable ApiValueError if _check_input_type
# is False, if _check_input_type==True we catch this case
# in self.__validate_inputs
if (
key not in self.params_map["nullable"]
and value is None
and kwargs["_check_input_type"] is False
):
raise ApiValueError(
"Value may not be None for non-nullable parameter `%s`"
" when calling `%s`" % (key, self.settings["operation_id"])
)
for key in self.params_map["required"]:
if key not in kwargs.keys():
raise ApiValueError(
"Missing the required parameter `%s` when calling "
"`%s`" % (key, self.settings["operation_id"])
)
self.__validate_inputs(kwargs)
params = self.__gather_params(kwargs)
accept_headers_list = self.headers_map["accept"]
if accept_headers_list:
params["header"]["Accept"] = self.api_client.select_header_accept(
accept_headers_list
)
content_type_headers_list = self.headers_map["content_type"]
if content_type_headers_list:
header_list = self.api_client.select_header_content_type(
content_type_headers_list
)
params["header"]["Content-Type"] = header_list
return self.api_client.call_api(
self.settings["endpoint_path"],
self.settings["http_method"],
params["path"],
params["query"],
params["header"],
body=params["body"],
post_params=params["form"],
files=params["file"],
response_type=self.settings["response_type"],
auth_settings=self.settings["auth"],
async_req=kwargs["async_req"],
_check_type=kwargs["_check_return_type"],
_return_http_data_only=kwargs["_return_http_data_only"],
_preload_content=kwargs["_preload_content"],
_request_timeout=kwargs["_request_timeout"],
_host=_host,
collection_formats=params["collection_format"],
)
| 38.189927
| 98
| 0.572908
|
| true
| true
|
1c48c4ba54570e45f887298ee9e4673315687b4d
| 17,349
|
py
|
Python
|
tools/nocompile_driver.py
|
google-ar/chromium
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 777
|
2017-08-29T15:15:32.000Z
|
2022-03-21T05:29:41.000Z
|
tools/nocompile_driver.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 66
|
2017-08-30T18:31:18.000Z
|
2021-08-02T10:59:35.000Z
|
tools/nocompile_driver.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 123
|
2017-08-30T01:19:34.000Z
|
2022-03-17T22:55:31.000Z
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements a simple "negative compile" test for C++ on linux.
Sometimes a C++ API needs to ensure that various usages cannot compile. To
enable unittesting of these assertions, we use this python script to
invoke gcc on a source file and assert that compilation fails.
For more info, see:
http://dev.chromium.org/developers/testing/no-compile-tests
"""
import StringIO
import ast
import locale
import os
import re
import select
import shlex
import subprocess
import sys
import time
# Matches lines that start with #if and have the substring TEST in the
# conditional. Also extracts the comment. This allows us to search for
# lines like the following:
#
# #ifdef NCTEST_NAME_OF_TEST // [r'expected output']
# #if defined(NCTEST_NAME_OF_TEST) // [r'expected output']
# #if NCTEST_NAME_OF_TEST // [r'expected output']
# #elif NCTEST_NAME_OF_TEST // [r'expected output']
# #elif DISABLED_NCTEST_NAME_OF_TEST // [r'expected output']
#
# inside the unittest file.
NCTEST_CONFIG_RE = re.compile(r'^#(?:el)?if.*\s+(\S*NCTEST\S*)\s*(//.*)?')
# Matches and removes the defined() preprocessor predicate. This is useful
# for test cases that use the preprocessor if-statement form:
#
# #if defined(NCTEST_NAME_OF_TEST)
#
# Should be used to post-process the results found by NCTEST_CONFIG_RE.
STRIP_DEFINED_RE = re.compile(r'defined\((.*)\)')
# Used to grab the expectation from comment at the end of an #ifdef. See
# NCTEST_CONFIG_RE's comment for examples of what the format should look like.
#
# The extracted substring should be a python array of regular expressions.
EXTRACT_EXPECTATION_RE = re.compile(r'//\s*(\[.*\])')
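# Editor-added illustration of how the three patterns above combine on one
# line from a .nc file (the test name and expectation text are made up):
#
#   line = '#if defined(NCTEST_FOO)  // [r"invalid conversion"]'
#   name, comment = NCTEST_CONFIG_RE.match(line).groups()
#   STRIP_DEFINED_RE.match(name).group(1)           # 'NCTEST_FOO'
#   EXTRACT_EXPECTATION_RE.match(comment).group(1)  # '[r"invalid conversion"]'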
# The header for the result file so that it can be compiled.
RESULT_FILE_HEADER = """
// This file is generated by the no compile test from:
// %s
#include "base/logging.h"
#include "testing/gtest/include/gtest/gtest.h"
"""
# The GUnit test function to output on a successful test completion.
SUCCESS_GUNIT_TEMPLATE = """
TEST(%s, %s) {
LOG(INFO) << "Took %f secs. Started at %f, ended at %f";
}
"""
# The GUnit test function to output for a disabled test.
DISABLED_GUNIT_TEMPLATE = """
TEST(%s, %s) { }
"""
# Timeout constants.
NCTEST_TERMINATE_TIMEOUT_SEC = 60
NCTEST_KILL_TIMEOUT_SEC = NCTEST_TERMINATE_TIMEOUT_SEC + 2
BUSY_LOOP_MAX_TIME_SEC = NCTEST_KILL_TIMEOUT_SEC * 2
def ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path):
"""Make sure the arguments being passed in are sane."""
assert parallelism >= 1
assert type(sourcefile_path) is str
assert type(cflags) is str
assert type(resultfile_path) is str
def ParseExpectation(expectation_string):
"""Extracts expectation definition from the trailing comment on the ifdef.
See the comment on NCTEST_CONFIG_RE for examples of the format we are parsing.
Args:
expectation_string: A string like "// [r'some_regex']"
Returns:
A list of compiled regular expressions indicating all possible valid
compiler outputs. If the list is empty, all outputs are considered valid.
"""
assert expectation_string is not None
match = EXTRACT_EXPECTATION_RE.match(expectation_string)
assert match
raw_expectation = ast.literal_eval(match.group(1))
assert type(raw_expectation) is list
expectation = []
for regex_str in raw_expectation:
assert type(regex_str) is str
expectation.append(re.compile(regex_str))
return expectation
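# Editor-added example (the expectation string is made up):
#
#   ParseExpectation("// [r'invalid conversion']")
#   # -> [re.compile(r'invalid conversion')]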
def ExtractTestConfigs(sourcefile_path, suite_name):
"""Parses the source file for test configurations.
Each no-compile test in the file is separated by an ifdef macro. We scan
the source file with the NCTEST_CONFIG_RE to find all ifdefs that look like
they demark one no-compile test and try to extract the test configuration
from that.
Args:
sourcefile_path: The path to the source file.
suite_name: The name of the test suite.
Returns:
A list of test configurations. Each test configuration is a dictionary of
the form:
{ name: 'NCTEST_NAME'
suite_name: 'SOURCE_FILE_NAME'
expectations: [re.Pattern, re.Pattern] }
The |suite_name| is used to generate a pretty gtest output on successful
completion of the no compile test.
The compiled regexps in |expectations| define the valid outputs of the
compiler. If any one of the listed patterns matches either the stderr or
stdout from the compilation, and the compilation failed, then the test is
considered to have succeeded. If the list is empty, then we ignore the
compiler output and just check for failed compilation. If |expectations|
is actually None, then this specifies a compiler sanity check test, which
should expect a SUCCESSFUL compilation.
"""
sourcefile = open(sourcefile_path, 'r')
  # Start with the compiler sanity test. You always need at least one sanity
  # test to show that the compiler flags and configuration are not simply
  # wrong. Otherwise, a misconfigured compiler, or an error in the shared
  # portions of the .nc file, would cause all tests to erroneously pass.
test_configs = []
for line in sourcefile:
match_result = NCTEST_CONFIG_RE.match(line)
if not match_result:
continue
groups = match_result.groups()
# Grab the name and remove the defined() predicate if there is one.
name = groups[0]
strip_result = STRIP_DEFINED_RE.match(name)
if strip_result:
name = strip_result.group(1)
# Read expectations if there are any.
test_configs.append({'name': name,
'suite_name': suite_name,
'expectations': ParseExpectation(groups[1])})
sourcefile.close()
return test_configs
def StartTest(sourcefile_path, cflags, config):
"""Start one negative compile test.
Args:
sourcefile_path: The path to the source file.
cflags: A string with all the CFLAGS to give to gcc. This string will be
split by shlex, so be careful with escaping.
config: A dictionary describing the test. See ExtractTestConfigs
for a description of the config format.
Returns:
A dictionary containing all the information about the started test. The
fields in the dictionary are as follows:
{ 'proc': A subprocess object representing the compiler run.
'cmdline': The executed command line.
'name': The name of the test.
'suite_name': The suite name to use when generating the gunit test
result.
'terminate_timeout': The timestamp in seconds since the epoch after
which the test should be terminated.
'kill_timeout': The timestamp in seconds since the epoch after which
the test should be given a hard kill signal.
'started_at': A timestamp in seconds since the epoch for when this test
was started.
'aborted_at': A timestamp in seconds since the epoch for when this test
was aborted. If the test completed successfully,
this value is 0.
'finished_at': A timestamp in seconds since the epoch for when this
test was successfully complete. If the test is aborted,
or running, this value is 0.
'expectations': A dictionary with the test expectations. See
ParseExpectation() for the structure.
}
"""
# TODO(ajwong): Get the compiler from gyp.
cmdline = [os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../third_party/llvm-build/Release+Asserts/bin',
'clang++')]
cmdline.extend(shlex.split(cflags))
name = config['name']
expectations = config['expectations']
if expectations is not None:
cmdline.append('-D%s' % name)
cmdline.extend(['-std=c++11', '-o', '/dev/null', '-c', '-x', 'c++',
sourcefile_path])
process = subprocess.Popen(cmdline, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
now = time.time()
return {'proc': process,
'cmdline': ' '.join(cmdline),
'name': name,
'suite_name': config['suite_name'],
'terminate_timeout': now + NCTEST_TERMINATE_TIMEOUT_SEC,
'kill_timeout': now + NCTEST_KILL_TIMEOUT_SEC,
'started_at': now,
'aborted_at': 0,
'finished_at': 0,
'expectations': expectations}
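# For example (hypothetical values): a config named NCTEST_FOO together with
# cflags='-Wall' produces roughly this command line:
#   .../clang++ -Wall -DNCTEST_FOO -std=c++11 -o /dev/null -c -x c++ foo.nc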
def PassTest(resultfile, test):
"""Logs the result of a test started by StartTest(), or a disabled test
configuration.
Args:
resultfile: File object for .cc file that results are written to.
test: An instance of the dictionary returned by StartTest(), a
configuration from ExtractTestConfigs().
"""
# The 'started_at' key is only added if a test has been started.
if 'started_at' in test:
resultfile.write(SUCCESS_GUNIT_TEMPLATE % (
test['suite_name'], test['name'],
test['finished_at'] - test['started_at'],
test['started_at'], test['finished_at']))
else:
resultfile.write(DISABLED_GUNIT_TEMPLATE % (
test['suite_name'], test['name']))
def FailTest(resultfile, test, error, stdout=None, stderr=None):
"""Logs the result of a test started by StartTest()
Args:
resultfile: File object for .cc file that results are written to.
test: An instance of the dictionary returned by StartTest()
error: The printable reason for the failure.
stdout: The test's output to stdout.
stderr: The test's output to stderr.
"""
resultfile.write('#error "%s Failed: %s"\n' % (test['name'], error))
resultfile.write('#error "compile line: %s"\n' % test['cmdline'])
if stdout and len(stdout) != 0:
resultfile.write('#error "%s stdout:"\n' % test['name'])
for line in stdout.split('\n'):
resultfile.write('#error " %s:"\n' % line)
if stderr and len(stderr) != 0:
resultfile.write('#error "%s stderr:"\n' % test['name'])
for line in stderr.split('\n'):
resultfile.write('#error " %s"\n' % line)
resultfile.write('\n')
def WriteStats(resultfile, suite_name, timings):
"""Logs the peformance timings for each stage of the script into a fake test.
Args:
resultfile: File object for .cc file that results are written to.
suite_name: The name of the GUnit suite this test belongs to.
timings: Dictionary with timestamps for each stage of the script run.
"""
stats_template = ("Started %f, Ended %f, Total %fs, Extract %fs, "
"Compile %fs, Process %fs")
total_secs = timings['results_processed'] - timings['started']
extract_secs = timings['extract_done'] - timings['started']
compile_secs = timings['compile_done'] - timings['extract_done']
process_secs = timings['results_processed'] - timings['compile_done']
resultfile.write('TEST(%s, Stats) { LOG(INFO) << "%s"; }\n' % (
suite_name, stats_template % (
timings['started'], timings['results_processed'], total_secs,
extract_secs, compile_secs, process_secs)))
def ProcessTestResult(resultfile, test):
"""Interprets and logs the result of a test started by StartTest()
Args:
resultfile: File object for .cc file that results are written to.
test: The dictionary from StartTest() to process.
"""
  # Snap a copy of stdout and stderr into the test dictionary immediately,
  # because we can only call communicate() once on the Popen object, and lots
  # of the code below needs access to it.
proc = test['proc']
(stdout, stderr) = proc.communicate()
if test['aborted_at'] != 0:
FailTest(resultfile, test, "Compile timed out. Started %f ended %f." %
(test['started_at'], test['aborted_at']))
return
if proc.poll() == 0:
# Handle failure due to successful compile.
FailTest(resultfile, test,
'Unexpected successful compilation.',
stdout, stderr)
return
else:
# Check the output has the right expectations. If there are no
# expectations, then we just consider the output "matched" by default.
if len(test['expectations']) == 0:
PassTest(resultfile, test)
return
# Otherwise test against all expectations.
for regexp in test['expectations']:
if (regexp.search(stdout) is not None or
regexp.search(stderr) is not None):
PassTest(resultfile, test)
return
expectation_str = ', '.join(
["r'%s'" % regexp.pattern for regexp in test['expectations']])
FailTest(resultfile, test,
'Expectations [%s] did not match output.' % expectation_str,
stdout, stderr)
return
def CompleteAtLeastOneTest(resultfile, executing_tests):
"""Blocks until at least one task is removed from executing_tests.
This function removes completed tests from executing_tests, logging failures
and output. If no tests can be removed, it will enter a poll-loop until one
test finishes or times out. On a timeout, this function is responsible for
terminating the process in the appropriate fashion.
  Args:
    resultfile: File object for .cc file that results are written to.
    executing_tests: A dict mapping a string containing the test name to the
        test dict returned from StartTest().
Returns:
A list of tests that have finished.
"""
finished_tests = []
busy_loop_timeout = time.time() + BUSY_LOOP_MAX_TIME_SEC
while len(finished_tests) == 0:
# If we don't make progress for too long, assume the code is just dead.
assert busy_loop_timeout > time.time()
# Select on the output pipes.
read_set = []
for test in executing_tests.values():
read_set.extend([test['proc'].stderr, test['proc'].stdout])
result = select.select(read_set, [], read_set, NCTEST_TERMINATE_TIMEOUT_SEC)
# Now attempt to process results.
now = time.time()
for test in executing_tests.values():
proc = test['proc']
if proc.poll() is not None:
test['finished_at'] = now
finished_tests.append(test)
elif test['terminate_timeout'] < now:
proc.terminate()
test['aborted_at'] = now
elif test['kill_timeout'] < now:
proc.kill()
test['aborted_at'] = now
for test in finished_tests:
del executing_tests[test['name']]
return finished_tests
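# Note on the loop above: select.select() only wakes it when a child writes
# to stdout/stderr or the timeout elapses; the per-test terminate and kill
# deadlines are then enforced on the following poll pass.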
def main():
if len(sys.argv) != 5:
print('Usage: %s <parallelism> <sourcefile> <cflags> <resultfile>' %
sys.argv[0])
sys.exit(1)
# Force us into the "C" locale so the compiler doesn't localize its output.
# In particular, this stops gcc from using smart quotes when in english UTF-8
# locales. This makes the expectation writing much easier.
os.environ['LC_ALL'] = 'C'
parallelism = int(sys.argv[1])
sourcefile_path = sys.argv[2]
cflags = sys.argv[3]
resultfile_path = sys.argv[4]
timings = {'started': time.time()}
ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path)
# Convert filename from underscores to CamelCase.
words = os.path.splitext(os.path.basename(sourcefile_path))[0].split('_')
words = [w.capitalize() for w in words]
suite_name = 'NoCompile' + ''.join(words)
test_configs = ExtractTestConfigs(sourcefile_path, suite_name)
timings['extract_done'] = time.time()
resultfile = StringIO.StringIO()
resultfile.write(RESULT_FILE_HEADER % sourcefile_path)
# Run the no-compile tests, but ensure we do not run more than |parallelism|
# tests at once.
timings['header_written'] = time.time()
executing_tests = {}
finished_tests = []
test = StartTest(
sourcefile_path,
cflags + ' -MMD -MF %s.d -MT %s' % (resultfile_path, resultfile_path),
{ 'name': 'NCTEST_SANITY',
'suite_name': suite_name,
'expectations': None,
})
executing_tests[test['name']] = test
for config in test_configs:
# CompleteAtLeastOneTest blocks until at least one test finishes. Thus, this
# acts as a semaphore. We cannot use threads + a real semaphore because
# subprocess forks, which can cause all sorts of hilarity with threads.
if len(executing_tests) >= parallelism:
finished_tests.extend(CompleteAtLeastOneTest(resultfile, executing_tests))
if config['name'].startswith('DISABLED_'):
PassTest(resultfile, config)
else:
test = StartTest(sourcefile_path, cflags, config)
assert test['name'] not in executing_tests
executing_tests[test['name']] = test
  # If there are no more tests to start, we still need to drain the running
# ones.
while len(executing_tests) > 0:
finished_tests.extend(CompleteAtLeastOneTest(resultfile, executing_tests))
timings['compile_done'] = time.time()
for test in finished_tests:
if test['name'] == 'NCTEST_SANITY':
_, stderr = test['proc'].communicate()
return_code = test['proc'].poll()
if return_code != 0:
sys.stderr.write(stderr)
continue
ProcessTestResult(resultfile, test)
timings['results_processed'] = time.time()
WriteStats(resultfile, suite_name, timings)
if return_code == 0:
with open(resultfile_path, 'w') as fd:
fd.write(resultfile.getvalue())
resultfile.close()
sys.exit(return_code)
if __name__ == '__main__':
main()
| 35.62423
| 80
| 0.683152
|
import StringIO
import ast
import locale
import os
import re
import select
import shlex
import subprocess
import sys
import time
NCTEST_CONFIG_RE = re.compile(r'^#(?:el)?if.*\s+(\S*NCTEST\S*)\s*(//.*)?')
STRIP_DEFINED_RE = re.compile(r'defined\((.*)\)')
#
# The extracted substring should be a python array of regular expressions.
EXTRACT_EXPECTATION_RE = re.compile(r'//\s*(\[.*\])')
# The header for the result file so that it can be compiled.
RESULT_FILE_HEADER = """
// This file is generated by the no compile test from:
// %s
#include "base/logging.h"
#include "testing/gtest/include/gtest/gtest.h"
"""
# The GUnit test function to output on a successful test completion.
SUCCESS_GUNIT_TEMPLATE = """
TEST(%s, %s) {
LOG(INFO) << "Took %f secs. Started at %f, ended at %f";
}
"""
# The GUnit test function to output for a disabled test.
DISABLED_GUNIT_TEMPLATE = """
TEST(%s, %s) { }
"""
# Timeout constants.
NCTEST_TERMINATE_TIMEOUT_SEC = 60
NCTEST_KILL_TIMEOUT_SEC = NCTEST_TERMINATE_TIMEOUT_SEC + 2
BUSY_LOOP_MAX_TIME_SEC = NCTEST_KILL_TIMEOUT_SEC * 2
def ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path):
assert parallelism >= 1
assert type(sourcefile_path) is str
assert type(cflags) is str
assert type(resultfile_path) is str
def ParseExpectation(expectation_string):
assert expectation_string is not None
match = EXTRACT_EXPECTATION_RE.match(expectation_string)
assert match
raw_expectation = ast.literal_eval(match.group(1))
assert type(raw_expectation) is list
expectation = []
for regex_str in raw_expectation:
assert type(regex_str) is str
expectation.append(re.compile(regex_str))
return expectation
def ExtractTestConfigs(sourcefile_path, suite_name):
sourcefile = open(sourcefile_path, 'r')
  # Start with the compiler sanity test. You always need at least one sanity
  # test to show that the compiler flags and configuration are not simply
  # wrong. Otherwise, a misconfigured compiler, or an error in the shared
  # portions of the .nc file, would cause all tests to erroneously pass.
test_configs = []
for line in sourcefile:
match_result = NCTEST_CONFIG_RE.match(line)
if not match_result:
continue
groups = match_result.groups()
# Grab the name and remove the defined() predicate if there is one.
name = groups[0]
strip_result = STRIP_DEFINED_RE.match(name)
if strip_result:
name = strip_result.group(1)
# Read expectations if there are any.
test_configs.append({'name': name,
'suite_name': suite_name,
'expectations': ParseExpectation(groups[1])})
sourcefile.close()
return test_configs
def StartTest(sourcefile_path, cflags, config):
# TODO(ajwong): Get the compiler from gyp.
cmdline = [os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../third_party/llvm-build/Release+Asserts/bin',
'clang++')]
cmdline.extend(shlex.split(cflags))
name = config['name']
expectations = config['expectations']
if expectations is not None:
cmdline.append('-D%s' % name)
cmdline.extend(['-std=c++11', '-o', '/dev/null', '-c', '-x', 'c++',
sourcefile_path])
process = subprocess.Popen(cmdline, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
now = time.time()
return {'proc': process,
'cmdline': ' '.join(cmdline),
'name': name,
'suite_name': config['suite_name'],
'terminate_timeout': now + NCTEST_TERMINATE_TIMEOUT_SEC,
'kill_timeout': now + NCTEST_KILL_TIMEOUT_SEC,
'started_at': now,
'aborted_at': 0,
'finished_at': 0,
'expectations': expectations}
def PassTest(resultfile, test):
# The 'started_at' key is only added if a test has been started.
if 'started_at' in test:
resultfile.write(SUCCESS_GUNIT_TEMPLATE % (
test['suite_name'], test['name'],
test['finished_at'] - test['started_at'],
test['started_at'], test['finished_at']))
else:
resultfile.write(DISABLED_GUNIT_TEMPLATE % (
test['suite_name'], test['name']))
def FailTest(resultfile, test, error, stdout=None, stderr=None):
  resultfile.write('#error "%s Failed: %s"\n' % (test['name'], error))
  resultfile.write('#error "compile line: %s"\n' % test['cmdline'])
  if stdout and len(stdout) != 0:
    resultfile.write('#error "%s stdout:"\n' % test['name'])
    for line in stdout.split('\n'):
      resultfile.write('#error "  %s:"\n' % line)
  if stderr and len(stderr) != 0:
    resultfile.write('#error "%s stderr:"\n' % test['name'])
    for line in stderr.split('\n'):
      resultfile.write('#error "  %s"\n' % line)
  resultfile.write('\n')
def WriteStats(resultfile, suite_name, timings):
stats_template = ("Started %f, Ended %f, Total %fs, Extract %fs, "
"Compile %fs, Process %fs")
total_secs = timings['results_processed'] - timings['started']
extract_secs = timings['extract_done'] - timings['started']
compile_secs = timings['compile_done'] - timings['extract_done']
process_secs = timings['results_processed'] - timings['compile_done']
resultfile.write('TEST(%s, Stats) { LOG(INFO) << "%s"; }\n' % (
suite_name, stats_template % (
timings['started'], timings['results_processed'], total_secs,
extract_secs, compile_secs, process_secs)))
def ProcessTestResult(resultfile, test):
  # Snap a copy of stdout and stderr into the test dictionary immediately,
  # because we can only call communicate() once on the Popen object, and lots
  # of the code below needs access to it.
proc = test['proc']
(stdout, stderr) = proc.communicate()
if test['aborted_at'] != 0:
FailTest(resultfile, test, "Compile timed out. Started %f ended %f." %
(test['started_at'], test['aborted_at']))
return
if proc.poll() == 0:
# Handle failure due to successful compile.
FailTest(resultfile, test,
'Unexpected successful compilation.',
stdout, stderr)
return
else:
# Check the output has the right expectations. If there are no
# expectations, then we just consider the output "matched" by default.
if len(test['expectations']) == 0:
PassTest(resultfile, test)
return
# Otherwise test against all expectations.
for regexp in test['expectations']:
if (regexp.search(stdout) is not None or
regexp.search(stderr) is not None):
PassTest(resultfile, test)
return
expectation_str = ', '.join(
["r'%s'" % regexp.pattern for regexp in test['expectations']])
FailTest(resultfile, test,
'Expectations [%s] did not match output.' % expectation_str,
stdout, stderr)
return
def CompleteAtLeastOneTest(resultfile, executing_tests):
finished_tests = []
busy_loop_timeout = time.time() + BUSY_LOOP_MAX_TIME_SEC
while len(finished_tests) == 0:
# If we don't make progress for too long, assume the code is just dead.
assert busy_loop_timeout > time.time()
read_set = []
for test in executing_tests.values():
read_set.extend([test['proc'].stderr, test['proc'].stdout])
result = select.select(read_set, [], read_set, NCTEST_TERMINATE_TIMEOUT_SEC)
now = time.time()
for test in executing_tests.values():
proc = test['proc']
if proc.poll() is not None:
test['finished_at'] = now
finished_tests.append(test)
elif test['terminate_timeout'] < now:
proc.terminate()
test['aborted_at'] = now
elif test['kill_timeout'] < now:
proc.kill()
test['aborted_at'] = now
for test in finished_tests:
del executing_tests[test['name']]
return finished_tests
def main():
if len(sys.argv) != 5:
print('Usage: %s <parallelism> <sourcefile> <cflags> <resultfile>' %
sys.argv[0])
sys.exit(1)
# In particular, this stops gcc from using smart quotes when in english UTF-8
# locales. This makes the expectation writing much easier.
os.environ['LC_ALL'] = 'C'
parallelism = int(sys.argv[1])
sourcefile_path = sys.argv[2]
cflags = sys.argv[3]
resultfile_path = sys.argv[4]
timings = {'started': time.time()}
ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path)
# Convert filename from underscores to CamelCase.
words = os.path.splitext(os.path.basename(sourcefile_path))[0].split('_')
words = [w.capitalize() for w in words]
suite_name = 'NoCompile' + ''.join(words)
test_configs = ExtractTestConfigs(sourcefile_path, suite_name)
timings['extract_done'] = time.time()
resultfile = StringIO.StringIO()
resultfile.write(RESULT_FILE_HEADER % sourcefile_path)
# Run the no-compile tests, but ensure we do not run more than |parallelism|
# tests at once.
timings['header_written'] = time.time()
executing_tests = {}
finished_tests = []
test = StartTest(
sourcefile_path,
cflags + ' -MMD -MF %s.d -MT %s' % (resultfile_path, resultfile_path),
{ 'name': 'NCTEST_SANITY',
'suite_name': suite_name,
'expectations': None,
})
executing_tests[test['name']] = test
for config in test_configs:
# CompleteAtLeastOneTest blocks until at least one test finishes. Thus, this
# acts as a semaphore. We cannot use threads + a real semaphore because
# subprocess forks, which can cause all sorts of hilarity with threads.
if len(executing_tests) >= parallelism:
finished_tests.extend(CompleteAtLeastOneTest(resultfile, executing_tests))
if config['name'].startswith('DISABLED_'):
PassTest(resultfile, config)
else:
test = StartTest(sourcefile_path, cflags, config)
assert test['name'] not in executing_tests
executing_tests[test['name']] = test
  # If there are no more tests to start, we still need to drain the running
# ones.
while len(executing_tests) > 0:
finished_tests.extend(CompleteAtLeastOneTest(resultfile, executing_tests))
timings['compile_done'] = time.time()
for test in finished_tests:
if test['name'] == 'NCTEST_SANITY':
_, stderr = test['proc'].communicate()
return_code = test['proc'].poll()
if return_code != 0:
sys.stderr.write(stderr)
continue
ProcessTestResult(resultfile, test)
timings['results_processed'] = time.time()
WriteStats(resultfile, suite_name, timings)
if return_code == 0:
with open(resultfile_path, 'w') as fd:
fd.write(resultfile.getvalue())
resultfile.close()
sys.exit(return_code)
if __name__ == '__main__':
main()
| true
| true
|
1c48c5eaecd04f40d264f244194b5aa2a851e574
| 180
|
py
|
Python
|
tests/unit/output/test_s3.py
|
gyliu513/dvc
|
d932405ee148767c5dbbbc394d6cd414270bf8f0
|
[
"Apache-2.0"
] | 2
|
2019-06-23T14:24:48.000Z
|
2019-07-08T12:22:53.000Z
|
tests/unit/output/test_s3.py
|
dnabanita7/dvc
|
638aaa254ea475947545edd046116befe82040f1
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/output/test_s3.py
|
dnabanita7/dvc
|
638aaa254ea475947545edd046116befe82040f1
|
[
"Apache-2.0"
] | 1
|
2019-09-02T00:29:40.000Z
|
2019-09-02T00:29:40.000Z
|
from dvc.output.s3 import OutputS3
from tests.unit.output.test_local import TestOutputLOCAL
class TestOutputS3(TestOutputLOCAL):
def _get_cls(self):
return OutputS3
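# Reuses all of TestOutputLOCAL's inherited test cases, swapping in OutputS3
# via _get_cls().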
| 20
| 56
| 0.777778
|
from dvc.output.s3 import OutputS3
from tests.unit.output.test_local import TestOutputLOCAL
class TestOutputS3(TestOutputLOCAL):
def _get_cls(self):
return OutputS3
| true
| true
|
1c48c5ee2edd81fc4846841b50a8f02464e6c22c
| 205
|
py
|
Python
|
GUITKinter/Label and Entry.py
|
zysundar/Python_programming
|
51384ecd2dfdb2cfe94b67605ca49bbd7edf49b6
|
[
"bzip2-1.0.6"
] | null | null | null |
GUITKinter/Label and Entry.py
|
zysundar/Python_programming
|
51384ecd2dfdb2cfe94b67605ca49bbd7edf49b6
|
[
"bzip2-1.0.6"
] | null | null | null |
GUITKinter/Label and Entry.py
|
zysundar/Python_programming
|
51384ecd2dfdb2cfe94b67605ca49bbd7edf49b6
|
[
"bzip2-1.0.6"
] | null | null | null |
from Tkinter import *
t=Tk()
l=Label(t,text="user name")
m=Label(t,text="password")
l.pack(side=LEFT)
m.pack(side=TOP)  # pack() has no CENTER side; TOP keeps the demo running
e=Entry(t,bd=5)
f=Entry(t,bd=6)
e.pack(side=RIGHT)
f.pack()
t.mainloop()
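# Note: pack() accepts side=TOP/BOTTOM/LEFT/RIGHT only; Tkinter's CENTER
# constant is meant for anchor/justify options, not for pack sides.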
| 17.083333
| 28
| 0.653659
|
from Tkinter import *
t=Tk()
l=Label(t,text="user name")
m=Label(t,text="password")
l.pack(side=LEFT)
m.pack(side=TOP)
e=Entry(t,bd=5)
f=Entry(t,bd=6)
e.pack(side=RIGHT)
f.pack()
t.mainloop()
| true
| true
|
1c48c659be69a411c3a89d94e86e6ed9d0376790
| 19,843
|
py
|
Python
|
packit/local_project.py
|
wickdChromosome/packit
|
ee31b8bbab579679f928a05db8125897bf2cad62
|
[
"MIT"
] | null | null | null |
packit/local_project.py
|
wickdChromosome/packit
|
ee31b8bbab579679f928a05db8125897bf2cad62
|
[
"MIT"
] | null | null | null |
packit/local_project.py
|
wickdChromosome/packit
|
ee31b8bbab579679f928a05db8125897bf2cad62
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import shutil
from contextlib import contextmanager
from pathlib import Path
from typing import Optional, Union, Iterable, Iterator
import git
from ogr import GitlabService
from ogr.abstract import GitProject, GitService
from ogr.parsing import parse_git_repo
from packit.exceptions import PackitException
from packit.utils.repo import RepositoryCache, is_git_repo, get_repo, is_a_git_ref
logger = logging.getLogger(__name__)
class LocalProject:
"""
Class representing a cloned repository
and its API to the remote git-forge (e.g. GitHub/GitLab/Pagure)
- git_repo: instance of git.Repo
- working_dir: working directory for the project
- ref: git ref (branch/tag/commit) if set, then checkouted
- git_project: instance of ogr.GitProject (remote API for project)
- git_service: instance of ogr.GitService (tokens for remote API)
- git_url: remote url (used for cloning)
- full_name: "$namespace/$repo"
- namespace: namespace of the remote project
- repo_name: name of the remote project
Local project can compute other attributes if it is possible.
"""
    # setting defaults to str (not None) because `None == ""` is False, which
    # would break the emptiness checks below
def __init__(
self,
git_repo: git.Repo = None,
working_dir: Union[Path, str] = None,
ref: str = "",
git_project: GitProject = None,
git_service: GitService = None,
git_url: str = "",
full_name: str = "",
namespace: str = "",
repo_name: str = "",
offline: bool = False,
refresh: bool = True,
remote: str = "",
pr_id: Optional[str] = None,
cache: Optional[RepositoryCache] = None,
) -> None:
"""
:param git_repo: git.Repo
:param working_dir: Path|str (working directory for the project)
:param ref: str (git ref (branch/tag/commit) if set, then checked out)
:param git_project: ogr.GitProject (remote API for project)
:param git_service: ogr.GitService (tokens for remote API)
:param git_url: str (remote url used for cloning)
:param full_name: str ("$namespace/$repo")
:param namespace: str (namespace of the remote project)
:param repo_name: str (name of the remote project)
:param offline: bool (do not use any network action, defaults to False)
:param refresh: bool (calculate the missing attributes, defaults to True)
:param remote: name of the git remote to use
:param pr_id: ID of the pull request to fetch and check out
"""
self.working_dir_temporary = False
self.git_repo: git.Repo = git_repo
self.working_dir: Path = Path(working_dir) if working_dir else None
self._ref = ref
self.git_project = git_project
self.git_service = git_service
self.git_url = git_url
self.full_name = full_name
self.repo_name = repo_name
self.namespace = namespace
self.offline = offline
self.remote = remote
self.cache = cache
logger.debug(
"Arguments received in the init method of the LocalProject class: \n"
f"git_repo: {git_repo}\n"
f"working_dir: {working_dir}\n"
f"ref: {ref}\n"
f"git_project: {git_project}\n"
f"git_service: {git_service}\n"
f"git_url: {git_url}\n"
f"full_name: {full_name}\n"
f"namespace: {namespace}\n"
f"repo_name: {repo_name}\n"
f"offline: {offline}\n"
f"refresh {refresh}\n"
f"remote: {remote}\n"
f"pr_id: {pr_id}\n"
f"cache: {cache}\n"
)
if refresh:
self.refresh_the_arguments()
# p-s gives us both, commit hash for a PR and PR ID as well
# since we want to have 'pr123' in the release field, let's check out
# the PR itself, so if both are specified, PR ID > ref
if pr_id:
self.checkout_pr(pr_id)
elif ref:
self.checkout_ref(ref)
def __repr__(self):
return (
"LocalProject("
f"working_dir_temporary='{self.working_dir_temporary}', "
f"git_repo='{self.git_repo}', "
f"working_dir='{self.working_dir}', "
f"ref='{self.ref}', "
f"git_project='{self.git_project}', "
f"git_service='{self.git_service}', "
f"git_url='{self.git_url}', "
f"full_name='{self.full_name}', "
f"repo_name='{self.repo_name}', "
f"namespace='{self.namespace}', "
f"offline='{self.offline}', "
f"remote='{self.remote}', "
f"commit_hexsha='{self.commit_hexsha}')"
)
@property
def ref(self) -> Optional[str]:
"""
Name of the HEAD if the HEAD is not detached,
else commit hash.
"""
if self.git_repo:
return self._get_ref_from_git_repo()
return None
@property
def commit_hexsha(self) -> str:
"""
Get the short commit hash for the current commit.
:return: first 8 characters of the current commit
"""
if self.git_repo.head.is_detached:
return self.git_repo.head.commit.hexsha[:8]
else:
return self.git_repo.active_branch.commit.hexsha[:8]
def clean(self):
if self.working_dir_temporary:
logger.debug(f"Cleaning: {self.working_dir}")
shutil.rmtree(self.working_dir)
self.working_dir_temporary = False
def refresh_the_arguments(self):
change = True
while change:
# we are trying to get new information while it is possible
# new iteration is done only if there was a change in the last iteration
change = (
self._parse_repo_name_full_name_and_namespace()
or self._parse_git_repo_from_working_dir()
or self._parse_git_project_from_repo_namespace_and_git_service()
or self._parse_git_service_from_git_project()
or self._parse_ref_from_git_repo()
or self._parse_working_dir_from_git_repo()
or self._parse_git_repo_from_git_url()
or self._parse_git_url_from_git_project()
or self._parse_repo_name_from_git_project()
or self._parse_namespace_from_git_project()
or self._parse_git_url_from_git_repo()
or self._parse_namespace_from_git_url()
)
@contextmanager
def git_checkout_block(self, ref: str = None):
"""Allows temporarily checkout another git-ref."""
current_head = self._get_ref_from_git_repo()
if ref:
logger.debug(
f"Leaving old ref {current_head!r} and checkout new ref {ref!r}"
)
if ref not in self.git_repo.refs:
if not is_a_git_ref(self.git_repo, ref):
raise PackitException(
f"Git ref {ref!r} not found, cannot checkout."
)
ref = self.git_repo.commit(ref).hexsha
self.git_repo.git.checkout(ref)
yield
if ref:
logger.debug(
f"Leaving new ref {ref!r} and checkout old ref {current_head!r}"
)
self.git_repo.git.checkout(current_head)
def _parse_repo_name_full_name_and_namespace(self):
change = False
if self.repo_name and self.namespace and not self.full_name:
self.full_name = f"{self.namespace}/{self.repo_name}"
change = True
if self.full_name and not self.namespace:
self.namespace = self.full_name.split("/")[0]
change = True
if self.full_name and not self.repo_name:
self.repo_name = self.full_name.split("/")[1]
change = True
if change:
logger.debug(f"Parsed full repo name '{self.namespace}/{self.repo_name}'.")
return change
def _parse_git_repo_from_working_dir(self) -> bool:
"""
Get the repo from the self.working_dir (clone self.git_url if it is not a git repo)
"""
if self.working_dir and not self.git_repo:
logger.debug(
"`working_dir` is set and `git_repo` is not: let's discover..."
)
if is_git_repo(directory=self.working_dir):
logger.debug("It's a git repo!")
self.git_repo = git.Repo(path=self.working_dir)
return True
elif self.git_url and not self.offline:
self.git_repo = self._get_repo(
url=self.git_url, directory=self.working_dir
)
logger.debug(
f"We just cloned git repo {self.git_url} to {self.working_dir}."
)
return True
return False
def _parse_git_project_from_repo_namespace_and_git_service(
self,
) -> bool:
if (
self.repo_name
and self.namespace
and self.git_service
and not self.git_project
and not self.offline
):
self.git_project = self.git_service.get_project(
repo=self.repo_name, namespace=self.namespace
)
logger.debug(f"Parsed project '{self.namespace}/{self.repo_name}'.")
return True
return False
def _parse_git_service_from_git_project(self):
if not (self.git_project is None or self.git_service or self.offline):
self.git_service = self.git_project.service
logger.debug(
f"Parsed service {self.git_service} from the project {self.git_project}."
)
return True
return False
def _parse_ref_from_git_repo(self):
if self.git_repo and not self._ref:
self._ref = self._get_ref_from_git_repo()
logger.debug(f"Parsed ref {self._ref!r} from the repo {self.git_repo}.")
return bool(self._ref)
return False
def _parse_working_dir_from_git_repo(self):
if self.git_repo and not self.working_dir:
self.working_dir = Path(self.git_repo.working_dir)
logger.debug(
f"Parsed working directory {self.working_dir} from the repo {self.git_repo}."
)
return True
return False
def _parse_git_repo_from_git_url(self):
if (
self.git_url
and not self.working_dir
and not self.git_repo
and not self.offline
):
self.git_repo = self._get_repo(url=self.git_url)
self.working_dir_temporary = True
logger.debug(f"Parsed repo {self.git_repo} from url {self.git_url!r}.")
return True
return False
def _parse_git_url_from_git_project(self):
if self.git_project and not self.git_url and not self.offline:
self.git_url = self.git_project.get_git_urls()["git"]
logger.debug(
f"Parsed remote url {self.git_url!r} from the project {self.git_project}."
)
return True
return False
def _parse_repo_name_from_git_project(self):
if self.git_project and not self.repo_name:
self.repo_name = self.git_project.repo
if not self.repo_name:
raise PackitException(
"Repo name should have been set but isn't, this is bug!"
)
logger.debug(
f"Parsed repo name {self.repo_name!r} from the git project {self.git_project}."
)
return True
return False
def _parse_namespace_from_git_project(self):
if self.git_project and not self.namespace:
self.namespace = self.git_project.namespace
logger.debug(
f"Parsed namespace {self.namespace!r} from the project {self.git_project}."
)
return True
return False
def _parse_git_url_from_git_repo(self):
if not self.git_repo or self.git_url:
return False
if self.remote:
self.git_url = next(self.git_repo.remote(self.remote).urls)
elif self.git_repo.remotes:
for remote in self.git_repo.remotes:
if remote.name == "origin":
# origin as a default
self.git_url = remote.url
break
else:
# or use first one
self.git_url = next(self.git_repo.remotes[0].urls)
else:
# Repo has no remotes
return False
logger.debug(
f"Parsed remote url {self.git_url!r} from the repo {self.git_repo}."
)
return True
def _parse_namespace_from_git_url(self):
if self.git_url and not (self.namespace and self.repo_name):
parsed_repo_url = parse_git_repo(potential_url=self.git_url)
if (
parsed_repo_url.namespace == self.namespace
and parsed_repo_url.repo == self.repo_name
):
return False
self.namespace, self.repo_name = (
parsed_repo_url.namespace,
parsed_repo_url.repo,
)
logger.debug(
f"Parsed namespace and repo name ({self.namespace}, {self.repo_name}) "
f"from url {self.git_url!r}."
)
return True
return False
def _get_ref_from_git_repo(self) -> str:
if self.git_repo.head.is_detached:
return self.git_repo.head.commit.hexsha
else:
return self.git_repo.active_branch.name
def _get_repo(self, url, directory=None):
if self.cache:
return self.cache.get_repo(url, directory=directory)
return get_repo(url=url, directory=directory)
def checkout_ref(self, ref: str):
"""Check out selected ref in the git repo"""
logger.info(f"Checking out ref {ref!r}.")
self.git_repo.git.checkout(ref)
logger.debug(f"Current commit is '{self.git_repo.commit()}'")
def create_branch(
self, branch_name: str, base: str = "HEAD", setup_tracking: bool = False
) -> git.Head:
"""
Create a new git branch in git
:param branch_name: name of the branch to check out and fetch
:param base: we base our new branch on this one
:param setup_tracking: set up remote tracking
(an exception will be raised if the branch is not in the remote)
:return the branch which was just created
"""
# it's not an error if the branch already exists
if branch_name in self.git_repo.branches:
logger.debug(
f"It seems that branch {branch_name!r} already exists, checking it out."
)
head = self.git_repo.branches[branch_name]
else:
head = self.git_repo.create_head(branch_name, commit=base)
if setup_tracking:
origin = self.git_repo.remote("origin")
if branch_name in origin.refs:
remote_ref = origin.refs[branch_name]
else:
raise PackitException(
f"Remote origin doesn't have ref {branch_name!r}."
)
# this is important to fedpkg: build can't find the tracking branch otherwise
head.set_tracking_branch(remote_ref)
return head
def checkout_pr(self, pr_id: Union[str, int]):
"""
Fetch selected PR and check it out.
"""
logger.info(f"Checking out PR {pr_id}.")
is_gitlab = isinstance(self.git_service, GitlabService)
remote_ref = "+refs/{}/{}/head".format(
"merge-requests" if is_gitlab else "pull", pr_id
)
remote_name = self.remote or "origin"
local_ref = f"refs/remotes/{remote_name}/pr/{pr_id}"
local_branch = f"pr/{pr_id}"
self.git_repo.remotes[remote_name].fetch(f"{remote_ref}:{local_ref}")
self.git_repo.create_head(local_branch, f"{remote_name}/{local_branch}")
self.git_repo.branches[local_branch].checkout()
logger.info(f"Checked out commit {self.git_repo.head.commit}")
def checkout_release(self, tag: str) -> None:
logger.info(f"Checking out upstream version {tag}.")
try:
self.git_repo.git.checkout(tag)
except Exception as ex:
raise PackitException(f"Cannot checkout release tag: {ex!r}.")
def push(
self, refspec: str, remote_name: str = "origin", force: bool = False
) -> Iterable[git.PushInfo]:
"""
push changes to a remote using provided refspec
:param refspec: e.g. "main", "HEAD:f30"
:param remote_name: name of the remote where we push
:param force: force push: yes or no?
:return: a list of git.remote.PushInfo objects - have fun
"""
return self.git_repo.remote(name=remote_name).push(refspec=refspec, force=force)
def stage(self, path: str = ".", force: bool = True):
"""
stage provided path from working tree to index
force: bypass gitignore
"""
self.git_repo.git.add(path, force=force)
def commit(
self,
message: str,
body: Optional[str] = None,
allow_empty: bool = True,
amend: bool = False,
):
"""Commit staged changes"""
other_message_kwargs = {"message": body} if body else {}
# some of the commits may be empty and it's not an error,
# e.g. extra source files
self.git_repo.git.commit(
allow_empty=allow_empty, m=message, amend=amend, **other_message_kwargs
)
def get_commits(self, ref: str = "HEAD") -> Iterator[git.Commit]:
return self.git_repo.iter_commits(ref)
def fetch(self, remote: str, refspec: Optional[str] = None):
"""
fetch refs from a remote to this repo
@param remote: str or path of the repo we fetch from
@param refspec: see man git-fetch
"""
if refspec:
self.git_repo.git.fetch(remote, refspec)
else:
self.git_repo.git.fetch(remote, "--tags")
def rebase(self, ref: str):
self.git_repo.git.rebase(ref)
def reset(self, ref: str):
"""git reset --hard $ref"""
self.git_repo.head.reset(ref, index=True, working_tree=True)
def __del__(self):
self.clean()
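# A minimal usage sketch (hedged: hypothetical setup, assumes the current
# working directory is an existing git clone; offline=True avoids all
# network access):
if __name__ == "__main__":
    lp = LocalProject(working_dir=".", offline=True)
    print(lp.ref, lp.commit_hexsha)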
| 37.510397
| 95
| 0.600615
|
import logging
import shutil
from contextlib import contextmanager
from pathlib import Path
from typing import Optional, Union, Iterable, Iterator
import git
from ogr import GitlabService
from ogr.abstract import GitProject, GitService
from ogr.parsing import parse_git_repo
from packit.exceptions import PackitException
from packit.utils.repo import RepositoryCache, is_git_repo, get_repo, is_a_git_ref
logger = logging.getLogger(__name__)
class LocalProject:
def __init__(
self,
git_repo: git.Repo = None,
working_dir: Union[Path, str] = None,
ref: str = "",
git_project: GitProject = None,
git_service: GitService = None,
git_url: str = "",
full_name: str = "",
namespace: str = "",
repo_name: str = "",
offline: bool = False,
refresh: bool = True,
remote: str = "",
pr_id: Optional[str] = None,
cache: Optional[RepositoryCache] = None,
) -> None:
self.working_dir_temporary = False
self.git_repo: git.Repo = git_repo
self.working_dir: Path = Path(working_dir) if working_dir else None
self._ref = ref
self.git_project = git_project
self.git_service = git_service
self.git_url = git_url
self.full_name = full_name
self.repo_name = repo_name
self.namespace = namespace
self.offline = offline
self.remote = remote
self.cache = cache
logger.debug(
"Arguments received in the init method of the LocalProject class: \n"
f"git_repo: {git_repo}\n"
f"working_dir: {working_dir}\n"
f"ref: {ref}\n"
f"git_project: {git_project}\n"
f"git_service: {git_service}\n"
f"git_url: {git_url}\n"
f"full_name: {full_name}\n"
f"namespace: {namespace}\n"
f"repo_name: {repo_name}\n"
f"offline: {offline}\n"
f"refresh {refresh}\n"
f"remote: {remote}\n"
f"pr_id: {pr_id}\n"
f"cache: {cache}\n"
)
if refresh:
self.refresh_the_arguments()
        # if both pr_id and ref are specified, PR ID takes precedence over ref
if pr_id:
self.checkout_pr(pr_id)
elif ref:
self.checkout_ref(ref)
def __repr__(self):
return (
"LocalProject("
f"working_dir_temporary='{self.working_dir_temporary}', "
f"git_repo='{self.git_repo}', "
f"working_dir='{self.working_dir}', "
f"ref='{self.ref}', "
f"git_project='{self.git_project}', "
f"git_service='{self.git_service}', "
f"git_url='{self.git_url}', "
f"full_name='{self.full_name}', "
f"repo_name='{self.repo_name}', "
f"namespace='{self.namespace}', "
f"offline='{self.offline}', "
f"remote='{self.remote}', "
f"commit_hexsha='{self.commit_hexsha}')"
)
@property
def ref(self) -> Optional[str]:
if self.git_repo:
return self._get_ref_from_git_repo()
return None
@property
def commit_hexsha(self) -> str:
if self.git_repo.head.is_detached:
return self.git_repo.head.commit.hexsha[:8]
else:
return self.git_repo.active_branch.commit.hexsha[:8]
def clean(self):
if self.working_dir_temporary:
logger.debug(f"Cleaning: {self.working_dir}")
shutil.rmtree(self.working_dir)
self.working_dir_temporary = False
def refresh_the_arguments(self):
change = True
while change:
# we are trying to get new information while it is possible
# new iteration is done only if there was a change in the last iteration
change = (
self._parse_repo_name_full_name_and_namespace()
or self._parse_git_repo_from_working_dir()
or self._parse_git_project_from_repo_namespace_and_git_service()
or self._parse_git_service_from_git_project()
or self._parse_ref_from_git_repo()
or self._parse_working_dir_from_git_repo()
or self._parse_git_repo_from_git_url()
or self._parse_git_url_from_git_project()
or self._parse_repo_name_from_git_project()
or self._parse_namespace_from_git_project()
or self._parse_git_url_from_git_repo()
or self._parse_namespace_from_git_url()
)
@contextmanager
def git_checkout_block(self, ref: str = None):
current_head = self._get_ref_from_git_repo()
if ref:
logger.debug(
f"Leaving old ref {current_head!r} and checkout new ref {ref!r}"
)
if ref not in self.git_repo.refs:
if not is_a_git_ref(self.git_repo, ref):
raise PackitException(
f"Git ref {ref!r} not found, cannot checkout."
)
ref = self.git_repo.commit(ref).hexsha
self.git_repo.git.checkout(ref)
yield
if ref:
logger.debug(
f"Leaving new ref {ref!r} and checkout old ref {current_head!r}"
)
self.git_repo.git.checkout(current_head)
def _parse_repo_name_full_name_and_namespace(self):
change = False
if self.repo_name and self.namespace and not self.full_name:
self.full_name = f"{self.namespace}/{self.repo_name}"
change = True
if self.full_name and not self.namespace:
self.namespace = self.full_name.split("/")[0]
change = True
if self.full_name and not self.repo_name:
self.repo_name = self.full_name.split("/")[1]
change = True
if change:
logger.debug(f"Parsed full repo name '{self.namespace}/{self.repo_name}'.")
return change
def _parse_git_repo_from_working_dir(self) -> bool:
if self.working_dir and not self.git_repo:
logger.debug(
"`working_dir` is set and `git_repo` is not: let's discover..."
)
if is_git_repo(directory=self.working_dir):
logger.debug("It's a git repo!")
self.git_repo = git.Repo(path=self.working_dir)
return True
elif self.git_url and not self.offline:
self.git_repo = self._get_repo(
url=self.git_url, directory=self.working_dir
)
logger.debug(
f"We just cloned git repo {self.git_url} to {self.working_dir}."
)
return True
return False
def _parse_git_project_from_repo_namespace_and_git_service(
self,
) -> bool:
if (
self.repo_name
and self.namespace
and self.git_service
and not self.git_project
and not self.offline
):
self.git_project = self.git_service.get_project(
repo=self.repo_name, namespace=self.namespace
)
logger.debug(f"Parsed project '{self.namespace}/{self.repo_name}'.")
return True
return False
def _parse_git_service_from_git_project(self):
if not (self.git_project is None or self.git_service or self.offline):
self.git_service = self.git_project.service
logger.debug(
f"Parsed service {self.git_service} from the project {self.git_project}."
)
return True
return False
def _parse_ref_from_git_repo(self):
if self.git_repo and not self._ref:
self._ref = self._get_ref_from_git_repo()
logger.debug(f"Parsed ref {self._ref!r} from the repo {self.git_repo}.")
return bool(self._ref)
return False
def _parse_working_dir_from_git_repo(self):
if self.git_repo and not self.working_dir:
self.working_dir = Path(self.git_repo.working_dir)
logger.debug(
f"Parsed working directory {self.working_dir} from the repo {self.git_repo}."
)
return True
return False
def _parse_git_repo_from_git_url(self):
if (
self.git_url
and not self.working_dir
and not self.git_repo
and not self.offline
):
self.git_repo = self._get_repo(url=self.git_url)
self.working_dir_temporary = True
logger.debug(f"Parsed repo {self.git_repo} from url {self.git_url!r}.")
return True
return False
def _parse_git_url_from_git_project(self):
if self.git_project and not self.git_url and not self.offline:
self.git_url = self.git_project.get_git_urls()["git"]
logger.debug(
f"Parsed remote url {self.git_url!r} from the project {self.git_project}."
)
return True
return False
def _parse_repo_name_from_git_project(self):
if self.git_project and not self.repo_name:
self.repo_name = self.git_project.repo
if not self.repo_name:
raise PackitException(
"Repo name should have been set but isn't, this is bug!"
)
logger.debug(
f"Parsed repo name {self.repo_name!r} from the git project {self.git_project}."
)
return True
return False
def _parse_namespace_from_git_project(self):
if self.git_project and not self.namespace:
self.namespace = self.git_project.namespace
logger.debug(
f"Parsed namespace {self.namespace!r} from the project {self.git_project}."
)
return True
return False
def _parse_git_url_from_git_repo(self):
if not self.git_repo or self.git_url:
return False
if self.remote:
self.git_url = next(self.git_repo.remote(self.remote).urls)
elif self.git_repo.remotes:
for remote in self.git_repo.remotes:
if remote.name == "origin":
self.git_url = remote.url
break
else:
self.git_url = next(self.git_repo.remotes[0].urls)
else:
return False
logger.debug(
f"Parsed remote url {self.git_url!r} from the repo {self.git_repo}."
)
return True
def _parse_namespace_from_git_url(self):
if self.git_url and not (self.namespace and self.repo_name):
parsed_repo_url = parse_git_repo(potential_url=self.git_url)
if (
parsed_repo_url.namespace == self.namespace
and parsed_repo_url.repo == self.repo_name
):
return False
self.namespace, self.repo_name = (
parsed_repo_url.namespace,
parsed_repo_url.repo,
)
logger.debug(
f"Parsed namespace and repo name ({self.namespace}, {self.repo_name}) "
f"from url {self.git_url!r}."
)
return True
return False
def _get_ref_from_git_repo(self) -> str:
if self.git_repo.head.is_detached:
return self.git_repo.head.commit.hexsha
else:
return self.git_repo.active_branch.name
def _get_repo(self, url, directory=None):
if self.cache:
return self.cache.get_repo(url, directory=directory)
return get_repo(url=url, directory=directory)
def checkout_ref(self, ref: str):
logger.info(f"Checking out ref {ref!r}.")
self.git_repo.git.checkout(ref)
logger.debug(f"Current commit is '{self.git_repo.commit()}'")
def create_branch(
self, branch_name: str, base: str = "HEAD", setup_tracking: bool = False
) -> git.Head:
if branch_name in self.git_repo.branches:
logger.debug(
f"It seems that branch {branch_name!r} already exists, checking it out."
)
head = self.git_repo.branches[branch_name]
else:
head = self.git_repo.create_head(branch_name, commit=base)
if setup_tracking:
origin = self.git_repo.remote("origin")
if branch_name in origin.refs:
remote_ref = origin.refs[branch_name]
else:
raise PackitException(
f"Remote origin doesn't have ref {branch_name!r}."
)
head.set_tracking_branch(remote_ref)
return head
def checkout_pr(self, pr_id: Union[str, int]):
logger.info(f"Checking out PR {pr_id}.")
is_gitlab = isinstance(self.git_service, GitlabService)
remote_ref = "+refs/{}/{}/head".format(
"merge-requests" if is_gitlab else "pull", pr_id
)
remote_name = self.remote or "origin"
local_ref = f"refs/remotes/{remote_name}/pr/{pr_id}"
local_branch = f"pr/{pr_id}"
self.git_repo.remotes[remote_name].fetch(f"{remote_ref}:{local_ref}")
self.git_repo.create_head(local_branch, f"{remote_name}/{local_branch}")
self.git_repo.branches[local_branch].checkout()
logger.info(f"Checked out commit {self.git_repo.head.commit}")
def checkout_release(self, tag: str) -> None:
logger.info(f"Checking out upstream version {tag}.")
try:
self.git_repo.git.checkout(tag)
except Exception as ex:
raise PackitException(f"Cannot checkout release tag: {ex!r}.")
def push(
self, refspec: str, remote_name: str = "origin", force: bool = False
) -> Iterable[git.PushInfo]:
return self.git_repo.remote(name=remote_name).push(refspec=refspec, force=force)
def stage(self, path: str = ".", force: bool = True):
self.git_repo.git.add(path, force=force)
def commit(
self,
message: str,
body: Optional[str] = None,
allow_empty: bool = True,
amend: bool = False,
):
other_message_kwargs = {"message": body} if body else {}
        # some of the commits may be empty and it's not an error
self.git_repo.git.commit(
allow_empty=allow_empty, m=message, amend=amend, **other_message_kwargs
)
def get_commits(self, ref: str = "HEAD") -> Iterator[git.Commit]:
return self.git_repo.iter_commits(ref)
def fetch(self, remote: str, refspec: Optional[str] = None):
if refspec:
self.git_repo.git.fetch(remote, refspec)
else:
self.git_repo.git.fetch(remote, "--tags")
def rebase(self, ref: str):
self.git_repo.git.rebase(ref)
def reset(self, ref: str):
self.git_repo.head.reset(ref, index=True, working_tree=True)
def __del__(self):
self.clean()
| true
| true
|
1c48c6886726c1cb9b76872b441d3af09b2da743
| 682
|
py
|
Python
|
string/substring_search/brute_force.py
|
ImadDabbura/data-structures-and-algorithms
|
d8eaf545ddcd443a1b36483337c778587bf52366
|
[
"Apache-2.0"
] | null | null | null |
string/substring_search/brute_force.py
|
ImadDabbura/data-structures-and-algorithms
|
d8eaf545ddcd443a1b36483337c778587bf52366
|
[
"Apache-2.0"
] | null | null | null |
string/substring_search/brute_force.py
|
ImadDabbura/data-structures-and-algorithms
|
d8eaf545ddcd443a1b36483337c778587bf52366
|
[
"Apache-2.0"
] | null | null | null |
"""Implementation of Brute-Force algorithm of substring search."""
def find_brute_force(T, P):
"""Return the index of first occurance of P; otherwise, returns -1."""
n, m = len(T), len(P)
if m == 0:
return 0
for i in range(n - m + 1):
j = 0
while j < m and T[i + j] == P[j]:
j += 1
if j == m:
return i
return -1
def find_brute_force_v1(T, P):
    """Single-index variant: return the index of the first occurrence of P, or -1."""
n, m = len(T), len(P)
if m == 0:
return 0
i = j = 0
while i < n and j < m:
if T[i] == P[j]:
j += 1
else:
i -= j
j = 0
i += 1
if j == m:
return i - m
return -1
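# A minimal self-check sketch (hypothetical inputs) exercising both variants:
if __name__ == "__main__":
    assert find_brute_force("hello world", "world") == 6
    assert find_brute_force_v1("hello world", "world") == 6
    assert find_brute_force("hello", "xyz") == -1
    assert find_brute_force_v1("", "") == 0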
| 20.666667
| 74
| 0.428152
|
def find_brute_force(T, P):
n, m = len(T), len(P)
if m == 0:
return 0
for i in range(n - m + 1):
j = 0
while j < m and T[i + j] == P[j]:
j += 1
if j == m:
return i
return -1
def find_brute_force_v1(T, P):
n, m = len(T), len(P)
if m == 0:
return 0
i = j = 0
while i < n and j < m:
if T[i] == P[j]:
j += 1
else:
i -= j
j = 0
i += 1
if j == m:
return i - m
return -1
| true
| true
|
1c48c79e0c90a3d612b502860b62b83d4205477d
| 11,545
|
py
|
Python
|
drl_negotiation/core.py
|
YueNing/tn_source_code
|
515713c9349a2444021fdc9b02fd483f5ffd3e56
|
[
"MIT"
] | null | null | null |
drl_negotiation/core.py
|
YueNing/tn_source_code
|
515713c9349a2444021fdc9b02fd483f5ffd3e56
|
[
"MIT"
] | null | null | null |
drl_negotiation/core.py
|
YueNing/tn_source_code
|
515713c9349a2444021fdc9b02fd483f5ffd3e56
|
[
"MIT"
] | null | null | null |
'''
Core class, functions
Author: naodongbanana
E-Mail: n1085633848@outlook.com
'''
import os
import sys
import logging
import numpy as np
from scml.scml2020 import SCML2020World, SCML2020Agent, is_system_agent
from typing import Optional, Tuple
from drl_negotiation.hyperparameters import *
import yaml
import copy
import pickle
class AgentState:
'''
Agent state
'''
def __init__(self):
# physical position for rendering
self.p_pos = (0, 0)
# others state
self.o_negotiation_step = 0
# financial report
        self.f: np.ndarray = np.zeros(3)
# self.f_init = 0
# self.f_begin = 0
# self.f_end = 0
# current step
# self.o_current_step = 0
# management state, e.g. issues range
# self.m = None
# communication utterance
self.c = None
class NegotiationRequestAction:
DEFAULT_REQUEST = 0.0
ACCEPT_REQUEST = 1.0
REJECT_REQUEST = -1.0
class Action:
'''
agent's action
m: management action
e.g. discrete action --- accept or reject negotiation request
continuous action --- range of issues for negotiating,
(min, max, min, max, min, max)
c: communication action
e.g. send the info into public channel, secured, needs, negotiations, requests,
or info of competitors predicted by agent
'''
def __init__(self):
        # agent management action, used after training, in the test period
self.s = None
self.s_vel = None
# seller, used in training
self.m = None
self.m_vel = 10
# buyer, used in training
self.b = None
self.b_vel = 10
# agent communication action, communication channel
self.c = None
class MySCML2020Agent(SCML2020Agent):
'''
My scml 2020 agent, subclass of scml2020agent,
action_callback: action decided by the callback
hook:
init
'''
Owner = 'My'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# agents are manageable by default
self.manageable = MANAGEABLE
# cannot send communication signals
self.silent = SLIENT
# cannot observe the world
self.blind = BLIND
# management noise amount
self.m_nois = None
# communication noise amount
self.c_nois = None
# manageable range
self.m_range = 1.0
self.b_range = 1.0
# state
self.state = AgentState()
# action
self.action = Action()
# heuristic behavior to execute
self.action_callback = None
# agents are interactive
        self.interactive = False
# agents are adversary
self.adversary = False
@property
    def running_negotiations(self) -> Tuple[int, int]:
"""
Returns:
            number of running negotiations as a (sell, buy) tuple
"""
return self._count(super(MySCML2020Agent, self).running_negotiations)
@property
    def negotiation_requests(self) -> Tuple[int, int]:
"""
Returns:
            number of standing negotiation requests as a (sell, buy) tuple
"""
return self._count(super(MySCML2020Agent, self).negotiation_requests)
def _count(self, negotiations):
sell = 0
buy = 0
for n in negotiations:
if n.annotation["seller"] == self.id:
                sell += 1
elif n.annotation["buyer"] == self.id:
                buy += 1
return sell, buy
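    # e.g. with two negotiations where this agent is the seller and one where
    # it is the buyer, _count(...) returns (2, 1).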
def _get_obs(self, seller=True, scenario="scml"):
# local observation
# TODO: different observation of buyer and seller, will be implemented here
if scenario == "scml":
o_m = self.awi.profile.costs
o_m = o_m[:, self.awi.profile.processes]
# agent information, agent's
o_a = np.array([self._horizon])
# catalog prices of products
o_u_c = self.awi.catalog_prices
# TODO: excepted value after predict
o_u_e = np.array([self.expected_inputs, self.expected_outputs, self.input_cost, self.output_price])
# TODO: trading strategy, needed and secured
o_u_t = np.array([self.outputs_needed, self.outputs_secured, self.inputs_needed, self.inputs_secured])
# running negotiation and negotiation request of agent
o_q_n = np.array([
self.running_negotiations,
self.negotiation_requests,
])
o_t_c = np.array([self.awi.current_step / self.awi.n_steps])
# 2. Economic gap
economic_gaps = []
economic_gaps.append(self.state.f[2] - self.state.f[1])
economic_gaps = np.array(economic_gaps)
# return np.concatenate(economic_gaps + o_m.flatten() + o_a + o_u_c + o_u_e + o_u_t + o_q_n.flatten() + o_t_c)
return np.concatenate((economic_gaps.flatten(), o_m.flatten(), o_a, o_u_c, o_q_n.flatten(), o_t_c))
def init(self):
super(MySCML2020Agent, self).init()
if RUNNING_IN_SCML2020World:
if not self.train:
self._setup_model()
class TrainWorld(SCML2020World):
"""
Multi-Agent, SCML world, used for training
"""
def __init__(self, configuration=None, *args, **kwargs):
        # MADDPG-driven agents, heuristic agents, script-driven agents, interactive agents
# self.agents = []
# SELLER, BUYER
self.system_entities = []
# communication channel dimensionality
self.dim_c = 2
# negotiation management dimensionality
self.dim_m = DIM_M # seller
self.dim_b = DIM_B # buyer
# simulation timestep
self.dt = 0.1
# world done
self.__done = False
# set up the scml2020world
if configuration is None:
configuration = SCML2020World.generate(
*args,
**kwargs
)
self.configuration = copy.deepcopy(configuration)
super().__init__(**self.configuration)
        # set action_callback for agents that don't have one
for agent in self.agents.values():
if not hasattr(agent, 'action_callback'):
if is_system_agent(agent.id):
agent.action_callback = 'system'
self.system_entities.append(agent)
else:
agent.action_callback = 'heuristic'
if not hasattr(agent, 'interactive'):
agent.interactive = False
if not hasattr(agent, 'state'):
agent.state = AgentState()
@property
def entities(self):
'''
agents + system_entities
'''
return [agent for agent in self.agents.values()]
@property
def policy_agents(self):
'''
        e.g. MADDPG-driven agents
'''
return [agent for agent in self.entities if agent.action_callback is None]
@property
def heuristic_agents(self):
'''
e.g. heuristic agents, BuyCheapSellExpensiveAgent
'''
return [agent for agent in self.entities if agent.action_callback=='heuristic']
@property
def interactive_agents(self):
'''
e.g. controlled by user
'''
return [agent for agent in self.entities if agent.interactive]
@property
def script_agents(self):
'''
        my script-driven agents, with a callable action_callback
'''
return [agent for agent in self.entities if callable(agent.action_callback)]
def step(self):
        # actions of policy agents are preset in the environment;
        # here we set actions for script-controlled agents,
        # i.e. agents with a callable action_callback
for agent in self.script_agents:
agent.action = agent.action_callback(agent, self)
        # the simulation has already ended
if self.time >= self.time_limit:
self.__done = True
return
if not super().step():
self.__done = True
return
# update agents' state
# policy agents
for agent in self.policy_agents:
self.update_agent_state(agent)
@property
def world_done(self):
'''
running info of world
'''
return self.__done
def update_agent_state(self, agent: Optional[MySCML2020Agent]):
        # first step: initialize the financial state f = [initial, begin, end] balances
if agent.awi.current_step == 0:
f_init = [_.initial_balance for _ in self.factories if _.agent_id == agent.id][0]
f_begin = f_init
f_end = f_begin
agent.state.f = np.array([f_init, f_begin, f_end])
else:
# set financial status
if agent.blind:
# agent.state.m = np.zeros(self.dim_m)
agent.state.f = np.zeros(3)
else:
# update agent state, get the management state
# qvalues = (1, agent.target_quantity(agent.state.o_step, agent.state.o_is_sell))
# tvalues = agent._trange(agent.state.o_negotiation_step, agent.state.o_step)
# uvalues = agent._urange(agent.state.o_step, agent.state.o_is_sell, tvalues)
# agent.state.m = [qvalues, tvalues, uvalues]
f_end = [_.current_balance for _ in self.factories if _.agent_id == agent.id][0]
agent.state.f[2] = f_end
                # TODO: interactive test
                agent.state.o_negotiation_step = agent.awi.current_step
                if agent.state.o_negotiation_step == agent.awi.current_step:
                    # f_begin is updated only after the reward has been calculated
                    pass
else:
f_begin = f_end
agent.state.f[1] = f_begin
# set communication status
if agent.silent:
agent.state.c = np.zeros(self.dim_c)
else:
noise = np.random.randn(*agent.action.c.shape) * agent.c_nois if agent.c_nois else 0.0
agent.state.c = agent.action.c + noise
def save_config(self, file_name: str):
dump_data = {
"agent_types": [_._type_name() for _ in self.configuration['agent_types']],
'agent_params': self.configuration['agent_params'],
"n_steps": self.n_steps
}
try:
with open(file_name+'.yaml', "w") as file:
yaml.safe_dump(dump_data, file)
        except FileNotFoundError as e:
            logging.info(f"file {file_name} not found")
            logging.error(str(e))
            os.makedirs(os.path.dirname(file_name), exist_ok=True)
            try:
                with open(file_name + '.yaml', "w") as file:
                    yaml.safe_dump(dump_data, file)
            except FileNotFoundError as e:
                logging.info(f"file {file_name} not found!")
                logging.error(str(e))
            except Exception as e:
                logging.info(f"other error while opening file {file_name}!")
                logging.error(str(e))
                sys.exit(1)
with open(file_name+'.pkl', 'wb') as file:
pickle.dump(dump_data, file)
# super().save_config(file_name=file_name)
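# A minimal usage sketch (illustrative only, guarded so it never runs on
# import): keyword arguments are forwarded to SCML2020World.generate, and
# "checkpoints/train_world" is a hypothetical output path. It assumes the
# hyperparameters module configures the agent's training-related attributes.
if __name__ == "__main__":
    world = TrainWorld(agent_types=[MySCML2020Agent], n_steps=10)
    while not world.world_done:
        world.step()
    world.save_config("checkpoints/train_world")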
| 32.158774
| 122
| 0.569684
|
1c48c84f8ee59e6804807e245e7a717305b54ca8
| 20,452
|
py
|
Python
|
course/analytics.py
|
inducer/courseflow
|
0f9786e3616dbedf08365d81a731f672b97ba9f5
|
[
"Unlicense"
] | 284
|
2015-01-09T12:02:28.000Z
|
2022-03-27T14:30:46.000Z
|
course/analytics.py
|
inducer/courseflow
|
0f9786e3616dbedf08365d81a731f672b97ba9f5
|
[
"Unlicense"
] | 799
|
2015-02-26T08:49:46.000Z
|
2022-03-31T16:09:26.000Z
|
course/analytics.py
|
inducer/courseflow
|
0f9786e3616dbedf08365d81a731f672b97ba9f5
|
[
"Unlicense"
] | 120
|
2015-01-30T18:00:56.000Z
|
2022-03-28T06:24:43.000Z
|
__copyright__ = "Copyright (C) 2014 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from django.utils.translation import gettext as _, pgettext
from django.shortcuts import ( # noqa
render, get_object_or_404, redirect)
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.db import connection
from django.urls import reverse
from django.core.exceptions import ObjectDoesNotExist
from django import http
from django.contrib import messages
from course.utils import course_view, render_course_page, PageInstanceCache
from course.models import (
FlowSession,
FlowPageVisit,
flow_permission)
from course.constants import (
participation_permission as pperm,
)
from course.content import get_flow_desc
# {{{ flow list
@login_required
@course_view
def flow_list(pctx):
if not pctx.has_permission(pperm.view_analytics):
raise PermissionDenied(_("may not view analytics"))
cursor = connection.cursor()
cursor.execute("select distinct flow_id from course_flowsession "
"where course_id=%s order by flow_id",
[pctx.course.id])
flow_ids = [row[0] for row in cursor.fetchall()]
return render_course_page(pctx, "course/analytics-flows.html", {
"flow_ids": flow_ids,
})
# }}}
# {{{ histogram tool
class BinInfo:
def __init__(self, title, raw_weight, percentage, url=None):
self.title = title
self.raw_weight = raw_weight
self.percentage = percentage
self.url = url
class Histogram:
def __init__(self, num_bin_count=10, num_bin_starts=None,
num_min_value=None, num_max_value=None,
num_enforce_bounds=False, num_log_bins=False,
num_bin_title_formatter=str):
self.string_weights = {}
self.num_values = []
self.num_bin_starts = num_bin_starts
self.num_min_value = num_min_value
self.num_max_value = num_max_value
self.num_bin_count = num_bin_count
self.num_log_bins = num_log_bins
self.num_bin_title_formatter = num_bin_title_formatter
def add_data_point(self, value, weight=1):
if isinstance(value, str):
self.string_weights[value] = \
self.string_weights.get(value, 0) + weight
elif value is None:
self.add_data_point(
"".join([
"(",
pgettext("No data", "None"),
")"]),
weight)
else:
if (self.num_max_value is not None
and value > self.num_max_value):
self.add_data_point(
"".join([
"(",
pgettext("Value of grade", "value greater than max"),
")"]),
weight)
elif (self.num_min_value is not None
and value < self.num_min_value):
self.add_data_point(
"".join([
"(",
pgettext("Value of grade", "value smaller than min"),
")"]),
weight)
else:
self.num_values.append((value, weight))
def total_weight(self):
return (
sum(weight for val, weight in self.num_values)
+ sum(self.string_weights.values()))
def get_bin_info_list(self):
min_value = self.num_min_value
max_value = self.num_max_value
if self.num_bin_starts is not None:
num_bin_starts = self.num_bin_starts
else:
if min_value is None:
if self.num_values:
min_value, _ = min(self.num_values)
else:
min_value = 1
if max_value is None:
if self.num_values:
max_value, _ = max(self.num_values)
else:
max_value = 1
if self.num_log_bins:
min_value = max(min_value, 1e-15)
max_value = max(max_value, 1.01*min_value)
from math import log, exp
bin_width = (log(max_value) - log(min_value))/self.num_bin_count
num_bin_starts = [
exp(log(min_value)+bin_width*i)
for i in range(self.num_bin_count)]
# Rounding error means exp(log(min_value)) may be greater
# than min_value, so set start of first bin to min_value
num_bin_starts[0] = min_value
else:
bin_width = (max_value - min_value)/self.num_bin_count
num_bin_starts = [
min_value+bin_width*i
for i in range(self.num_bin_count)]
bins = [0 for i in range(len(num_bin_starts))]
temp_string_weights = self.string_weights.copy()
oob = pgettext("Value in histogram", "<out of bounds>")
from bisect import bisect
for value, weight in self.num_values:
if ((max_value is not None
and value > max_value)
or value < num_bin_starts[0]):
temp_string_weights[oob] = \
temp_string_weights.get(oob, 0) + weight
else:
bin_nr = bisect(num_bin_starts, value)-1
bins[bin_nr] += weight
total_weight = self.total_weight()
num_bin_info = [
BinInfo(
title=self.num_bin_title_formatter(start),
raw_weight=weight,
percentage=(
100*weight/total_weight
if total_weight
else None))
for start, weight in zip(num_bin_starts, bins)]
str_bin_info = [
BinInfo(
title=key,
raw_weight=temp_string_weights[key],
percentage=100*temp_string_weights[key]/total_weight)
for key in sorted(temp_string_weights)]
return num_bin_info + str_bin_info
def html(self):
bin_info_list = self.get_bin_info_list()
max_len = max(len(bin.title) for bin in bin_info_list)
if max_len < 20:
from django.template.loader import render_to_string
return render_to_string("course/histogram-wide.html", {
"bin_info_list": bin_info_list,
})
else:
from django.template.loader import render_to_string
return render_to_string("course/histogram.html", {
"bin_info_list": bin_info_list,
})
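def _example_histogram_usage():
    # Illustrative sketch only, never called: shows how the Histogram above
    # is meant to be driven. The sample scores are made up.
    hist = Histogram(num_bin_count=5, num_min_value=0, num_max_value=100)
    for score in [12, 47, 47, 95, None]:
        hist.add_data_point(score)  # None lands in a "(None)" string bin
    return [(b.title, b.raw_weight, b.percentage)
            for b in hist.get_bin_info_list()]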
# }}}
def is_flow_multiple_submit(flow_desc):
if not hasattr(flow_desc, "rules"):
return False
for rule in flow_desc.rules.access:
if flow_permission.change_answer in rule.permissions:
return True
return False
def is_page_multiple_submit(flow_desc, page_desc):
result = is_flow_multiple_submit(flow_desc)
page_rules = getattr(page_desc, "access_rules", None)
if page_rules is None:
return result
add_permissions = getattr(page_rules, "add_permissions", None)
remove_permissions = getattr(page_rules, "remove_permissions", None)
if result:
if remove_permissions is not None:
if flow_permission.change_answer in remove_permissions:
result = False
else:
if add_permissions is not None:
if flow_permission.change_answer in add_permissions:
result = True
return result
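def _example_multiple_submit_check():
    # Illustrative sketch only, never called: throwaway descriptors built
    # with SimpleNamespace show how a page-level access rule overrides the
    # flow-level change_answer permission.
    from types import SimpleNamespace
    rule = SimpleNamespace(permissions=[flow_permission.change_answer])
    flow_desc = SimpleNamespace(rules=SimpleNamespace(access=[rule]))
    page_desc = SimpleNamespace(
            access_rules=SimpleNamespace(
                add_permissions=None,
                remove_permissions=[flow_permission.change_answer]))
    # the flow allows changing answers, but the page removes that permission
    assert is_flow_multiple_submit(flow_desc)
    assert not is_page_multiple_submit(flow_desc, page_desc)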
# {{{ flow analytics
def make_grade_histogram(pctx, flow_id):
qset = FlowSession.objects.filter(
course=pctx.course,
flow_id=flow_id,
participation__roles__permissions__permission=(
pperm.included_in_grade_statistics))
hist = Histogram(
num_min_value=0,
num_max_value=100)
for session in qset:
if session.in_progress:
hist.add_data_point(
"".join(["<",
pgettext("Status of session", "in progress"),
">"]))
else:
hist.add_data_point(session.points_percentage())
return hist
class PageAnswerStats:
def __init__(self, group_id, page_id, title, average_correctness,
average_emptiness, answer_count, total_count, url=None):
self.group_id = group_id
self.page_id = page_id
self.title = title
self.average_correctness_percent = 99.99*average_correctness
self.average_emptiness_percent = 99.99*average_emptiness
self.average_wrongness_percent = 99.99*(
1-average_correctness-average_emptiness)
self.answer_count = answer_count
self.total_count = total_count
self.url = url
def safe_div(num, denom):
if denom == 0:
return 0
return num/denom
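# e.g. safe_div(3, 0) == 0 -- used below so empty statistics render as zero
# rather than raising ZeroDivisionError.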
def make_page_answer_stats_list(pctx, flow_id, restrict_to_first_attempt):
flow_desc = get_flow_desc(pctx.repo, pctx.course, flow_id,
pctx.course_commit_sha)
page_cache = PageInstanceCache(pctx.repo, pctx.course, flow_id)
page_info_list = []
for group_desc in flow_desc.groups:
for page_desc in group_desc.pages:
points = 0
graded_count = 0
empty_count = 0
answer_count = 0
total_count = 0
visits = (FlowPageVisit.objects
.filter(
flow_session__course=pctx.course,
flow_session__flow_id=flow_id,
flow_session__participation__roles__permissions__permission=(
pperm.included_in_grade_statistics),
page_data__group_id=group_desc.id,
page_data__page_id=page_desc.id,
is_submitted_answer=True,
))
if connection.features.can_distinct_on_fields:
if restrict_to_first_attempt:
visits = (visits
.distinct("flow_session__participation__id")
.order_by("flow_session__participation__id",
"visit_time"))
elif is_page_multiple_submit(flow_desc, page_desc):
visits = (visits
.distinct("page_data__id")
.order_by("page_data__id", "-visit_time"))
visits = (visits
.select_related("flow_session")
.select_related("page_data"))
answer_expected = False
title = None
for visit in visits:
page = page_cache.get_page(group_desc.id, page_desc.id,
pctx.course_commit_sha)
answer_expected = answer_expected or page.expects_answer()
from course.page import PageContext
grading_page_context = PageContext(
course=pctx.course,
repo=pctx.repo,
commit_sha=pctx.course_commit_sha,
flow_session=visit.flow_session)
title = page.title(grading_page_context, visit.page_data.data)
answer_feedback = visit.get_most_recent_feedback()
if visit.answer is not None:
answer_count += 1
else:
empty_count += 1
total_count += 1
if (answer_feedback is not None
and answer_feedback.correctness is not None):
if visit.answer is None:
assert answer_feedback.correctness == 0
else:
points += answer_feedback.correctness
graded_count += 1
if not answer_expected:
continue
page_info_list.append(
PageAnswerStats(
group_id=group_desc.id,
page_id=page_desc.id,
title=title,
average_correctness=safe_div(points, graded_count),
average_emptiness=safe_div(
empty_count, graded_count),
answer_count=answer_count,
total_count=total_count,
url=reverse(
"relate-page_analytics",
args=(
pctx.course_identifier,
flow_id,
group_desc.id,
page_desc.id,
))))
return page_info_list
def make_time_histogram(pctx, flow_id):
qset = FlowSession.objects.filter(
course=pctx.course,
flow_id=flow_id)
from relate.utils import string_concat
hist = Histogram(
num_log_bins=True,
num_bin_title_formatter=(
lambda minutes: string_concat(
"$>$ %.1f ",
pgettext("Minute (time unit)", "min"))
% minutes))
for session in qset:
if session.in_progress:
hist.add_data_point(
"".join(["<",
pgettext("Status of session", "in progress"),
">"]))
else:
delta = session.completion_time - session.start_time
minutes = delta.total_seconds() / 60
hist.add_data_point(minutes)
return hist
def count_participants(pctx, flow_id):
if not connection.features.can_distinct_on_fields:
return None
qset = (FlowSession.objects
.filter(
course=pctx.course,
flow_id=flow_id)
.order_by("participation__id")
.distinct("participation__id"))
return qset.count()
@login_required
@course_view
def flow_analytics(pctx, flow_id):
if not pctx.has_permission(pperm.view_analytics):
raise PermissionDenied(_("may not view analytics"))
restrict_to_first_attempt = int(
bool(pctx.request.GET.get("restrict_to_first_attempt") == "1"))
try:
stats_list = make_page_answer_stats_list(pctx, flow_id,
restrict_to_first_attempt)
except ObjectDoesNotExist:
messages.add_message(pctx.request, messages.ERROR,
_("Flow '%s' was not found in the repository, but it exists in "
"the database--maybe it was deleted?")
% flow_id)
raise http.Http404()
return render_course_page(pctx, "course/analytics-flow.html", {
"flow_identifier": flow_id,
"grade_histogram": make_grade_histogram(pctx, flow_id),
"page_answer_stats_list": stats_list,
"time_histogram": make_time_histogram(pctx, flow_id),
"participant_count": count_participants(pctx, flow_id),
"restrict_to_first_attempt": restrict_to_first_attempt,
})
# }}}
# {{{ page analytics
class AnswerStats:
def __init__(self, normalized_answer, correctness, count,
percentage):
self.normalized_answer = normalized_answer
self.correctness = correctness
self.count = count
self.percentage = percentage
@login_required
@course_view
def page_analytics(pctx, flow_id, group_id, page_id):
if not pctx.has_permission(pperm.view_analytics):
raise PermissionDenied(_("may not view analytics"))
flow_desc = get_flow_desc(pctx.repo, pctx.course, flow_id,
pctx.course_commit_sha)
restrict_to_first_attempt = int(
bool(pctx.request.GET.get("restrict_to_first_attempt") == "1"))
page_cache = PageInstanceCache(pctx.repo, pctx.course, flow_id)
visits = (FlowPageVisit.objects
.filter(
flow_session__course=pctx.course,
flow_session__flow_id=flow_id,
flow_session__participation__roles__permissions__permission=(
pperm.included_in_grade_statistics),
page_data__group_id=group_id,
page_data__page_id=page_id,
is_submitted_answer=True,
))
if connection.features.can_distinct_on_fields:
is_multiple_submit = is_flow_multiple_submit(flow_desc)
if restrict_to_first_attempt:
visits = (visits
.distinct("flow_session__participation__id")
.order_by("flow_session__participation__id", "visit_time"))
elif is_multiple_submit:
visits = (visits
.distinct("page_data__id")
.order_by("page_data__id", "-visit_time"))
visits = (visits
.select_related("flow_session")
.select_related("page_data"))
normalized_answer_and_correctness_to_count = {}
title = None
body = None
total_count = 0
graded_count = 0
for visit in visits:
page = page_cache.get_page(group_id, page_id, pctx.course_commit_sha)
from course.page import PageContext
grading_page_context = PageContext(
course=pctx.course,
repo=pctx.repo,
commit_sha=pctx.course_commit_sha,
flow_session=visit.flow_session)
title = page.title(grading_page_context, visit.page_data.data)
body = page.analytic_view_body(grading_page_context, visit.page_data.data)
normalized_answer = page.normalized_answer(
grading_page_context, visit.page_data.data, visit.answer)
answer_feedback = visit.get_most_recent_feedback()
if answer_feedback is not None:
key = (normalized_answer, answer_feedback.correctness)
normalized_answer_and_correctness_to_count[key] = \
normalized_answer_and_correctness_to_count.get(key, 0) + 1
graded_count += 1
else:
key = (normalized_answer, None)
normalized_answer_and_correctness_to_count[key] = \
normalized_answer_and_correctness_to_count.get(key, 0) + 1
total_count += 1
answer_stats = []
for (normalized_answer, correctness), count in \
normalized_answer_and_correctness_to_count.items():
answer_stats.append(
AnswerStats(
normalized_answer=normalized_answer,
correctness=correctness,
count=count,
percentage=safe_div(100 * count, total_count)))
answer_stats = sorted(
answer_stats,
key=lambda astats: astats.percentage,
reverse=True)
return render_course_page(pctx, "course/analytics-page.html", {
"flow_identifier": flow_id,
"group_id": group_id,
"page_id": page_id,
"title": title,
"body": body,
"answer_stats_list": answer_stats,
"restrict_to_first_attempt": restrict_to_first_attempt,
})
# }}}
# vim: foldmethod=marker
| 34.547297
| 85
| 0.583953
|
1c48ca72aee02d8e1582caaa2ffbd93fd9a5f68a
| 35,864
|
py
|
Python
|
angr/analyses/reaching_definitions/engine_ail.py
|
mikenawrocki/angr
|
57f5593e902f5ad58709bc8f4ce7859134300ffb
|
[
"BSD-2-Clause"
] | 1
|
2021-05-21T02:41:28.000Z
|
2021-05-21T02:41:28.000Z
|
angr/analyses/reaching_definitions/engine_ail.py
|
mikenawrocki/angr
|
57f5593e902f5ad58709bc8f4ce7859134300ffb
|
[
"BSD-2-Clause"
] | null | null | null |
angr/analyses/reaching_definitions/engine_ail.py
|
mikenawrocki/angr
|
57f5593e902f5ad58709bc8f4ce7859134300ffb
|
[
"BSD-2-Clause"
] | null | null | null |
from itertools import chain
from typing import Iterable, Optional
import logging
import archinfo
import claripy
import ailment
from ...engines.light import SimEngineLight, SimEngineLightAILMixin, SpOffset
from ...errors import SimEngineError, SimMemoryMissingError
from ...calling_conventions import DEFAULT_CC, SimRegArg, SimStackArg
from ...storage.memory_mixins.paged_memory.pages.multi_values import MultiValues
from ...knowledge_plugins.key_definitions.atoms import Register, Tmp, MemoryLocation
from ...knowledge_plugins.key_definitions.constants import OP_BEFORE, OP_AFTER
from ...knowledge_plugins.key_definitions.live_definitions import Definition
from .external_codeloc import ExternalCodeLocation
from .rd_state import ReachingDefinitionsState
l = logging.getLogger(name=__name__)
class SimEngineRDAIL(
SimEngineLightAILMixin,
SimEngineLight,
): # pylint:disable=abstract-method
arch: archinfo.Arch
state: ReachingDefinitionsState
def __init__(self, project, call_stack, maximum_local_call_depth, function_handler=None):
super().__init__()
self.project = project
self._call_stack = call_stack
self._maximum_local_call_depth = maximum_local_call_depth
self._function_handler = function_handler
self._visited_blocks = None
self._dep_graph = None
def process(self, state, *args, **kwargs):
self._dep_graph = kwargs.pop('dep_graph', None)
self._visited_blocks = kwargs.pop('visited_blocks', None)
        # we are using a completely different state. Therefore, we directly call our _process() method until
        # SimEngine becomes flexible enough.
try:
self._process(
state,
None,
block=kwargs.pop('block', None),
)
except SimEngineError as e:
if kwargs.pop('fail_fast', False) is True:
raise e
return self.state, self._visited_blocks, self._dep_graph
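    # Note: process() returns the updated state together with the visited
    # blocks and the dependency graph so the caller can thread them through
    # successive invocations of the engine.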
def sp_offset(self, offset: int):
return self.state.stack_address(offset)
#
# Private methods
#
@staticmethod
def _external_codeloc():
return ExternalCodeLocation()
#
# AIL statement handlers
#
def _handle_Stmt(self, stmt):
if self.state.analysis:
self.state.analysis.insn_observe(self.ins_addr, stmt, self.block, self.state, OP_BEFORE)
super()._handle_Stmt(stmt)
if self.state.analysis:
self.state.analysis.insn_observe(self.ins_addr, stmt, self.block, self.state, OP_AFTER)
def _ail_handle_Assignment(self, stmt):
"""
:param ailment.Assignment stmt:
:return:
"""
src = self._expr(stmt.src)
dst = stmt.dst
if src is None:
src = self.state.top(dst.bits)
if isinstance(dst, ailment.Tmp):
self.state.kill_and_add_definition(Tmp(dst.tmp_idx, dst.size), self._codeloc(), src)
self.tmps[dst.tmp_idx] = src
elif isinstance(dst, ailment.Register):
reg = Register(dst.reg_offset, dst.size)
self.state.kill_and_add_definition(reg, self._codeloc(), src)
if dst.reg_offset == self.arch.sp_offset:
# TODO: Special logic that frees all definitions above the current stack pointer
pass
else:
l.warning('Unsupported type of Assignment dst %s.', type(dst).__name__)
def _ail_handle_Store(self, stmt: ailment.Stmt.Store) -> None:
data: MultiValues = self._expr(stmt.data)
addr: MultiValues = self._expr(stmt.addr)
size: int = stmt.size
if stmt.guard is not None:
guard = self._expr(stmt.guard) # pylint:disable=unused-variable
else:
guard = None # pylint:disable=unused-variable
addr_v = addr.one_value()
if addr_v is not None and not self.state.is_top(addr_v):
if self.state.is_stack_address(addr_v):
stack_offset = self.state.get_stack_offset(addr_v)
if stack_offset is not None:
memory_location = MemoryLocation(SpOffset(self.arch.bits, stack_offset), size, endness=stmt.endness)
else:
memory_location = None
elif self.state.is_heap_address(addr_v):
memory_location = None
else:
memory_location = MemoryLocation(addr_v._model_concrete.value, size, endness=stmt.endness)
if memory_location is not None:
self.state.kill_and_add_definition(memory_location,
self._codeloc(),
data,
endness=stmt.endness)
def _ail_handle_Jump(self, stmt):
_ = self._expr(stmt.target)
def _ail_handle_ConditionalJump(self, stmt):
cond = self._expr(stmt.condition) # pylint:disable=unused-variable
true_target = self._expr(stmt.true_target) # pylint:disable=unused-variable
false_target = self._expr(stmt.false_target) # pylint:disable=unused-variable
ip = Register(self.arch.ip_offset, self.arch.bytes)
codeloc = self._codeloc()
# Use the same annotated data for kill_definitions() to avoid creating ASTs multiple times
# Note that the cached dummy definition is always the IP register. This is intentional.
top_v = self.state.top(self.arch.bits)
dummy_def = Definition(Register(self.arch.ip_offset, self.arch.bytes), codeloc, dummy=True)
top_v = self.state.annotate_with_def(top_v, dummy_def)
top_mv = MultiValues(offset_to_values={0: {top_v}})
self.state.kill_definitions(ip, codeloc, data=top_mv, annotated=True)
# kill all cc_ops
if 'cc_op' in self.arch.registers:
self.state.kill_definitions(Register(*self.arch.registers['cc_op']), codeloc, data=top_mv, annotated=True)
self.state.kill_definitions(Register(*self.arch.registers['cc_dep1']), codeloc, data=top_mv, annotated=True)
self.state.kill_definitions(Register(*self.arch.registers['cc_dep2']), codeloc, data=top_mv, annotated=True)
self.state.kill_definitions(Register(*self.arch.registers['cc_ndep']), codeloc, data=top_mv, annotated=True)
def _ail_handle_Call(self, stmt: ailment.Stmt.Call):
self._handle_Call_base(stmt, is_expr=False)
def _handle_Call_base(self, stmt: ailment.Stmt.Call, is_expr: bool=False):
target = self._expr(stmt.target) # pylint:disable=unused-variable
codeloc = self._codeloc()
# Use the same annotated data for kill_definitions() to avoid creating ASTs multiple times
# Note that the cached dummy definition is always the IP register. This is intentional.
top_v = self.state.top(self.arch.bits)
dummy_def = Definition(Register(self.arch.ip_offset, self.arch.bytes), codeloc, dummy=True)
top_v = self.state.annotate_with_def(top_v, dummy_def)
top_mv = MultiValues(offset_to_values={0: {top_v}})
ip = Register(self.arch.ip_offset, self.arch.bytes)
self.state.kill_definitions(ip, codeloc, data=top_mv, annotated=True)
        # When stmt.args are available, the used registers/stack variables are decided by stmt.args. Otherwise we
        # fall back to using all argument registers.
if stmt.args is not None:
# getting used expressions from stmt.args
used_exprs = stmt.args
elif stmt.calling_convention is not None and (
stmt.calling_convention.func_ty is not None or stmt.calling_convention.args is not None):
# getting used expressions from the function prototype, its arguments, and the calling convention
used_exprs = [ ]
for arg_loc in stmt.calling_convention.arg_locs():
if isinstance(arg_loc, SimRegArg):
used_exprs.append(Register(self.arch.registers[arg_loc.reg_name], arg_loc.size))
elif isinstance(arg_loc, SimStackArg):
used_exprs.append(SpOffset(arg_loc.size * 8, arg_loc.stack_offset, is_base=False))
else:
l.warning("_handle_Call(): Unsupported arg_loc %r.", arg_loc)
else:
used_exprs = None
# All caller-saved registers will always be killed.
if stmt.calling_convention is not None:
cc = stmt.calling_convention
else:
# Fall back to the default calling convention
l.debug("Unknown calling convention for function %s. Fall back to default calling convention.", target)
cc = self.project.factory.cc()
killed_vars = [ Register(*self.arch.registers[reg_name]) for reg_name in cc.CALLER_SAVED_REGS ]
# Add uses
if used_exprs is None:
used_exprs = [ Register(*self.arch.registers[reg_name]) for reg_name in cc.ARG_REGS ]
for expr in used_exprs:
self._expr(expr)
# Add definition
return_reg_offset = None
if not is_expr:
if stmt.ret_expr is not None:
if isinstance(stmt.ret_expr, ailment.Expr.Register):
return_reg_offset = stmt.ret_expr.reg_offset
return_reg_size = stmt.ret_expr.size
reg_atom = Register(return_reg_offset, return_reg_size)
top = self.state.top(return_reg_size * self.arch.byte_width)
self.state.kill_and_add_definition(reg_atom, codeloc, MultiValues(offset_to_values={0: {top}}))
else:
l.warning("Unsupported ret_expr type %s. Please report to GitHub.", stmt.ret_expr.__class__)
else:
# Return value is redefined here, so it is not a dummy value
return_reg_offset, return_reg_size = self.arch.registers[cc.RETURN_VAL.reg_name]
self.state.kill_definitions(Register(return_reg_offset, return_reg_size), codeloc, dummy=False)
# Kill those ones that should be killed
for var in killed_vars:
if var.reg_offset == return_reg_offset:
# Skip the return variable
continue
self.state.kill_definitions(var, codeloc, data=top_mv, annotated=True)
# kill all cc_ops
if 'cc_op' in self.arch.registers:
self.state.kill_definitions(Register(*self.arch.registers['cc_op']), codeloc, data=top_mv, annotated=True)
self.state.kill_definitions(Register(*self.arch.registers['cc_dep1']), codeloc, data=top_mv, annotated=True)
self.state.kill_definitions(Register(*self.arch.registers['cc_dep2']), codeloc, data=top_mv, annotated=True)
self.state.kill_definitions(Register(*self.arch.registers['cc_ndep']), codeloc, data=top_mv, annotated=True)
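    # _handle_Call_base above, in order: evaluates the call target, kills the
    # IP definition, adds uses for the argument expressions (explicit args,
    # calling-convention argument locations, or all argument registers),
    # defines the return-value register, and finally kills the caller-saved
    # registers and the cc_* pseudo-registers.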
def _ail_handle_Return(self, stmt: ailment.Stmt.Return): # pylint:disable=unused-argument
if stmt.ret_exprs:
# Handle return expressions
for ret_expr in stmt.ret_exprs:
self._expr(ret_expr)
return
# No return expressions are available.
# consume registers that are potentially useful
# TODO: Consider the calling convention of the current function
cc_cls = DEFAULT_CC.get(self.project.arch.name, None)
if cc_cls is None:
l.warning("Unknown default calling convention for architecture %s.", self.project.arch.name)
return
cc = cc_cls(self.project.arch)
codeloc = self._codeloc()
size = self.project.arch.bits // 8
# return value
if cc.RETURN_VAL is not None:
if isinstance(cc.RETURN_VAL, SimRegArg):
offset = cc.RETURN_VAL._fix_offset(None, size, arch=self.project.arch)
self.state.add_use(Register(offset, size), codeloc)
# base pointer
# TODO: Check if the stack base pointer is used as a stack base pointer in this function or not
self.state.add_use(Register(self.project.arch.bp_offset, self.project.arch.bits // 8), codeloc)
        # We don't add sp since stack pointers are supposed to be eliminated in AIL. This is definitely a hack, though.
# self.state.add_use(Register(self.project.arch.sp_offset, self.project.arch.bits // 8), codeloc)
def _ail_handle_DirtyStatement(self, stmt: ailment.Stmt.DirtyStatement):
# TODO: The logic below is subject to change when ailment.Stmt.DirtyStatement is changed
tmp = stmt.dirty_stmt.dst
cvt_sizes = {
'ILGop_IdentV128': 16,
'ILGop_Ident64': 8,
'ILGop_Ident32': 4,
'ILGop_16Uto32': 4,
'ILGop_16Sto32': 4,
'ILGop_8Uto32': 4,
'ILGop_8Sto32': 4,
}
size = cvt_sizes[stmt.dirty_stmt.cvt]
self.state.kill_and_add_definition(Tmp(tmp, size), self._codeloc(), None)
self.tmps[tmp] = None
#
# AIL expression handlers
#
def _ail_handle_BV(self, expr: claripy.ast.Base) -> MultiValues:
return MultiValues(offset_to_values={0: {expr}})
def _ail_handle_Tmp(self, expr: ailment.Expr.Tmp) -> MultiValues:
self.state.add_use(Tmp(expr.tmp_idx, expr.size), self._codeloc())
return super()._ail_handle_Tmp(expr)
def _ail_handle_CallExpr(self, expr: ailment.Stmt.Call) -> MultiValues:
self._handle_Call_base(expr, is_expr=True)
return MultiValues(offset_to_values={0: {self.state.top(expr.bits)}})
def _ail_handle_Register(self, expr) -> MultiValues:
self.state: ReachingDefinitionsState
reg_offset = expr.reg_offset
size = expr.size
# bits = size * 8
reg_atom = Register(reg_offset, size)
# first check if it is ever defined
try:
value: MultiValues = self.state.register_definitions.load(reg_offset, size=size)
except SimMemoryMissingError:
# the value does not exist
top = self.state.top(size * self.state.arch.byte_width)
# annotate it
top = self.state.annotate_with_def(top, Definition(reg_atom, ExternalCodeLocation()))
value = MultiValues(offset_to_values={0: {top}})
# write it back
self.state.kill_and_add_definition(reg_atom, self._external_codeloc(), value)
# extract Definitions
defs: Optional[Iterable[Definition]] = None
for vs in value.values.values():
for v in vs:
if defs is None:
defs = self.state.extract_defs(v)
else:
defs = chain(defs, self.state.extract_defs(v))
if defs is None:
# define it right away as an external dependency
self.state.kill_and_add_definition(reg_atom, self._external_codeloc(), value)
else:
codeloc = self._codeloc()
for def_ in defs:
self.state.add_use_by_def(def_, codeloc)
return value
def _ail_handle_Load(self, expr: ailment.Expr.Load) -> MultiValues:
addrs: MultiValues = self._expr(expr.addr)
size = expr.size
bits = expr.bits
if expr.guard is not None:
guard = self._expr(expr.guard) # pylint:disable=unused-variable
alt = self._expr(expr.alt) # pylint:disable=unused-variable
else:
guard = None # pylint:disable=unused-variable
alt = None # pylint:disable=unused-variable
# convert addrs from MultiValues to a list of valid addresses
if len(addrs.values) == 1:
addrs_v = next(iter(addrs.values.values()))
else:
top = self.state.top(bits)
# annotate it
dummy_atom = MemoryLocation(0, size, endness=expr.endness)
top = self.state.annotate_with_def(top, Definition(dummy_atom, ExternalCodeLocation()))
# add use
self.state.add_use(dummy_atom, self._codeloc())
return MultiValues(offset_to_values={0: {top}})
result: Optional[MultiValues] = None
for addr in addrs_v:
if not isinstance(addr, claripy.ast.Base):
continue
if addr.concrete:
# a concrete address
addr = addr._model_concrete.value
try:
vs: MultiValues = self.state.memory_definitions.load(addr, size=size, endness=expr.endness)
except SimMemoryMissingError:
continue
memory_location = MemoryLocation(addr, size, endness=expr.endness)
self.state.add_use(memory_location, self._codeloc())
result = result.merge(vs) if result is not None else vs
elif self.state.is_stack_address(addr):
stack_offset = self.state.get_stack_offset(addr)
if stack_offset is not None:
stack_addr = self.state.live_definitions.stack_offset_to_stack_addr(stack_offset)
try:
vs: MultiValues = self.state.stack_definitions.load(stack_addr, size=size, endness=expr.endness)
except SimMemoryMissingError:
continue
memory_location = MemoryLocation(SpOffset(self.arch.bits, stack_offset), size, endness=expr.endness)
self.state.add_use(memory_location, self._codeloc())
result = result.merge(vs) if result is not None else vs
else:
l.debug('Memory address %r undefined or unsupported at pc %#x.', addr, self.ins_addr)
if result is None:
top = self.state.top(bits)
# TODO: Annotate top with a definition
result = MultiValues(offset_to_values={0: {top}})
return result
def _ail_handle_Convert(self, expr: ailment.Expr.Convert) -> MultiValues:
to_conv: MultiValues = self._expr(expr.operand)
bits = expr.to_bits
size = bits // self.arch.byte_width
if len(to_conv.values) == 1 and 0 in to_conv.values:
values = to_conv.values[0]
else:
top = self.state.top(expr.to_bits)
# annotate it
dummy_atom = MemoryLocation(0, size, endness=self.arch.memory_endness)
top = self.state.annotate_with_def(top, Definition(dummy_atom, ExternalCodeLocation()))
# add use
self.state.add_use(dummy_atom, self._codeloc())
return MultiValues(offset_to_values={0: {top}})
converted = set()
for v in values:
if expr.to_bits < expr.from_bits:
conv = v[expr.to_bits - 1:0]
elif expr.to_bits > expr.from_bits:
conv = claripy.ZeroExt(expr.to_bits - expr.from_bits, v)
else:
conv = v
converted.add(conv)
return MultiValues(offset_to_values={0: converted})
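    @staticmethod
    def _example_convert_semantics():
        # Illustrative sketch only, never called: the truncation and
        # zero-extension cases applied by _ail_handle_Convert above, shown
        # on a plain claripy bitvector.
        v = claripy.BVV(0xDEADBEEF, 32)
        truncated = v[15:0]               # 32 -> 16 bits: keep the low half
        widened = claripy.ZeroExt(32, v)  # 32 -> 64 bits: zero-extend
        return truncated, widened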
def _ail_handle_ITE(self, expr: ailment.Expr.ITE) -> MultiValues:
_: MultiValues = self._expr(expr.cond)
iftrue: MultiValues = self._expr(expr.iftrue)
_: MultiValues = self._expr(expr.iffalse)
top = self.state.top(len(iftrue))
return MultiValues(offset_to_values={0: {top}})
def _ail_handle_Not(self, expr: ailment.Expr.UnaryOp) -> MultiValues:
operand: MultiValues = self._expr(expr.operand)
bits = expr.bits
r = None
operand_v = operand.one_value()
if operand_v is None or self.state.is_top(operand_v):
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
else:
r = MultiValues(offset_to_values={0: {~operand_v}})
return r
def _ail_handle_BinaryOp(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
r = super()._ail_handle_BinaryOp(expr)
if isinstance(r, ailment.Expr.BinaryOp):
l.warning("Unimplemented operation %s.", expr.op)
top = self.state.top(expr.bits)
return MultiValues(offset_to_values={0: {top}})
return r
def _ail_handle_Add(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# adding a single value to a multivalue
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {v + expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# adding a single value to a multivalue
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {v + expr0_v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
# adding two single values together
r = MultiValues(offset_to_values={0: {expr0_v + expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Sub(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# subtracting a single value from a multivalue
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {v - expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# subtracting a single value from a multivalue
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {expr0_v - v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
r = MultiValues(offset_to_values={0: {expr0_v - expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Shr(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# each value in expr0 >> expr1_v
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {(claripy.LShR(v, expr1_v._model_concrete.value) if v.concrete else self.state.top(bits)) for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# expr0_v >> each value in expr1
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {(claripy.LShR(expr0_v, v._model_concrete.value) if v.concrete else self.state.top(bits)) for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
if expr1_v.concrete:
r = MultiValues(offset_to_values={0: {claripy.LShR(expr0_v, expr1_v._model_concrete.value)}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Sar(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# each value in expr0 >> expr1_v
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {(claripy.LShR(v, expr1_v._model_concrete.value) if v.concrete else self.state.top(bits)) for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# expr0_v >> each value in expr1
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {(claripy.LShR(expr0_v, v._model_concrete.value) if v.concrete else self.state.top(bits)) for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
if expr1_v.concrete:
r = MultiValues(offset_to_values={0: {expr0_v >> expr1_v._model_concrete.value}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Shl(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# each value in expr0 << expr1_v
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {((v << expr1_v._model_concrete.value) if v.concrete else self.state.top(bits)) for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
            # expr0_v << each value in expr1
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {((expr0_v << v._model_concrete.value) if v.concrete else self.state.top(bits)) for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
if expr1_v.concrete:
r = MultiValues(offset_to_values={0: {expr0_v << expr1_v._model_concrete.value}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_And(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
if expr0_v is None and expr1_v is not None:
# expr1_v & each value in expr0
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {v & expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# expr0_v & each value in expr1
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {expr0_v & v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
            # special handling for stack alignment
if self.state.is_stack_address(expr0_v):
r = MultiValues(offset_to_values={0: {expr0_v}})
else:
r = MultiValues(offset_to_values={0: {expr0_v & expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Or(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# expr1_v | each value in expr0
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {v | expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# expr0_v | each value in expr1
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {expr0_v | v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
r = MultiValues(offset_to_values={0: {expr0_v | expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Xor(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# expr1_v ^ each value in expr0
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {v ^ expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# expr0_v ^ each value in expr1
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {expr0_v ^ v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
r = MultiValues(offset_to_values={0: {expr0_v ^ expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Concat(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# concatenate expr1_v with each value in expr0
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {claripy.Concat(v, expr1_v) for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# concatenate expr0_v with each value in expr1
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {claripy.Concat(expr0_v, v) for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
r = MultiValues(offset_to_values={0: {claripy.Concat(expr0_v, expr1_v)}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Cmp(self, expr) -> MultiValues:
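        # Comparison results are not modeled precisely: evaluate both operands so
        # their uses are recorded, then return TOP at the width of the expression.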
op0 = self._expr(expr.operands[0])
op1 = self._expr(expr.operands[1])
        if op0 is None:
            op0 = expr.operands[0]
        if op1 is None:
            op1 = expr.operands[1]
top = self.state.top(expr.bits)
return MultiValues(offset_to_values={0: {top}})
_ail_handle_CmpEQ = _ail_handle_Cmp
_ail_handle_CmpNE = _ail_handle_Cmp
_ail_handle_CmpLE = _ail_handle_Cmp
_ail_handle_CmpLEs = _ail_handle_Cmp
_ail_handle_CmpLT = _ail_handle_Cmp
_ail_handle_CmpLTs = _ail_handle_Cmp
_ail_handle_CmpGE = _ail_handle_Cmp
_ail_handle_CmpGEs = _ail_handle_Cmp
_ail_handle_CmpGT = _ail_handle_Cmp
_ail_handle_CmpGTs = _ail_handle_Cmp
def _ail_handle_Const(self, expr) -> MultiValues:
return MultiValues(offset_to_values={0: {claripy.BVV(expr.value, expr.bits)}})
def _ail_handle_StackBaseOffset(self, expr: ailment.Expr.StackBaseOffset) -> MultiValues:
stack_addr = self.state.stack_address(expr.offset)
return MultiValues(offset_to_values={0: {stack_addr}})
def _ail_handle_DirtyExpression(self, expr: ailment.Expr.DirtyExpression) -> MultiValues: # pylint:disable=no-self-use
# FIXME: DirtyExpression needs .bits
top = self.state.top(expr.bits)
return MultiValues(offset_to_values={0: {top}})
#
# User defined high-level statement handlers
#
def _handle_function(self):
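        # Resolve the concrete call target from the current definition of the IP
        # register, then dispatch to a user-supplied handler: handle_<func_name>
        # for external (PLT- or symbol-resolved) targets, or handle_local_function
        # for targets inside the binary.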
if len(self._call_stack) + 1 > self._maximum_local_call_depth:
l.warning('The analysis reached its maximum recursion depth.')
return None
defs_ip = self.state.register_definitions.get_objects_by_offset(self.arch.ip_offset)
if len(defs_ip) != 1:
l.error('Invalid definition(s) for IP.')
return None
ip_data = next(iter(defs_ip)).data
if len(ip_data) != 1:
l.error('Invalid number of values for IP.')
return None
ip_addr = ip_data.get_first_element()
if not isinstance(ip_addr, int):
l.error('Invalid type %s for IP.', type(ip_addr).__name__)
return None
is_internal = False
ext_func_name = None
if self.project.loader.main_object.contains_addr(ip_addr) is True:
ext_func_name = self.project.loader.find_plt_stub_name(ip_addr)
if ext_func_name is None:
is_internal = True
else:
symbol = self.project.loader.find_symbol(ip_addr)
if symbol is not None:
ext_func_name = symbol.name
if ext_func_name is not None:
handler_name = 'handle_%s' % ext_func_name
if hasattr(self._function_handler, handler_name):
getattr(self._function_handler, handler_name)(self.state, self._codeloc())
else:
l.warning('Please implement the external function handler for %s() with your own logic.',
ext_func_name)
elif is_internal is True:
handler_name = 'handle_local_function'
if hasattr(self._function_handler, handler_name):
is_updated, state, visited_blocks, dep_graph = getattr(self._function_handler, handler_name)(
self.state,
ip_addr,
self._call_stack,
self._maximum_local_call_depth,
self._visited_blocks,
self._dep_graph,
)
if is_updated is True:
self.state = state
self._visited_blocks = visited_blocks
self._dep_graph = dep_graph
else:
l.warning('Please implement the local function handler with your own logic.')
else:
l.warning('Could not find function name for external function at address %#x.', ip_addr)
return None
| 42.644471
| 136
| 0.615687
|
from itertools import chain
from typing import Iterable, Optional
import logging
import archinfo
import claripy
import ailment
from ...engines.light import SimEngineLight, SimEngineLightAILMixin, SpOffset
from ...errors import SimEngineError, SimMemoryMissingError
from ...calling_conventions import DEFAULT_CC, SimRegArg, SimStackArg
from ...storage.memory_mixins.paged_memory.pages.multi_values import MultiValues
from ...knowledge_plugins.key_definitions.atoms import Register, Tmp, MemoryLocation
from ...knowledge_plugins.key_definitions.constants import OP_BEFORE, OP_AFTER
from ...knowledge_plugins.key_definitions.live_definitions import Definition
from .external_codeloc import ExternalCodeLocation
from .rd_state import ReachingDefinitionsState
l = logging.getLogger(name=__name__)
class SimEngineRDAIL(
SimEngineLightAILMixin,
SimEngineLight,
):
arch: archinfo.Arch
state: ReachingDefinitionsState
def __init__(self, project, call_stack, maximum_local_call_depth, function_handler=None):
super().__init__()
self.project = project
self._call_stack = call_stack
self._maximum_local_call_depth = maximum_local_call_depth
self._function_handler = function_handler
self._visited_blocks = None
self._dep_graph = None
def process(self, state, *args, **kwargs):
self._dep_graph = kwargs.pop('dep_graph', None)
self._visited_blocks = kwargs.pop('visited_blocks', None)
try:
self._process(
state,
None,
block=kwargs.pop('block', None),
)
except SimEngineError as e:
if kwargs.pop('fail_fast', False) is True:
raise e
return self.state, self._visited_blocks, self._dep_graph
def sp_offset(self, offset: int):
return self.state.stack_address(offset)
@staticmethod
def _external_codeloc():
return ExternalCodeLocation()
def _handle_Stmt(self, stmt):
if self.state.analysis:
self.state.analysis.insn_observe(self.ins_addr, stmt, self.block, self.state, OP_BEFORE)
super()._handle_Stmt(stmt)
if self.state.analysis:
self.state.analysis.insn_observe(self.ins_addr, stmt, self.block, self.state, OP_AFTER)
def _ail_handle_Assignment(self, stmt):
src = self._expr(stmt.src)
dst = stmt.dst
if src is None:
src = self.state.top(dst.bits)
if isinstance(dst, ailment.Tmp):
self.state.kill_and_add_definition(Tmp(dst.tmp_idx, dst.size), self._codeloc(), src)
self.tmps[dst.tmp_idx] = src
elif isinstance(dst, ailment.Register):
reg = Register(dst.reg_offset, dst.size)
self.state.kill_and_add_definition(reg, self._codeloc(), src)
if dst.reg_offset == self.arch.sp_offset:
pass
else:
l.warning('Unsupported type of Assignment dst %s.', type(dst).__name__)
def _ail_handle_Store(self, stmt: ailment.Stmt.Store) -> None:
data: MultiValues = self._expr(stmt.data)
addr: MultiValues = self._expr(stmt.addr)
size: int = stmt.size
if stmt.guard is not None:
            guard = self._expr(stmt.guard)
        else:
            guard = None
addr_v = addr.one_value()
if addr_v is not None and not self.state.is_top(addr_v):
if self.state.is_stack_address(addr_v):
stack_offset = self.state.get_stack_offset(addr_v)
if stack_offset is not None:
memory_location = MemoryLocation(SpOffset(self.arch.bits, stack_offset), size, endness=stmt.endness)
else:
memory_location = None
elif self.state.is_heap_address(addr_v):
memory_location = None
else:
memory_location = MemoryLocation(addr_v._model_concrete.value, size, endness=stmt.endness)
if memory_location is not None:
self.state.kill_and_add_definition(memory_location,
self._codeloc(),
data,
endness=stmt.endness)
def _ail_handle_Jump(self, stmt):
_ = self._expr(stmt.target)
def _ail_handle_ConditionalJump(self, stmt):
        cond = self._expr(stmt.condition)
        true_target = self._expr(stmt.true_target)
        false_target = self._expr(stmt.false_target)
ip = Register(self.arch.ip_offset, self.arch.bytes)
codeloc = self._codeloc()
top_v = self.state.top(self.arch.bits)
dummy_def = Definition(Register(self.arch.ip_offset, self.arch.bytes), codeloc, dummy=True)
top_v = self.state.annotate_with_def(top_v, dummy_def)
top_mv = MultiValues(offset_to_values={0: {top_v}})
self.state.kill_definitions(ip, codeloc, data=top_mv, annotated=True)
if 'cc_op' in self.arch.registers:
self.state.kill_definitions(Register(*self.arch.registers['cc_op']), codeloc, data=top_mv, annotated=True)
self.state.kill_definitions(Register(*self.arch.registers['cc_dep1']), codeloc, data=top_mv, annotated=True)
self.state.kill_definitions(Register(*self.arch.registers['cc_dep2']), codeloc, data=top_mv, annotated=True)
self.state.kill_definitions(Register(*self.arch.registers['cc_ndep']), codeloc, data=top_mv, annotated=True)
def _ail_handle_Call(self, stmt: ailment.Stmt.Call):
self._handle_Call_base(stmt, is_expr=False)
def _handle_Call_base(self, stmt: ailment.Stmt.Call, is_expr: bool=False):
        target = self._expr(stmt.target)
        codeloc = self._codeloc()
top_v = self.state.top(self.arch.bits)
dummy_def = Definition(Register(self.arch.ip_offset, self.arch.bytes), codeloc, dummy=True)
top_v = self.state.annotate_with_def(top_v, dummy_def)
top_mv = MultiValues(offset_to_values={0: {top_v}})
ip = Register(self.arch.ip_offset, self.arch.bytes)
self.state.kill_definitions(ip, codeloc, data=top_mv, annotated=True)
if stmt.args is not None:
used_exprs = stmt.args
elif stmt.calling_convention is not None and (
stmt.calling_convention.func_ty is not None or stmt.calling_convention.args is not None):
used_exprs = [ ]
for arg_loc in stmt.calling_convention.arg_locs():
if isinstance(arg_loc, SimRegArg):
used_exprs.append(Register(self.arch.registers[arg_loc.reg_name], arg_loc.size))
elif isinstance(arg_loc, SimStackArg):
used_exprs.append(SpOffset(arg_loc.size * 8, arg_loc.stack_offset, is_base=False))
else:
l.warning("_handle_Call(): Unsupported arg_loc %r.", arg_loc)
else:
used_exprs = None
if stmt.calling_convention is not None:
cc = stmt.calling_convention
else:
l.debug("Unknown calling convention for function %s. Fall back to default calling convention.", target)
cc = self.project.factory.cc()
killed_vars = [ Register(*self.arch.registers[reg_name]) for reg_name in cc.CALLER_SAVED_REGS ]
if used_exprs is None:
used_exprs = [ Register(*self.arch.registers[reg_name]) for reg_name in cc.ARG_REGS ]
for expr in used_exprs:
self._expr(expr)
return_reg_offset = None
if not is_expr:
if stmt.ret_expr is not None:
if isinstance(stmt.ret_expr, ailment.Expr.Register):
return_reg_offset = stmt.ret_expr.reg_offset
return_reg_size = stmt.ret_expr.size
reg_atom = Register(return_reg_offset, return_reg_size)
top = self.state.top(return_reg_size * self.arch.byte_width)
self.state.kill_and_add_definition(reg_atom, codeloc, MultiValues(offset_to_values={0: {top}}))
else:
l.warning("Unsupported ret_expr type %s. Please report to GitHub.", stmt.ret_expr.__class__)
else:
return_reg_offset, return_reg_size = self.arch.registers[cc.RETURN_VAL.reg_name]
self.state.kill_definitions(Register(return_reg_offset, return_reg_size), codeloc, dummy=False)
for var in killed_vars:
if var.reg_offset == return_reg_offset:
continue
self.state.kill_definitions(var, codeloc, data=top_mv, annotated=True)
if 'cc_op' in self.arch.registers:
self.state.kill_definitions(Register(*self.arch.registers['cc_op']), codeloc, data=top_mv, annotated=True)
self.state.kill_definitions(Register(*self.arch.registers['cc_dep1']), codeloc, data=top_mv, annotated=True)
self.state.kill_definitions(Register(*self.arch.registers['cc_dep2']), codeloc, data=top_mv, annotated=True)
self.state.kill_definitions(Register(*self.arch.registers['cc_ndep']), codeloc, data=top_mv, annotated=True)
def _ail_handle_Return(self, stmt: ailment.Stmt.Return):
if stmt.ret_exprs:
for ret_expr in stmt.ret_exprs:
self._expr(ret_expr)
return
cc_cls = DEFAULT_CC.get(self.project.arch.name, None)
if cc_cls is None:
l.warning("Unknown default calling convention for architecture %s.", self.project.arch.name)
return
cc = cc_cls(self.project.arch)
codeloc = self._codeloc()
size = self.project.arch.bits // 8
if cc.RETURN_VAL is not None:
if isinstance(cc.RETURN_VAL, SimRegArg):
offset = cc.RETURN_VAL._fix_offset(None, size, arch=self.project.arch)
self.state.add_use(Register(offset, size), codeloc)
self.state.add_use(Register(self.project.arch.bp_offset, self.project.arch.bits // 8), codeloc)
# self.state.add_use(Register(self.project.arch.sp_offset, self.project.arch.bits // 8), codeloc)
def _ail_handle_DirtyStatement(self, stmt: ailment.Stmt.DirtyStatement):
# TODO: The logic below is subject to change when ailment.Stmt.DirtyStatement is changed
tmp = stmt.dirty_stmt.dst
cvt_sizes = {
'ILGop_IdentV128': 16,
'ILGop_Ident64': 8,
'ILGop_Ident32': 4,
'ILGop_16Uto32': 4,
'ILGop_16Sto32': 4,
'ILGop_8Uto32': 4,
'ILGop_8Sto32': 4,
}
size = cvt_sizes[stmt.dirty_stmt.cvt]
self.state.kill_and_add_definition(Tmp(tmp, size), self._codeloc(), None)
self.tmps[tmp] = None
#
# AIL expression handlers
#
def _ail_handle_BV(self, expr: claripy.ast.Base) -> MultiValues:
return MultiValues(offset_to_values={0: {expr}})
def _ail_handle_Tmp(self, expr: ailment.Expr.Tmp) -> MultiValues:
self.state.add_use(Tmp(expr.tmp_idx, expr.size), self._codeloc())
return super()._ail_handle_Tmp(expr)
def _ail_handle_CallExpr(self, expr: ailment.Stmt.Call) -> MultiValues:
self._handle_Call_base(expr, is_expr=True)
return MultiValues(offset_to_values={0: {self.state.top(expr.bits)}})
def _ail_handle_Register(self, expr) -> MultiValues:
self.state: ReachingDefinitionsState
reg_offset = expr.reg_offset
size = expr.size
# bits = size * 8
reg_atom = Register(reg_offset, size)
# first check if it is ever defined
try:
value: MultiValues = self.state.register_definitions.load(reg_offset, size=size)
except SimMemoryMissingError:
# the value does not exist
top = self.state.top(size * self.state.arch.byte_width)
# annotate it
top = self.state.annotate_with_def(top, Definition(reg_atom, ExternalCodeLocation()))
value = MultiValues(offset_to_values={0: {top}})
# write it back
self.state.kill_and_add_definition(reg_atom, self._external_codeloc(), value)
# extract Definitions
defs: Optional[Iterable[Definition]] = None
for vs in value.values.values():
for v in vs:
if defs is None:
defs = self.state.extract_defs(v)
else:
defs = chain(defs, self.state.extract_defs(v))
if defs is None:
# define it right away as an external dependency
self.state.kill_and_add_definition(reg_atom, self._external_codeloc(), value)
else:
codeloc = self._codeloc()
for def_ in defs:
self.state.add_use_by_def(def_, codeloc)
return value
def _ail_handle_Load(self, expr: ailment.Expr.Load) -> MultiValues:
addrs: MultiValues = self._expr(expr.addr)
size = expr.size
bits = expr.bits
if expr.guard is not None:
guard = self._expr(expr.guard) # pylint:disable=unused-variable
alt = self._expr(expr.alt) # pylint:disable=unused-variable
else:
guard = None # pylint:disable=unused-variable
alt = None # pylint:disable=unused-variable
# convert addrs from MultiValues to a list of valid addresses
if len(addrs.values) == 1:
addrs_v = next(iter(addrs.values.values()))
else:
top = self.state.top(bits)
# annotate it
dummy_atom = MemoryLocation(0, size, endness=expr.endness)
top = self.state.annotate_with_def(top, Definition(dummy_atom, ExternalCodeLocation()))
# add use
self.state.add_use(dummy_atom, self._codeloc())
return MultiValues(offset_to_values={0: {top}})
result: Optional[MultiValues] = None
for addr in addrs_v:
if not isinstance(addr, claripy.ast.Base):
continue
if addr.concrete:
# a concrete address
addr = addr._model_concrete.value
try:
vs: MultiValues = self.state.memory_definitions.load(addr, size=size, endness=expr.endness)
except SimMemoryMissingError:
continue
memory_location = MemoryLocation(addr, size, endness=expr.endness)
self.state.add_use(memory_location, self._codeloc())
result = result.merge(vs) if result is not None else vs
elif self.state.is_stack_address(addr):
stack_offset = self.state.get_stack_offset(addr)
if stack_offset is not None:
stack_addr = self.state.live_definitions.stack_offset_to_stack_addr(stack_offset)
try:
vs: MultiValues = self.state.stack_definitions.load(stack_addr, size=size, endness=expr.endness)
except SimMemoryMissingError:
continue
memory_location = MemoryLocation(SpOffset(self.arch.bits, stack_offset), size, endness=expr.endness)
self.state.add_use(memory_location, self._codeloc())
result = result.merge(vs) if result is not None else vs
else:
                l.debug('Memory address %r undefined or unsupported at pc %#x.', addr, self.ins_addr)
if result is None:
top = self.state.top(bits)
# TODO: Annotate top with a definition
result = MultiValues(offset_to_values={0: {top}})
return result
def _ail_handle_Convert(self, expr: ailment.Expr.Convert) -> MultiValues:
to_conv: MultiValues = self._expr(expr.operand)
bits = expr.to_bits
size = bits // self.arch.byte_width
if len(to_conv.values) == 1 and 0 in to_conv.values:
values = to_conv.values[0]
else:
top = self.state.top(expr.to_bits)
# annotate it
dummy_atom = MemoryLocation(0, size, endness=self.arch.memory_endness)
top = self.state.annotate_with_def(top, Definition(dummy_atom, ExternalCodeLocation()))
# add use
self.state.add_use(dummy_atom, self._codeloc())
return MultiValues(offset_to_values={0: {top}})
converted = set()
for v in values:
if expr.to_bits < expr.from_bits:
conv = v[expr.to_bits - 1:0]
elif expr.to_bits > expr.from_bits:
conv = claripy.ZeroExt(expr.to_bits - expr.from_bits, v)
else:
conv = v
converted.add(conv)
return MultiValues(offset_to_values={0: converted})
def _ail_handle_ITE(self, expr: ailment.Expr.ITE) -> MultiValues:
_: MultiValues = self._expr(expr.cond)
iftrue: MultiValues = self._expr(expr.iftrue)
_: MultiValues = self._expr(expr.iffalse)
top = self.state.top(len(iftrue))
return MultiValues(offset_to_values={0: {top}})
def _ail_handle_Not(self, expr: ailment.Expr.UnaryOp) -> MultiValues:
operand: MultiValues = self._expr(expr.operand)
bits = expr.bits
r = None
operand_v = operand.one_value()
if operand_v is None or self.state.is_top(operand_v):
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
else:
r = MultiValues(offset_to_values={0: {~operand_v}})
return r
def _ail_handle_BinaryOp(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
r = super()._ail_handle_BinaryOp(expr)
if isinstance(r, ailment.Expr.BinaryOp):
l.warning("Unimplemented operation %s.", expr.op)
top = self.state.top(expr.bits)
return MultiValues(offset_to_values={0: {top}})
return r
def _ail_handle_Add(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# adding a single value to a multivalue
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {v + expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# adding a single value to a multivalue
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {v + expr0_v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
# adding two single values together
r = MultiValues(offset_to_values={0: {expr0_v + expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Sub(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# subtracting a single value from a multivalue
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {v - expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# subtracting a single value from a multivalue
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {expr0_v - v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
r = MultiValues(offset_to_values={0: {expr0_v - expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Shr(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# each value in expr0 >> expr1_v
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {(claripy.LShR(v, expr1_v._model_concrete.value) if v.concrete else self.state.top(bits)) for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# expr0_v >> each value in expr1
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {(claripy.LShR(expr0_v, v._model_concrete.value) if v.concrete else self.state.top(bits)) for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
if expr1_v.concrete:
r = MultiValues(offset_to_values={0: {claripy.LShR(expr0_v, expr1_v._model_concrete.value)}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Sar(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# each value in expr0 >> expr1_v
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {(claripy.LShR(v, expr1_v._model_concrete.value) if v.concrete else self.state.top(bits)) for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# expr0_v >> each value in expr1
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {(claripy.LShR(expr0_v, v._model_concrete.value) if v.concrete else self.state.top(bits)) for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
if expr1_v.concrete:
r = MultiValues(offset_to_values={0: {expr0_v >> expr1_v._model_concrete.value}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Shl(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# each value in expr0 << expr1_v
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {((v << expr1_v._model_concrete.value) if v.concrete else self.state.top(bits)) for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
            # expr0_v << each value in expr1
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {((expr0_v << v._model_concrete.value) if v.concrete else self.state.top(bits)) for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
if expr1_v.concrete:
r = MultiValues(offset_to_values={0: {expr0_v << expr1_v._model_concrete.value}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_And(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
if expr0_v is None and expr1_v is not None:
# expr1_v & each value in expr0
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {v & expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# expr0_v & each value in expr1
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {expr0_v & v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
            # special handling for stack alignment
if self.state.is_stack_address(expr0_v):
r = MultiValues(offset_to_values={0: {expr0_v}})
else:
r = MultiValues(offset_to_values={0: {expr0_v & expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Or(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# expr1_v | each value in expr0
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {v | expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# expr0_v | each value in expr1
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {expr0_v | v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
r = MultiValues(offset_to_values={0: {expr0_v | expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Xor(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# expr1_v ^ each value in expr0
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {v ^ expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# expr0_v ^ each value in expr1
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {expr0_v ^ v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
r = MultiValues(offset_to_values={0: {expr0_v ^ expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Concat(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# concatenate expr1_v with each value in expr0
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {claripy.Concat(v, expr1_v) for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# concatenate expr0_v with each value in expr1
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {claripy.Concat(expr0_v, v) for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
r = MultiValues(offset_to_values={0: {claripy.Concat(expr0_v, expr1_v)}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Cmp(self, expr) -> MultiValues:
op0 = self._expr(expr.operands[0])
op1 = self._expr(expr.operands[1])
        if op0 is None:
            op0 = expr.operands[0]
        if op1 is None:
            op1 = expr.operands[1]
top = self.state.top(expr.bits)
return MultiValues(offset_to_values={0: {top}})
_ail_handle_CmpEQ = _ail_handle_Cmp
_ail_handle_CmpNE = _ail_handle_Cmp
_ail_handle_CmpLE = _ail_handle_Cmp
_ail_handle_CmpLEs = _ail_handle_Cmp
_ail_handle_CmpLT = _ail_handle_Cmp
_ail_handle_CmpLTs = _ail_handle_Cmp
_ail_handle_CmpGE = _ail_handle_Cmp
_ail_handle_CmpGEs = _ail_handle_Cmp
_ail_handle_CmpGT = _ail_handle_Cmp
_ail_handle_CmpGTs = _ail_handle_Cmp
def _ail_handle_Const(self, expr) -> MultiValues:
return MultiValues(offset_to_values={0: {claripy.BVV(expr.value, expr.bits)}})
def _ail_handle_StackBaseOffset(self, expr: ailment.Expr.StackBaseOffset) -> MultiValues:
stack_addr = self.state.stack_address(expr.offset)
return MultiValues(offset_to_values={0: {stack_addr}})
def _ail_handle_DirtyExpression(self, expr: ailment.Expr.DirtyExpression) -> MultiValues: # pylint:disable=no-self-use
# FIXME: DirtyExpression needs .bits
top = self.state.top(expr.bits)
return MultiValues(offset_to_values={0: {top}})
#
# User defined high-level statement handlers
#
def _handle_function(self):
if len(self._call_stack) + 1 > self._maximum_local_call_depth:
l.warning('The analysis reached its maximum recursion depth.')
return None
defs_ip = self.state.register_definitions.get_objects_by_offset(self.arch.ip_offset)
if len(defs_ip) != 1:
l.error('Invalid definition(s) for IP.')
return None
ip_data = next(iter(defs_ip)).data
if len(ip_data) != 1:
l.error('Invalid number of values for IP.')
return None
ip_addr = ip_data.get_first_element()
if not isinstance(ip_addr, int):
l.error('Invalid type %s for IP.', type(ip_addr).__name__)
return None
is_internal = False
ext_func_name = None
if self.project.loader.main_object.contains_addr(ip_addr) is True:
ext_func_name = self.project.loader.find_plt_stub_name(ip_addr)
if ext_func_name is None:
is_internal = True
else:
symbol = self.project.loader.find_symbol(ip_addr)
if symbol is not None:
ext_func_name = symbol.name
if ext_func_name is not None:
handler_name = 'handle_%s' % ext_func_name
if hasattr(self._function_handler, handler_name):
getattr(self._function_handler, handler_name)(self.state, self._codeloc())
else:
l.warning('Please implement the external function handler for %s() with your own logic.',
ext_func_name)
elif is_internal is True:
handler_name = 'handle_local_function'
if hasattr(self._function_handler, handler_name):
is_updated, state, visited_blocks, dep_graph = getattr(self._function_handler, handler_name)(
self.state,
ip_addr,
self._call_stack,
self._maximum_local_call_depth,
self._visited_blocks,
self._dep_graph,
)
if is_updated is True:
self.state = state
self._visited_blocks = visited_blocks
self._dep_graph = dep_graph
else:
l.warning('Please implement the local function handler with your own logic.')
else:
            l.warning('Could not find function name for external function at address %#x.', ip_addr)
        return None
| true
| true
|
1c48ca8060baf98f40b83661b0b59646600fa52c
| 2,589
|
py
|
Python
|
toolcraft/tools/__base__.py
|
SpikingNeurons/toolcraft
|
7290fa70a5d2680ebacf1bc421efaf09545f7c7e
|
[
"BSD-3-Clause"
] | 6
|
2021-04-06T09:27:48.000Z
|
2021-12-17T02:13:11.000Z
|
toolcraft/tools/__base__.py
|
SpikingNeurons/toolcraft
|
7290fa70a5d2680ebacf1bc421efaf09545f7c7e
|
[
"BSD-3-Clause"
] | 57
|
2021-03-19T07:33:13.000Z
|
2022-03-30T18:59:29.000Z
|
toolcraft/tools/__base__.py
|
SpikingNeurons/toolcraft
|
7290fa70a5d2680ebacf1bc421efaf09545f7c7e
|
[
"BSD-3-Clause"
] | 2
|
2021-04-08T18:24:36.000Z
|
2021-04-08T22:40:50.000Z
|
"""
Get inspirations from
https://github.com/python-poetry/poetry/tree/master/poetry/console/commands
"""
import abc
import inspect
import typer
import typing as t
from .. import error as e
from .. import logger
APP = typer.Typer(name="toolcraft")
_LOGGER = logger.get_logger()
class Tool(abc.ABC):
AVAILABLE_TOOL_CLASSES = {} # type: t.Dict[str, t.Type[Tool]]
def __init_subclass__(cls, **kwargs):
global APP
# -------------------------------------------------------- 01
# Validations
# -------------------------------------------------------- 01.01
# all subclasses must be concrete
if inspect.isabstract(cls):
e.code.CodingError(
msgs=[
f"Class {cls} is not concrete ..."
]
)
# -------------------------------------------------------- 01.02
# there can be only one tool class per module
else:
if cls.tool_name() in cls.AVAILABLE_TOOL_CLASSES.keys():
e.code.CodingError(
msgs=[
f"you can have only one concrete subclass of {Tool} in "
f"module {cls.__module__}"
]
)
# -------------------------------------------------------- 01.03
# you need to define `command_fn` method in order to register it with
# `typer_app`
if Tool.command_fn.__name__ not in cls.__dict__.keys():
e.code.CodingError(
msgs=[
f"Please override method `{Tool.command_fn.__name__}` in "
f"class {cls}."
]
)
# -------------------------------------------------------- 02
# store tool classes for future reference ...
cls.AVAILABLE_TOOL_CLASSES[cls.tool_name()] = cls
# -------------------------------------------------------- 03
# register command_fn in typer_app
APP.command(name=cls.tool_name())(cls.command_fn)
# -------------------------------------------------------- 04
# log
# _LOGGER.info(
# msg=f"Registered tool `{cls.tool_name()}`"
# )
@classmethod
def tool_name(cls) -> str:
"""
        There can be only one tool per module
"""
return cls.__module__.split(".")[-1]
@classmethod
def command_fn(cls, **kwargs):
raise NotImplementedError(
f"Please implement this method in the respective "
f"subclass ..."
)
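    # A minimal (hypothetical) usage sketch: a concrete subclass defined in its
    # own module, e.g. `toolcraft/tools/info.py`, is auto-registered as the
    # typer sub-command `toolcraft info`:
    #
    #     class Info(Tool):
    #         @classmethod
    #         def command_fn(cls, name: str = "world"):
    #             print(f"hello {name}")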
| 31.192771
| 80
| 0.445732
|
import abc
import inspect
import typer
import typing as t
from .. import error as e
from .. import logger
APP = typer.Typer(name="toolcraft")
_LOGGER = logger.get_logger()
class Tool(abc.ABC):
AVAILABLE_TOOL_CLASSES = {}
def __init_subclass__(cls, **kwargs):
global APP
if inspect.isabstract(cls):
e.code.CodingError(
msgs=[
f"Class {cls} is not concrete ..."
]
)
else:
if cls.tool_name() in cls.AVAILABLE_TOOL_CLASSES.keys():
e.code.CodingError(
msgs=[
f"you can have only one concrete subclass of {Tool} in "
f"module {cls.__module__}"
]
)
if Tool.command_fn.__name__ not in cls.__dict__.keys():
e.code.CodingError(
msgs=[
f"Please override method `{Tool.command_fn.__name__}` in "
f"class {cls}."
]
)
cls.AVAILABLE_TOOL_CLASSES[cls.tool_name()] = cls
APP.command(name=cls.tool_name())(cls.command_fn)
@classmethod
def tool_name(cls) -> str:
return cls.__module__.split(".")[-1]
@classmethod
def command_fn(cls, **kwargs):
raise NotImplementedError(
f"Please implement this method in the respective "
f"subclass ..."
)
| true
| true
|
1c48cbc22c51d021fa49f06c4e01f0e308d3c262
| 264
|
py
|
Python
|
configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py
|
heytanay/mmsegmentation
|
7ddd2fe2ecff9c95999bd00ec05cc37eafb558f8
|
[
"Apache-2.0"
] | 11
|
2022-02-04T01:09:45.000Z
|
2022-03-08T05:49:16.000Z
|
configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py
|
heytanay/mmsegmentation
|
7ddd2fe2ecff9c95999bd00ec05cc37eafb558f8
|
[
"Apache-2.0"
] | 2
|
2022-02-25T03:07:23.000Z
|
2022-03-08T12:54:05.000Z
|
configs/unet/deeplabv3_unet_s5-d16_ce-1.0-dice-3.0_128x128_40k_chase-db1.py
|
heytanay/mmsegmentation
|
7ddd2fe2ecff9c95999bd00ec05cc37eafb558f8
|
[
"Apache-2.0"
] | 1
|
2022-01-25T05:13:37.000Z
|
2022-01-25T05:13:37.000Z
|
_base_ = './deeplabv3_unet_s5-d16_128x128_40k_chase_db1.py'
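# Override the decode head to use a combined loss: cross-entropy with weight 1.0
# plus Dice with weight 3.0, matching the "ce-1.0-dice-3.0" tag in the filename.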
model = dict(
decode_head=dict(loss_decode=[
dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0),
dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0)
]))
| 37.714286
| 76
| 0.700758
|
_base_ = './deeplabv3_unet_s5-d16_128x128_40k_chase_db1.py'
model = dict(
decode_head=dict(loss_decode=[
dict(type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0),
dict(type='DiceLoss', loss_name='loss_dice', loss_weight=3.0)
]))
| true
| true
|
1c48cc2f520b1dcd46156055cdb4f50c8d087a8d
| 2,035
|
py
|
Python
|
actions/lib/actions.py
|
xod442/stackstorm-hpe-arubacx
|
d790c7dfd75a61131d5c89204e59ee6362db1563
|
[
"Apache-2.0"
] | null | null | null |
actions/lib/actions.py
|
xod442/stackstorm-hpe-arubacx
|
d790c7dfd75a61131d5c89204e59ee6362db1563
|
[
"Apache-2.0"
] | null | null | null |
actions/lib/actions.py
|
xod442/stackstorm-hpe-arubacx
|
d790c7dfd75a61131d5c89204e59ee6362db1563
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# (C) Copyright 2019-2020 Hewlett Packard Enterprise Development LP.
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __author__ = "@netwookie"
# __credits__ = ["Rick Kauffman"]
# __license__ = "Apache2.0"
# __maintainer__ = "Rick Kauffman"
# __email__ = "rick.a.kauffman@hpe.com"
import requests
from st2common.runners.base_action import Action
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
import logging
logging.basicConfig(level=logging.INFO)
from pyaoscx import session
class ArubaCxBaseAction(Action):
def __init__(self,config):
super(ArubaCxBaseAction, self).__init__(config)
        # NOTE: _get_client() returns (session, session_dict); unpack accordingly
        self.session, self.session_dict = self._get_client()
def _get_client(self):
# self.config['username'] = 'admin'
# self.config['password'] = 'siesta3'
base_url = "https://{0}/rest/{1}/".format('10.132.0.213', 'v10.04')
# base_url = "https://{0}/rest/{1}/".format(self.config['switchip'], self.config['version'])
print(base_url)
        session_dict = None
        try:
            session_dict = dict(s=session.login(base_url, 'admin', 'siesta3'), url=base_url)
            # session_dict = dict(s=session.login(base_url, self.config['username'], self.config['password']), url=base_url)
        except Exception as error:
            print('Ran into exception: {}. Logging out..'.format(error))
            # only attempt a logout if a session was actually established
            if session_dict is not None:
                session.logout(**session_dict)
        return (session, session_dict)
| 41.530612
| 124
| 0.710565
|
import requests
from st2common.runners.base_action import Action
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
import logging
logging.basicConfig(level=logging.INFO)
from pyaoscx import session
class ArubaCxBaseAction(Action):
def __init__(self,config):
super(ArubaCxBaseAction, self).__init__(config)
        self.session, self.session_dict = self._get_client()
def _get_client(self):
base_url = "https://{0}/rest/{1}/".format('10.132.0.213', 'v10.04')
print(base_url)
        session_dict = None
        try:
            session_dict = dict(s=session.login(base_url, 'admin', 'siesta3'), url=base_url)
        except Exception as error:
            print('Ran into exception: {}. Logging out..'.format(error))
            if session_dict is not None:
                session.logout(**session_dict)
        return (session, session_dict)
| true
| true
|
1c48cc5812f28abb373c572416f41aba50d03e9c
| 25,683
|
py
|
Python
|
Lib/site-packages/ginga/AstroImage.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | null | null | null |
Lib/site-packages/ginga/AstroImage.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
Lib/site-packages/ginga/AstroImage.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | null | null | null |
#
# AstroImage.py -- Abstraction of an astronomical data image.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import sys
import math
import traceback
import numpy
from ginga.util import wcsmod, io_fits
from ginga.util import wcs, iqcalc
from ginga.BaseImage import BaseImage, ImageError, Header
from ginga.misc import Bunch
from ginga import trcalc
import ginga.util.six as six
from ginga.util.six.moves import map
class AstroHeader(Header):
pass
class AstroImage(BaseImage):
"""
    Abstraction of an astronomical data image.
NOTE: this module is NOT thread-safe!
"""
# class variables for WCS and IO can be set
wcsClass = None
ioClass = None
@classmethod
def set_wcsClass(cls, klass):
cls.wcsClass = klass
@classmethod
def set_ioClass(cls, klass):
cls.ioClass = klass
def __init__(self, data_np=None, metadata=None, logger=None,
name=None, wcsclass=wcsClass, ioclass=ioClass,
inherit_primary_header=False):
BaseImage.__init__(self, data_np=data_np, metadata=metadata,
logger=logger, name=name)
# wcsclass specifies a pluggable WCS module
if wcsclass is None:
wcsclass = wcsmod.WCS
self.wcs = wcsclass(self.logger)
# ioclass specifies a pluggable IO module
if ioclass is None:
ioclass = io_fits.fitsLoaderClass
self.io = ioclass(self.logger)
self.io.register_type('image', self.__class__)
self.inherit_primary_header = inherit_primary_header
if self.inherit_primary_header:
# User wants to inherit from primary header--this will hold it
self._primary_hdr = AstroHeader()
else:
self._primary_hdr = None
if metadata is not None:
header = self.get_header()
self.wcs.load_header(header)
# For navigating multidimensional data
self.naxispath = []
self.revnaxis = []
self._md_data = None
def load_hdu(self, hdu, fobj=None, naxispath=None,
inherit_primary_header=None):
if self.io is None:
# need image loader for the fromHDU() call below
raise ImageError("No IO loader defined")
self.clear_metadata()
# collect HDU header
ahdr = self.get_header()
self.io.fromHDU(hdu, ahdr)
# Set PRIMARY header
if inherit_primary_header is None:
inherit_primary_header = self.inherit_primary_header
if inherit_primary_header and (fobj is not None):
if self._primary_hdr is None:
self._primary_hdr = AstroHeader()
self.io.fromHDU(fobj[0], self._primary_hdr)
data = hdu.data
if data is None:
data = numpy.zeros((0, 0))
elif not isinstance(data, numpy.ndarray):
data = numpy.zeros((0, 0))
elif 0 in data.shape:
data = numpy.zeros((0, 0))
elif len(data.shape) < 2:
# Expand 1D arrays into 1xN array
data = data.reshape((1, data.shape[0]))
# this is a handle to the full data array
self._md_data = data
# this will get reset in set_naxispath() if array is
# multidimensional
self._data = data
if naxispath is None:
naxispath = []
# Set naxispath to drill down to 2D data slice
if len(naxispath) == 0:
naxispath = ([0] * (len(data.shape) - 2))
self.set_naxispath(naxispath)
# Try to make a wcs object on the header
self.wcs.load_header(hdu.header, fobj=fobj)
def load_file(self, filespec, **kwargs):
if self.io is None:
raise ImageError("No IO loader defined")
self.io.load_file(filespec, dstobj=self, **kwargs)
def load_buffer(self, data, dims, dtype, byteswap=False,
metadata=None):
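        # Reinterpret the raw byte buffer as a numpy array of `dtype`, optionally
        # byteswapping in place, then reshape to `dims` and install it as this
        # image's data. (numpy.fromstring is deprecated in newer numpy releases.)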
data = numpy.fromstring(data, dtype=dtype)
if byteswap:
data.byteswap(True)
data = data.reshape(dims)
self.set_data(data, metadata=metadata)
def get_mddata(self):
return self._md_data
def set_naxispath(self, naxispath):
"""Choose a slice out of multidimensional data.
"""
revnaxis = list(naxispath)
revnaxis.reverse()
# construct slice view and extract it
view = revnaxis + [slice(None), slice(None)]
data = self.get_mddata()[view]
if len(data.shape) != 2:
raise ImageError(
"naxispath does not lead to a 2D slice: {}".format(naxispath))
self.naxispath = naxispath
self.revnaxis = revnaxis
self.set_data(data)
def set_wcs(self, wcs):
self.wcs = wcs
def set_io(self, io):
self.io = io
def get_data_size(self):
return self.get_size()
def get_header(self, create=True):
try:
# By convention, the fits header is stored in a dictionary
# under the metadata keyword 'header'
hdr = self.metadata['header']
if self.inherit_primary_header and self._primary_hdr is not None:
# Inherit PRIMARY header for display but keep metadata intact
displayhdr = AstroHeader()
for key in hdr.keyorder:
card = hdr.get_card(key)
bnch = displayhdr.__setitem__(card.key, card.value)
bnch.comment = card.comment
for key in self._primary_hdr.keyorder:
if key not in hdr:
card = self._primary_hdr.get_card(key)
bnch = displayhdr.__setitem__(card.key, card.value)
bnch.comment = card.comment
else:
# Normal, separate header
displayhdr = hdr
except KeyError as e:
if not create:
raise e
hdr = AstroHeader()
self.metadata['header'] = hdr
displayhdr = hdr
return displayhdr
def get_keyword(self, kwd, *args):
"""Get an item from the fits header, if any."""
try:
kwds = self.get_header()
return kwds[kwd]
except KeyError:
# return a default if there is one
if len(args) > 0:
return args[0]
raise KeyError(kwd)
def get_keywords_list(self, *args):
return list(map(self.get_keyword, args))
def set_keyword(self, kwd, value, create=True):
kwds = self.get_header(create=create)
kwd = kwd.upper()
if not create:
prev = kwds[kwd] # noqa, this raises KeyError
kwds[kwd] = value
def update_keywords(self, keyDict):
hdr = self.get_header()
# Upcase all keywords
for kwd, val in keyDict.items():
hdr[kwd.upper()] = val
# Try to make a wcs object on the header
if hasattr(self, 'wcs'):
self.wcs.load_header(hdr)
def set_keywords(self, **kwds):
"""Set an item in the fits header, if any."""
return self.update_keywords(kwds)
def update_data(self, data_np, metadata=None, astype=None):
"""DO NOT USE: this method will be deprecated!
"""
self.set_data(data_np.copy(), metadata=metadata,
astype=astype)
def update_metadata(self, keyDict):
for key, val in keyDict.items():
self.metadata[key] = val
# refresh the WCS
if hasattr(self, 'wcs'):
header = self.get_header()
self.wcs.load_header(header)
def clear_all(self):
# clear metadata and data
super(AstroImage, self).clear_all()
# unreference full data array
self._md_data = self._data
def transfer(self, other, astype=None):
data = self._get_data()
other.update_data(data, astype=astype)
other.update_metadata(self.metadata)
def copy(self, astype=None):
data = self._get_data()
other = AstroImage(data, logger=self.logger)
self.transfer(other, astype=astype)
return other
def save_as_file(self, filepath, **kwdargs):
data = self._get_data()
header = self.get_header()
self.io.save_as_file(filepath, data, header, **kwdargs)
def pixtocoords(self, x, y, system=None, coords='data'):
args = [x, y] + self.revnaxis
return self.wcs.pixtocoords(args, system=system, coords=coords)
def spectral_coord(self, coords='data'):
args = [0, 0] + self.revnaxis
return self.wcs.spectral_coord(args, coords=coords)
def pixtoradec(self, x, y, format='deg', coords='data'):
args = [x, y] + self.revnaxis
ra_deg, dec_deg = self.wcs.pixtoradec(args, coords=coords)
if format == 'deg':
return ra_deg, dec_deg
return wcs.deg2fmt(ra_deg, dec_deg, format)
def radectopix(self, ra_deg, dec_deg, format='deg', coords='data'):
if format != 'deg':
# convert coordinates to degrees
ra_deg = wcs.lon_to_deg(ra_deg)
dec_deg = wcs.lat_to_deg(dec_deg)
return self.wcs.radectopix(ra_deg, dec_deg, coords=coords,
naxispath=self.revnaxis)
# -----> TODO: merge into wcs.py ?
#
def get_starsep_XY(self, x1, y1, x2, y2):
# source point
ra_org, dec_org = self.pixtoradec(x1, y1)
# destination point
ra_dst, dec_dst = self.pixtoradec(x2, y2)
return wcs.get_starsep_RaDecDeg(ra_org, dec_org, ra_dst, dec_dst)
def calc_radius_xy(self, x, y, radius_deg):
"""Calculate a radius (in pixels) from the point (x, y) to a circle
defined by radius in degrees.
"""
# calculate ra/dec of x,y pixel
ra_deg, dec_deg = self.pixtoradec(x, y)
# Calculate position 1 degree from the given one
# NOTE: this needs to add in DEC, not RA
ra2_deg, dec2_deg = wcs.add_offset_radec(ra_deg, dec_deg,
0.0, 1.0)
# Calculate the length of this segment--it is pixels/deg
x2, y2 = self.radectopix(ra2_deg, dec2_deg)
px_per_deg_e = math.sqrt(math.fabs(x2-x)**2 + math.fabs(y2-y)**2)
# calculate radius based on desired radius_deg
radius_px = px_per_deg_e * radius_deg
return radius_px
def calc_radius_deg2pix(self, ra_deg, dec_deg, delta_deg,
equinox=None):
x, y = self.radectopix(ra_deg, dec_deg, equinox=equinox)
return self.calc_radius_xy(x, y, delta_deg)
def add_offset_xy(self, x, y, delta_deg_x, delta_deg_y):
# calculate ra/dec of x,y pixel
ra_deg, dec_deg = self.pixtoradec(x, y)
# add offsets
ra2_deg, dec2_deg = wcs.add_offset_radec(ra_deg, dec_deg,
delta_deg_x, delta_deg_y)
# then back to new pixel coords
x2, y2 = self.radectopix(ra2_deg, dec2_deg)
return (x2, y2)
def calc_radius_center(self, delta_deg):
return self.calc_radius_xy(float(self.width / 2.0),
float(self.height / 2.0),
delta_deg)
def calc_compass(self, x, y, len_deg_e, len_deg_n):
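        # Return the compass origin plus the pixel endpoints of the north and
        # east arms, each offset from (x, y) by the requested length in degrees.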
# Get east and north coordinates
xe, ye = self.add_offset_xy(x, y, len_deg_e, 0.0)
xe = int(round(xe))
ye = int(round(ye))
xn, yn = self.add_offset_xy(x, y, 0.0, len_deg_n)
xn = int(round(xn))
yn = int(round(yn))
return (x, y, xn, yn, xe, ye)
def calc_compass_radius(self, x, y, radius_px):
xe, ye = self.add_offset_xy(x, y, 1.0, 0.0)
xn, yn = self.add_offset_xy(x, y, 0.0, 1.0)
# now calculate the length in pixels of those arcs
# (planar geometry is good enough here)
px_per_deg_e = math.sqrt(math.fabs(ye - y)**2 + math.fabs(xe - x)**2)
px_per_deg_n = math.sqrt(math.fabs(yn - y)**2 + math.fabs(xn - x)**2)
# now calculate the arm length in degrees for each arm
# (this produces same-length arms)
len_deg_e = radius_px / px_per_deg_e
len_deg_n = radius_px / px_per_deg_n
return self.calc_compass(x, y, len_deg_e, len_deg_n)
def calc_compass_center(self):
# calculate center of data
x = float(self.width) / 2.0
y = float(self.height) / 2.0
# radius we want the arms to be (approx 1/4 the smallest dimension)
radius_px = float(min(self.width, self.height)) / 4.0
return self.calc_compass_radius(x, y, radius_px)
#
# <----- TODO: merge this into wcs.py ?
def get_wcs_rotation_deg(self):
header = self.get_header()
(rot, cdelt1, cdelt2) = wcs.get_rotation_and_scale(header)
return rot
def rotate(self, deg, update_wcs=False):
#old_deg = self.get_wcs_rotation_deg()
super(AstroImage, self).rotate(deg)
# TODO: currently this is not working!
## if update_wcs:
## self.wcs.rotate(deg)
def mosaic_inline(self, imagelist, bg_ref=None, trim_px=None,
merge=False, allow_expand=True, expand_pad_deg=0.01,
max_expand_pct=None,
update_minmax=True, suppress_callback=False):
"""Drops new images into the current image (if there is room),
relocating them according to the WCS between the two images.
"""
# Get our own (mosaic) rotation and scale
header = self.get_header()
((xrot_ref, yrot_ref),
(cdelt1_ref, cdelt2_ref)) = wcs.get_xy_rotation_and_scale(header)
scale_x, scale_y = math.fabs(cdelt1_ref), math.fabs(cdelt2_ref)
# drop each image in the right place in the new data array
mydata = self._get_data()
count = 1
res = []
for image in imagelist:
name = image.get('name', 'image%d' % (count))
count += 1
data_np = image._get_data()
# Calculate sky position at the center of the piece
ctr_x, ctr_y = trcalc.get_center(data_np)
ra, dec = image.pixtoradec(ctr_x, ctr_y)
# User specified a trim? If so, trim edge pixels from each
# side of the array
ht, wd = data_np.shape[:2]
if trim_px:
xlo, xhi = trim_px, wd - trim_px
ylo, yhi = trim_px, ht - trim_px
data_np = data_np[ylo:yhi, xlo:xhi, ...]
ht, wd = data_np.shape[:2]
# If caller asked us to match background of pieces then
# get the median of this piece
if bg_ref is not None:
bg = iqcalc.get_median(data_np)
bg_inc = bg_ref - bg
data_np = data_np + bg_inc
# Determine max/min to update our values
if update_minmax:
maxval = numpy.nanmax(data_np)
minval = numpy.nanmin(data_np)
self.maxval = max(self.maxval, maxval)
self.minval = min(self.minval, minval)
# Get rotation and scale of piece
header = image.get_header()
((xrot, yrot),
(cdelt1, cdelt2)) = wcs.get_xy_rotation_and_scale(header)
self.logger.debug("image(%s) xrot=%f yrot=%f cdelt1=%f "
"cdelt2=%f" % (name, xrot, yrot, cdelt1, cdelt2))
# scale if necessary
# TODO: combine with rotation?
if (not numpy.isclose(math.fabs(cdelt1), scale_x) or
not numpy.isclose(math.fabs(cdelt2), scale_y)):
nscale_x = math.fabs(cdelt1) / scale_x
nscale_y = math.fabs(cdelt2) / scale_y
self.logger.debug("scaling piece by x(%f), y(%f)" % (
nscale_x, nscale_y))
data_np, (ascale_x, ascale_y) = trcalc.get_scaled_cutout_basic(
data_np, 0, 0, wd-1, ht-1, nscale_x, nscale_y,
logger=self.logger)
# Rotate piece into our orientation, according to wcs
rot_dx, rot_dy = xrot - xrot_ref, yrot - yrot_ref
flip_x = False
flip_y = False
# Optimization for 180-degree rotations
if (numpy.isclose(math.fabs(rot_dx), 180.0) or
numpy.isclose(math.fabs(rot_dy), 180.0)):
rotdata = trcalc.transform(data_np,
flip_x=True, flip_y=True)
rot_dx = 0.0
rot_dy = 0.0
else:
rotdata = data_np
# Finish with any necessary rotation of piece
if not numpy.isclose(rot_dy, 0.0):
rot_deg = rot_dy
self.logger.debug("rotating %s by %f deg" % (name, rot_deg))
rotdata = trcalc.rotate(rotdata, rot_deg,
#rotctr_x=ctr_x, rotctr_y=ctr_y
logger=self.logger)
# Flip X due to negative CDELT1
if numpy.sign(cdelt1) != numpy.sign(cdelt1_ref):
flip_x = True
# Flip Y due to negative CDELT2
if numpy.sign(cdelt2) != numpy.sign(cdelt2_ref):
flip_y = True
if flip_x or flip_y:
rotdata = trcalc.transform(rotdata,
flip_x=flip_x, flip_y=flip_y)
# Get size and data of new image
ht, wd = rotdata.shape[:2]
ctr_x, ctr_y = trcalc.get_center(rotdata)
# Find location of image piece (center) in our array
x0, y0 = self.radectopix(ra, dec)
# Merge piece as closely as possible into our array
# Unfortunately we lose a little precision rounding to the
# nearest pixel--can't be helped with this approach
x0, y0 = int(round(x0)), int(round(y0))
self.logger.debug("Fitting image '%s' into mosaic at %d,%d" % (
name, x0, y0))
# This is for useful debugging info only
my_ctr_x, my_ctr_y = trcalc.get_center(mydata)
off_x, off_y = x0 - my_ctr_x, y0 - my_ctr_y
self.logger.debug("centering offsets: %d,%d" % (off_x, off_y))
# Sanity check piece placement
xlo, xhi = x0 - ctr_x, x0 + wd - ctr_x
ylo, yhi = y0 - ctr_y, y0 + ht - ctr_y
assert (xhi - xlo == wd), \
Exception("Width differential %d != %d" % (xhi - xlo, wd))
assert (yhi - ylo == ht), \
Exception("Height differential %d != %d" % (yhi - ylo, ht))
mywd, myht = self.get_size()
if xlo < 0 or xhi > mywd or ylo < 0 or yhi > myht:
if not allow_expand:
raise Exception("New piece doesn't fit on image and "
"allow_expand=False")
# <-- Resize our data array to allow the new image
# determine amount to pad expansion by
expand_x = max(int(expand_pad_deg / scale_x), 0)
expand_y = max(int(expand_pad_deg / scale_y), 0)
nx1_off, nx2_off = 0, 0
if xlo < 0:
nx1_off = abs(xlo) + expand_x
if xhi > mywd:
nx2_off = (xhi - mywd) + expand_x
xlo, xhi = xlo + nx1_off, xhi + nx1_off
ny1_off, ny2_off = 0, 0
if ylo < 0:
ny1_off = abs(ylo) + expand_y
if yhi > myht:
ny2_off = (yhi - myht) + expand_y
ylo, yhi = ylo + ny1_off, yhi + ny1_off
new_wd = mywd + nx1_off + nx2_off
new_ht = myht + ny1_off + ny2_off
# sanity check on new mosaic size
old_area = mywd * myht
new_area = new_wd * new_ht
expand_pct = new_area / old_area
if ((max_expand_pct is not None) and
(expand_pct > max_expand_pct)):
raise Exception("New area exceeds current one by %.2f %%;"
"increase max_expand_pct (%.2f) to allow" %
(expand_pct*100, max_expand_pct))
# go for it!
new_data = numpy.zeros((new_ht, new_wd))
# place current data into new data
new_data[ny1_off:ny1_off+myht, nx1_off:nx1_off+mywd] = \
mydata
self._data = new_data
mydata = new_data
if (nx1_off > 0) or (ny1_off > 0):
# Adjust our WCS for relocation of the reference pixel
crpix1, crpix2 = self.get_keywords_list('CRPIX1', 'CRPIX2')
kwds = dict(CRPIX1=crpix1 + nx1_off,
CRPIX2=crpix2 + ny1_off)
self.update_keywords(kwds)
# fit image piece into our array
try:
if merge:
mydata[ylo:yhi, xlo:xhi, ...] += rotdata[0:ht, 0:wd, ...]
else:
idx = (mydata[ylo:yhi, xlo:xhi, ...] == 0.0)
mydata[ylo:yhi, xlo:xhi, ...][idx] = \
rotdata[0:ht, 0:wd, ...][idx]
except Exception as e:
self.logger.error("Error fitting tile: %s" % (str(e)))
raise
res.append((xlo, ylo, xhi, yhi))
# TODO: recalculate min and max values
# Can't use usual techniques because it adds too much time to the
# mosaicking
#self._set_minmax()
# Notify watchers that our data has changed
if not suppress_callback:
self.make_callback('modified')
return res
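# Hypothetical usage sketch (annotation added for clarity, not original
# code; tile1/tile2 are placeholder names and the mosaic image is assumed
# to already carry a valid WCS header):
#     mosaic = AstroImage(numpy.zeros((2048, 2048)), logger=logger)
#     boxes = mosaic.mosaic_inline([tile1, tile2], trim_px=2, merge=False)
# Each entry of the returned list is the (xlo, ylo, xhi, yhi) box a tile
# was placed into; with merge=False a tile only fills pixels that are
# still zero.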
def info_xy(self, data_x, data_y, settings):
# Get the value under the data coordinates
try:
# We report the value across the pixel, even though the coords
# change halfway across the pixel
value = self.get_data_xy(int(data_x+0.5), int(data_y+0.5))
except Exception as e:
value = None
system = settings.get('wcs_coords', None)
format = settings.get('wcs_display', 'sexagesimal')
ra_lbl, dec_lbl = six.unichr(945), six.unichr(948)
# Calculate WCS coords, if available
try:
if self.wcs is None:
self.logger.debug("No WCS for this image")
ra_txt = dec_txt = 'NO WCS'
elif self.wcs.coordsys == 'raw':
self.logger.debug("No coordinate system determined")
ra_txt = dec_txt = 'NO WCS'
elif self.wcs.coordsys == 'pixel':
args = [data_x, data_y] + self.revnaxis
x, y = self.wcs.pixtosystem(args, system=system, coords='data')
ra_txt = "%+.3f" % (x)
dec_txt = "%+.3f" % (y)
ra_lbl, dec_lbl = "X", "Y"
else:
args = [data_x, data_y] + self.revnaxis
lon_deg, lat_deg = self.wcs.pixtosystem(
args, system=system, coords='data')
if format == 'sexagesimal':
if system in ('galactic', 'ecliptic'):
sign, deg, min, sec = wcs.degToDms(lon_deg,
isLatitude=False)
ra_txt = '+%03d:%02d:%06.3f' % (deg, min, sec)
else:
deg, min, sec = wcs.degToHms(lon_deg)
ra_txt = '%02d:%02d:%06.3f' % (deg, min, sec)
sign, deg, min, sec = wcs.degToDms(lat_deg)
if sign < 0:
sign = '-'
else:
sign = '+'
dec_txt = '%s%02d:%02d:%06.3f' % (sign, deg, min, sec)
else:
ra_txt = '%+10.7f' % (lon_deg)
dec_txt = '%+10.7f' % (lat_deg)
if system == 'galactic':
ra_lbl, dec_lbl = "l", "b"
elif system == 'ecliptic':
ra_lbl, dec_lbl = six.unichr(0x03BB), six.unichr(0x03B2)
elif system == 'helioprojective':
ra_txt = "%+5.3f" % (lon_deg*3600)
dec_txt = "%+5.3f" % (lat_deg*3600)
ra_lbl, dec_lbl = "x-Solar", "y-Solar"
except Exception as e:
self.logger.warning("Bad coordinate conversion: %s" % (
str(e)))
ra_txt = dec_txt = 'BAD WCS'
try:
# log traceback, if possible
(type_, value_, tb) = sys.exc_info()
tb_str = "".join(traceback.format_tb(tb))
self.logger.error("Traceback:\n%s" % (tb_str))
except Exception:
tb_str = "Traceback information unavailable."
self.logger.error(tb_str)
info = Bunch.Bunch(itype='astro', data_x=data_x, data_y=data_y,
x=data_x, y=data_y,
ra_txt=ra_txt, dec_txt=dec_txt,
ra_lbl=ra_lbl, dec_lbl=dec_lbl,
value=value)
return info
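# Note (annotation added for clarity): in the sexagesimal branch above, RA
# is formatted with wcs.degToHms (hours:minutes:seconds, 15 degrees per
# hour), while declination and the galactic/ecliptic longitudes use
# wcs.degToDms (degrees:arcminutes:arcseconds with an explicit sign).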
# END
| 36.021038
| 79
| 0.539501
|
import sys
import math
import traceback
import numpy
from ginga.util import wcsmod, io_fits
from ginga.util import wcs, iqcalc
from ginga.BaseImage import BaseImage, ImageError, Header
from ginga.misc import Bunch
from ginga import trcalc
import ginga.util.six as six
from ginga.util.six.moves import map
class AstroHeader(Header):
pass
class AstroImage(BaseImage):
wcsClass = None
ioClass = None
@classmethod
def set_wcsClass(cls, klass):
cls.wcsClass = klass
@classmethod
def set_ioClass(cls, klass):
cls.ioClass = klass
def __init__(self, data_np=None, metadata=None, logger=None,
name=None, wcsclass=wcsClass, ioclass=ioClass,
inherit_primary_header=False):
BaseImage.__init__(self, data_np=data_np, metadata=metadata,
logger=logger, name=name)
if wcsclass is None:
wcsclass = wcsmod.WCS
self.wcs = wcsclass(self.logger)
if ioclass is None:
ioclass = io_fits.fitsLoaderClass
self.io = ioclass(self.logger)
self.io.register_type('image', self.__class__)
self.inherit_primary_header = inherit_primary_header
if self.inherit_primary_header:
self._primary_hdr = AstroHeader()
else:
self._primary_hdr = None
if metadata is not None:
header = self.get_header()
self.wcs.load_header(header)
self.naxispath = []
self.revnaxis = []
self._md_data = None
def load_hdu(self, hdu, fobj=None, naxispath=None,
inherit_primary_header=None):
if self.io is None:
raise ImageError("No IO loader defined")
self.clear_metadata()
ahdr = self.get_header()
self.io.fromHDU(hdu, ahdr)
if inherit_primary_header is None:
inherit_primary_header = self.inherit_primary_header
if inherit_primary_header and (fobj is not None):
if self._primary_hdr is None:
self._primary_hdr = AstroHeader()
self.io.fromHDU(fobj[0], self._primary_hdr)
data = hdu.data
if data is None:
data = numpy.zeros((0, 0))
elif not isinstance(data, numpy.ndarray):
data = numpy.zeros((0, 0))
elif 0 in data.shape:
data = numpy.zeros((0, 0))
elif len(data.shape) < 2:
data = data.reshape((1, data.shape[0]))
self._md_data = data
self._data = data
if naxispath is None:
naxispath = []
if len(naxispath) == 0:
naxispath = ([0] * (len(data.shape) - 2))
self.set_naxispath(naxispath)
self.wcs.load_header(hdu.header, fobj=fobj)
def load_file(self, filespec, **kwargs):
if self.io is None:
raise ImageError("No IO loader defined")
self.io.load_file(filespec, dstobj=self, **kwargs)
def load_buffer(self, data, dims, dtype, byteswap=False,
metadata=None):
data = numpy.fromstring(data, dtype=dtype)
if byteswap:
data.byteswap(True)
data = data.reshape(dims)
self.set_data(data, metadata=metadata)
def get_mddata(self):
return self._md_data
def set_naxispath(self, naxispath):
revnaxis = list(naxispath)
revnaxis.reverse()
view = revnaxis + [slice(None), slice(None)]
data = self.get_mddata()[view]
if len(data.shape) != 2:
raise ImageError(
"naxispath does not lead to a 2D slice: {}".format(naxispath))
self.naxispath = naxispath
self.revnaxis = revnaxis
self.set_data(data)
def set_wcs(self, wcs):
self.wcs = wcs
def set_io(self, io):
self.io = io
def get_data_size(self):
return self.get_size()
def get_header(self, create=True):
try:
hdr = self.metadata['header']
if self.inherit_primary_header and self._primary_hdr is not None:
displayhdr = AstroHeader()
for key in hdr.keyorder:
card = hdr.get_card(key)
bnch = displayhdr.__setitem__(card.key, card.value)
bnch.comment = card.comment
for key in self._primary_hdr.keyorder:
if key not in hdr:
card = self._primary_hdr.get_card(key)
bnch = displayhdr.__setitem__(card.key, card.value)
bnch.comment = card.comment
else:
displayhdr = hdr
except KeyError as e:
if not create:
raise e
hdr = AstroHeader()
self.metadata['header'] = hdr
displayhdr = hdr
return displayhdr
def get_keyword(self, kwd, *args):
try:
kwds = self.get_header()
return kwds[kwd]
except KeyError:
if len(args) > 0:
return args[0]
raise KeyError(kwd)
def get_keywords_list(self, *args):
return list(map(self.get_keyword, args))
def set_keyword(self, kwd, value, create=True):
kwds = self.get_header(create=create)
kwd = kwd.upper()
if not create:
prev = kwds[kwd]
kwds[kwd] = value
def update_keywords(self, keyDict):
hdr = self.get_header()
for kwd, val in keyDict.items():
hdr[kwd.upper()] = val
if hasattr(self, 'wcs'):
self.wcs.load_header(hdr)
def set_keywords(self, **kwds):
return self.update_keywords(kwds)
def update_data(self, data_np, metadata=None, astype=None):
self.set_data(data_np.copy(), metadata=metadata,
astype=astype)
def update_metadata(self, keyDict):
for key, val in keyDict.items():
self.metadata[key] = val
if hasattr(self, 'wcs'):
header = self.get_header()
self.wcs.load_header(header)
def clear_all(self):
super(AstroImage, self).clear_all()
self._md_data = self._data
def transfer(self, other, astype=None):
data = self._get_data()
other.update_data(data, astype=astype)
other.update_metadata(self.metadata)
def copy(self, astype=None):
data = self._get_data()
other = AstroImage(data, logger=self.logger)
self.transfer(other, astype=astype)
return other
def save_as_file(self, filepath, **kwdargs):
data = self._get_data()
header = self.get_header()
self.io.save_as_file(filepath, data, header, **kwdargs)
def pixtocoords(self, x, y, system=None, coords='data'):
args = [x, y] + self.revnaxis
return self.wcs.pixtocoords(args, system=system, coords=coords)
def spectral_coord(self, coords='data'):
args = [0, 0] + self.revnaxis
return self.wcs.spectral_coord(args, coords=coords)
def pixtoradec(self, x, y, format='deg', coords='data'):
args = [x, y] + self.revnaxis
ra_deg, dec_deg = self.wcs.pixtoradec(args, coords=coords)
if format == 'deg':
return ra_deg, dec_deg
return wcs.deg2fmt(ra_deg, dec_deg, format)
def radectopix(self, ra_deg, dec_deg, format='deg', coords='data'):
if format != 'deg':
ra_deg = wcs.lon_to_deg(ra_deg)
dec_deg = wcs.lat_to_deg(dec_deg)
return self.wcs.radectopix(ra_deg, dec_deg, coords=coords,
naxispath=self.revnaxis)
def get_starsep_XY(self, x1, y1, x2, y2):
ra_org, dec_org = self.pixtoradec(x1, y1)
ra_dst, dec_dst = self.pixtoradec(x2, y2)
return wcs.get_starsep_RaDecDeg(ra_org, dec_org, ra_dst, dec_dst)
def calc_radius_xy(self, x, y, radius_deg):
ra_deg, dec_deg = self.pixtoradec(x, y)
ra2_deg, dec2_deg = wcs.add_offset_radec(ra_deg, dec_deg,
0.0, 1.0)
x2, y2 = self.radectopix(ra2_deg, dec2_deg)
px_per_deg_e = math.sqrt(math.fabs(x2-x)**2 + math.fabs(y2-y)**2)
radius_px = px_per_deg_e * radius_deg
return radius_px
def calc_radius_deg2pix(self, ra_deg, dec_deg, delta_deg,
equinox=None):
x, y = self.radectopix(ra_deg, dec_deg, equinox=equinox)
return self.calc_radius_xy(x, y, delta_deg)
def add_offset_xy(self, x, y, delta_deg_x, delta_deg_y):
ra_deg, dec_deg = self.pixtoradec(x, y)
ra2_deg, dec2_deg = wcs.add_offset_radec(ra_deg, dec_deg,
delta_deg_x, delta_deg_y)
x2, y2 = self.radectopix(ra2_deg, dec2_deg)
return (x2, y2)
def calc_radius_center(self, delta_deg):
return self.calc_radius_xy(float(self.width / 2.0),
float(self.height / 2.0),
delta_deg)
def calc_compass(self, x, y, len_deg_e, len_deg_n):
xe, ye = self.add_offset_xy(x, y, len_deg_e, 0.0)
xe = int(round(xe))
ye = int(round(ye))
xn, yn = self.add_offset_xy(x, y, 0.0, len_deg_n)
xn = int(round(xn))
yn = int(round(yn))
return (x, y, xn, yn, xe, ye)
def calc_compass_radius(self, x, y, radius_px):
xe, ye = self.add_offset_xy(x, y, 1.0, 0.0)
xn, yn = self.add_offset_xy(x, y, 0.0, 1.0)
px_per_deg_e = math.sqrt(math.fabs(ye - y)**2 + math.fabs(xe - x)**2)
px_per_deg_n = math.sqrt(math.fabs(yn - y)**2 + math.fabs(xn - x)**2)
len_deg_e = radius_px / px_per_deg_e
len_deg_n = radius_px / px_per_deg_n
return self.calc_compass(x, y, len_deg_e, len_deg_n)
def calc_compass_center(self):
x = float(self.width) / 2.0
y = float(self.height) / 2.0
radius_px = float(min(self.width, self.height)) / 4.0
return self.calc_compass_radius(x, y, radius_px)
def get_wcs_rotation_deg(self):
header = self.get_header()
(rot, cdelt1, cdelt2) = wcs.get_rotation_and_scale(header)
return rot
def rotate(self, deg, update_wcs=False):
super(AstroImage, self).rotate(deg)
def mosaic_inline(self, imagelist, bg_ref=None, trim_px=None,
merge=False, allow_expand=True, expand_pad_deg=0.01,
max_expand_pct=None,
update_minmax=True, suppress_callback=False):
header = self.get_header()
((xrot_ref, yrot_ref),
(cdelt1_ref, cdelt2_ref)) = wcs.get_xy_rotation_and_scale(header)
scale_x, scale_y = math.fabs(cdelt1_ref), math.fabs(cdelt2_ref)
mydata = self._get_data()
count = 1
res = []
for image in imagelist:
name = image.get('name', 'image%d' % (count))
count += 1
data_np = image._get_data()
ctr_x, ctr_y = trcalc.get_center(data_np)
ra, dec = image.pixtoradec(ctr_x, ctr_y)
ht, wd = data_np.shape[:2]
if trim_px:
xlo, xhi = trim_px, wd - trim_px
ylo, yhi = trim_px, ht - trim_px
data_np = data_np[ylo:yhi, xlo:xhi, ...]
ht, wd = data_np.shape[:2]
if bg_ref is not None:
bg = iqcalc.get_median(data_np)
bg_inc = bg_ref - bg
data_np = data_np + bg_inc
if update_minmax:
maxval = numpy.nanmax(data_np)
minval = numpy.nanmin(data_np)
self.maxval = max(self.maxval, maxval)
self.minval = min(self.minval, minval)
header = image.get_header()
((xrot, yrot),
(cdelt1, cdelt2)) = wcs.get_xy_rotation_and_scale(header)
self.logger.debug("image(%s) xrot=%f yrot=%f cdelt1=%f "
"cdelt2=%f" % (name, xrot, yrot, cdelt1, cdelt2))
if (not numpy.isclose(math.fabs(cdelt1), scale_x) or
not numpy.isclose(math.fabs(cdelt2), scale_y)):
nscale_x = math.fabs(cdelt1) / scale_x
nscale_y = math.fabs(cdelt2) / scale_y
self.logger.debug("scaling piece by x(%f), y(%f)" % (
nscale_x, nscale_y))
data_np, (ascale_x, ascale_y) = trcalc.get_scaled_cutout_basic(
data_np, 0, 0, wd-1, ht-1, nscale_x, nscale_y,
logger=self.logger)
rot_dx, rot_dy = xrot - xrot_ref, yrot - yrot_ref
flip_x = False
flip_y = False
if (numpy.isclose(math.fabs(rot_dx), 180.0) or
numpy.isclose(math.fabs(rot_dy), 180.0)):
rotdata = trcalc.transform(data_np,
flip_x=True, flip_y=True)
rot_dx = 0.0
rot_dy = 0.0
else:
rotdata = data_np
if not numpy.isclose(rot_dy, 0.0):
rot_deg = rot_dy
self.logger.debug("rotating %s by %f deg" % (name, rot_deg))
rotdata = trcalc.rotate(rotdata, rot_deg,
logger=self.logger)
if numpy.sign(cdelt1) != numpy.sign(cdelt1_ref):
flip_x = True
if numpy.sign(cdelt2) != numpy.sign(cdelt2_ref):
flip_y = True
if flip_x or flip_y:
rotdata = trcalc.transform(rotdata,
flip_x=flip_x, flip_y=flip_y)
ht, wd = rotdata.shape[:2]
ctr_x, ctr_y = trcalc.get_center(rotdata)
x0, y0 = self.radectopix(ra, dec)
x0, y0 = int(round(x0)), int(round(y0))
self.logger.debug("Fitting image '%s' into mosaic at %d,%d" % (
name, x0, y0))
# This is for useful debugging info only
my_ctr_x, my_ctr_y = trcalc.get_center(mydata)
off_x, off_y = x0 - my_ctr_x, y0 - my_ctr_y
self.logger.debug("centering offsets: %d,%d" % (off_x, off_y))
# Sanity check piece placement
xlo, xhi = x0 - ctr_x, x0 + wd - ctr_x
ylo, yhi = y0 - ctr_y, y0 + ht - ctr_y
assert (xhi - xlo == wd), \
Exception("Width differential %d != %d" % (xhi - xlo, wd))
assert (yhi - ylo == ht), \
Exception("Height differential %d != %d" % (yhi - ylo, ht))
mywd, myht = self.get_size()
if xlo < 0 or xhi > mywd or ylo < 0 or yhi > myht:
if not allow_expand:
raise Exception("New piece doesn't fit on image and "
"allow_expand=False")
expand_x = max(int(expand_pad_deg / scale_x), 0)
expand_y = max(int(expand_pad_deg / scale_y), 0)
nx1_off, nx2_off = 0, 0
if xlo < 0:
nx1_off = abs(xlo) + expand_x
if xhi > mywd:
nx2_off = (xhi - mywd) + expand_x
xlo, xhi = xlo + nx1_off, xhi + nx1_off
ny1_off, ny2_off = 0, 0
if ylo < 0:
ny1_off = abs(ylo) + expand_y
if yhi > myht:
ny2_off = (yhi - myht) + expand_y
ylo, yhi = ylo + ny1_off, yhi + ny1_off
new_wd = mywd + nx1_off + nx2_off
new_ht = myht + ny1_off + ny2_off
old_area = mywd * myht
new_area = new_wd * new_ht
expand_pct = new_area / old_area
if ((max_expand_pct is not None) and
(expand_pct > max_expand_pct)):
raise Exception("New area exceeds current one by %.2f %%;"
"increase max_expand_pct (%.2f) to allow" %
(expand_pct*100, max_expand_pct))
new_data = numpy.zeros((new_ht, new_wd))
new_data[ny1_off:ny1_off+myht, nx1_off:nx1_off+mywd] = \
mydata
self._data = new_data
mydata = new_data
if (nx1_off > 0) or (ny1_off > 0):
crpix1, crpix2 = self.get_keywords_list('CRPIX1', 'CRPIX2')
kwds = dict(CRPIX1=crpix1 + nx1_off,
CRPIX2=crpix2 + ny1_off)
self.update_keywords(kwds)
try:
if merge:
mydata[ylo:yhi, xlo:xhi, ...] += rotdata[0:ht, 0:wd, ...]
else:
idx = (mydata[ylo:yhi, xlo:xhi, ...] == 0.0)
mydata[ylo:yhi, xlo:xhi, ...][idx] = \
rotdata[0:ht, 0:wd, ...][idx]
except Exception as e:
self.logger.error("Error fitting tile: %s" % (str(e)))
raise
res.append((xlo, ylo, xhi, yhi))
# mosaicking
#self._set_minmax()
# Notify watchers that our data has changed
if not suppress_callback:
self.make_callback('modified')
return res
def info_xy(self, data_x, data_y, settings):
# Get the value under the data coordinates
try:
# We report the value across the pixel, even though the coords
# change halfway across the pixel
value = self.get_data_xy(int(data_x+0.5), int(data_y+0.5))
except Exception as e:
value = None
system = settings.get('wcs_coords', None)
format = settings.get('wcs_display', 'sexagesimal')
ra_lbl, dec_lbl = six.unichr(945), six.unichr(948)
# Calculate WCS coords, if available
try:
if self.wcs is None:
self.logger.debug("No WCS for this image")
ra_txt = dec_txt = 'NO WCS'
elif self.wcs.coordsys == 'raw':
self.logger.debug("No coordinate system determined")
ra_txt = dec_txt = 'NO WCS'
elif self.wcs.coordsys == 'pixel':
args = [data_x, data_y] + self.revnaxis
x, y = self.wcs.pixtosystem(args, system=system, coords='data')
ra_txt = "%+.3f" % (x)
dec_txt = "%+.3f" % (y)
ra_lbl, dec_lbl = "X", "Y"
else:
args = [data_x, data_y] + self.revnaxis
lon_deg, lat_deg = self.wcs.pixtosystem(
args, system=system, coords='data')
if format == 'sexagesimal':
if system in ('galactic', 'ecliptic'):
sign, deg, min, sec = wcs.degToDms(lon_deg,
isLatitude=False)
ra_txt = '+%03d:%02d:%06.3f' % (deg, min, sec)
else:
deg, min, sec = wcs.degToHms(lon_deg)
ra_txt = '%02d:%02d:%06.3f' % (deg, min, sec)
sign, deg, min, sec = wcs.degToDms(lat_deg)
if sign < 0:
sign = '-'
else:
sign = '+'
dec_txt = '%s%02d:%02d:%06.3f' % (sign, deg, min, sec)
else:
ra_txt = '%+10.7f' % (lon_deg)
dec_txt = '%+10.7f' % (lat_deg)
if system == 'galactic':
ra_lbl, dec_lbl = "l", "b"
elif system == 'ecliptic':
ra_lbl, dec_lbl = six.unichr(0x03BB), six.unichr(0x03B2)
elif system == 'helioprojective':
ra_txt = "%+5.3f" % (lon_deg*3600)
dec_txt = "%+5.3f" % (lat_deg*3600)
ra_lbl, dec_lbl = "x-Solar", "y-Solar"
except Exception as e:
self.logger.warning("Bad coordinate conversion: %s" % (
str(e)))
ra_txt = dec_txt = 'BAD WCS'
try:
# log traceback, if possible
(type_, value_, tb) = sys.exc_info()
tb_str = "".join(traceback.format_tb(tb))
self.logger.error("Traceback:\n%s" % (tb_str))
except Exception:
tb_str = "Traceback information unavailable."
self.logger.error(tb_str)
info = Bunch.Bunch(itype='astro', data_x=data_x, data_y=data_y,
x=data_x, y=data_y,
ra_txt=ra_txt, dec_txt=dec_txt,
ra_lbl=ra_lbl, dec_lbl=dec_lbl,
value=value)
return info
# END
| true
| true
|
1c48cd2d9e1d346720ef488aece053fcba1c3248
| 1,098
|
py
|
Python
|
datasets.py
|
beiyan1911/conditional_aia_generation
|
0ace640d6e8dae41b63f26809a494b88cc3718e2
|
[
"Apache-2.0"
] | 1
|
2020-12-22T07:20:41.000Z
|
2020-12-22T07:20:41.000Z
|
datasets.py
|
beiyan1911/conditional_aia_generation
|
0ace640d6e8dae41b63f26809a494b88cc3718e2
|
[
"Apache-2.0"
] | null | null | null |
datasets.py
|
beiyan1911/conditional_aia_generation
|
0ace640d6e8dae41b63f26809a494b88cc3718e2
|
[
"Apache-2.0"
] | null | null | null |
import os.path
import torch
from glob2 import glob
from torch.utils.data.dataset import Dataset
from utils.him import fitsread
import numpy as np
class AIADataset(Dataset):
def __init__(self, dataroot):
self.paths = sorted(glob(os.path.join(dataroot, '*.fits')))
# ['0211', '0094', '0335', '0193', '0131', '0171']
def __getitem__(self, index):
path = self.paths[index]
name = os.path.basename(path)
fit_data = fitsread(path)[0]
in_idx = [0, 2, 3, 4, 5]
out_idx = [1]
inputs = np.stack([fit_data[i] for i in in_idx])
outputs = np.stack([fit_data[i] for i in out_idx])
inputs_t = torch.from_numpy(inputs)
labels_t = torch.from_numpy(outputs)
# return inputs_t, labels_t, name
return {'inputs': inputs_t, 'outputs': labels_t, 'name': name}
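# Note (annotation added for clarity; this reading is inferred from the
# channel-order comment in __init__, not stated by the original author):
# with channels ordered ['0211', '0094', '0335', '0193', '0131', '0171'],
# in_idx = [0, 2, 3, 4, 5] selects every band except '0094' as network
# input, while out_idx = [1] makes the held-out '0094' band the target.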
def __len__(self):
return len(self.paths)
if __name__ == '__main__':
dataroot = '/Volumes/BLBL/datasets/AIA/proce_and_crop_comp_xrt_2012'
dataset = AIADataset(dataroot)
data = dataset.__getitem__(1)
print(data)
| 27.45
| 72
| 0.634791
|
import os.path
import torch
from glob2 import glob
from torch.utils.data.dataset import Dataset
from utils.him import fitsread
import numpy as np
class AIADataset(Dataset):
def __init__(self, dataroot):
self.paths = sorted(glob(os.path.join(dataroot, '*.fits')))
def __getitem__(self, index):
path = self.paths[index]
name = os.path.basename(path)
fit_data = fitsread(path)[0]
in_idx = [0, 2, 3, 4, 5]
out_idx = [1]
inputs = np.stack([fit_data[i] for i in in_idx])
outputs = np.stack([fit_data[i] for i in out_idx])
inputs_t = torch.from_numpy(inputs)
labels_t = torch.from_numpy(outputs)
return {'inputs': inputs_t, 'outputs': labels_t, 'name': name}
def __len__(self):
return len(self.paths)
if __name__ == '__main__':
dataroot = '/Volumes/BLBL/datasets/AIA/proce_and_crop_comp_xrt_2012'
dataset = AIADataset(dataroot)
data = dataset.__getitem__(1)
print(data)
| true
| true
|
1c48cdbe52c0570a8ac0c75a70c17a69f2868711
| 1,458
|
py
|
Python
|
frontends/pytorch/test/torchscript_e2e_test/non_tensor_values.py
|
raikonenfnu/mlir-npcomp
|
29e1b2fe89848d58c9bc07e7df7ce651850a5244
|
[
"Apache-2.0"
] | null | null | null |
frontends/pytorch/test/torchscript_e2e_test/non_tensor_values.py
|
raikonenfnu/mlir-npcomp
|
29e1b2fe89848d58c9bc07e7df7ce651850a5244
|
[
"Apache-2.0"
] | null | null | null |
frontends/pytorch/test/torchscript_e2e_test/non_tensor_values.py
|
raikonenfnu/mlir-npcomp
|
29e1b2fe89848d58c9bc07e7df7ce651850a5244
|
[
"Apache-2.0"
] | null | null | null |
# -*- Python -*-
# This file is licensed under a pytorch-style license
# See frontends/pytorch/LICENSE for license information.
# RUN: %PYTHON %s | FileCheck %s
from typing import List, Tuple, Dict
import torch
from torch_mlir_torchscript.e2e_test.framework import run_tests, TestUtils
from torch_mlir_torchscript.e2e_test.reporting import report_results
from torch_mlir_torchscript.e2e_test.registry import register_test_case, GLOBAL_TEST_REGISTRY
from torch_mlir_torchscript_e2e_test_configs import TorchScriptTestConfig
class NonTensorValuesModule(torch.nn.Module):
def __init__(self):
super().__init__()
@torch.jit.export
def test_list(self, x: List[int]) -> List[int]:
return x
@torch.jit.export
def test_tuple(self, x: int) -> Tuple[int, int]:
return x, x
@torch.jit.export
def test_str(self, x: str) -> str:
return x
@torch.jit.export
def test_dict(self, x: Dict[str, int]) -> Dict[str, int]:
return x
# CHECK: PASS - "NonTensorValuesModule_basic"
@register_test_case(module_factory=lambda: NonTensorValuesModule())
def NonTensorValuesModule_basic(module, tu: TestUtils):
module.test_list([3])
module.test_tuple(3)
module.test_str("hello")
module.test_dict({"a": 1})
def main():
config = TorchScriptTestConfig()
results = run_tests(GLOBAL_TEST_REGISTRY, config)
report_results(results, set())
if __name__ == '__main__':
main()
| 26.509091
| 93
| 0.719479
|
from typing import List, Tuple, Dict
import torch
from torch_mlir_torchscript.e2e_test.framework import run_tests, TestUtils
from torch_mlir_torchscript.e2e_test.reporting import report_results
from torch_mlir_torchscript.e2e_test.registry import register_test_case, GLOBAL_TEST_REGISTRY
from torch_mlir_torchscript_e2e_test_configs import TorchScriptTestConfig
class NonTensorValuesModule(torch.nn.Module):
def __init__(self):
super().__init__()
@torch.jit.export
def test_list(self, x: List[int]) -> List[int]:
return x
@torch.jit.export
def test_tuple(self, x: int) -> Tuple[int, int]:
return x, x
@torch.jit.export
def test_str(self, x: str) -> str:
return x
@torch.jit.export
def test_dict(self, x: Dict[str, int]) -> Dict[str, int]:
return x
@register_test_case(module_factory=lambda: NonTensorValuesModule())
def NonTensorValuesModule_basic(module, tu: TestUtils):
module.test_list([3])
module.test_tuple(3)
module.test_str("hello")
module.test_dict({"a": 1})
def main():
config = TorchScriptTestConfig()
results = run_tests(GLOBAL_TEST_REGISTRY, config)
report_results(results, set())
if __name__ == '__main__':
main()
| true
| true
|
1c48ce53b36f7084a676a404e3f19be8f4b536a5
| 3,668
|
py
|
Python
|
player.py
|
skyleewu/snakepit-game
|
f53a1e90a77160fda917037a81287a2a8534a4c9
|
[
"Unlicense"
] | null | null | null |
player.py
|
skyleewu/snakepit-game
|
f53a1e90a77160fda917037a81287a2a8534a4c9
|
[
"Unlicense"
] | null | null | null |
player.py
|
skyleewu/snakepit-game
|
f53a1e90a77160fda917037a81287a2a8534a4c9
|
[
"Unlicense"
] | null | null | null |
from collections import deque
from random import randint
import settings
from datatypes import Vector, Position, Draw
class Player:
HEAD_CHAR = "@"
BODY_CHAR = "*"
TAIL_CHAR = "+"
DEAD_HEAD_CHAR = "x"
DEAD_BODY_CHAR = "*"
DEAD_TAIL_CHAR = "+"
UP = Vector(0, -1)
DOWN = Vector(0, 1)
LEFT = Vector(-1, 0)
RIGHT = Vector(1, 0)
DIRECTIONS = [UP, DOWN, LEFT, RIGHT]
keymap = {37: LEFT,
38: UP,
39: RIGHT,
40: DOWN
}
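# Note (annotation added for clarity): 37-40 are the standard browser
# keyCode values for the arrow keys (37=Left, 38=Up, 39=Right, 40=Down),
# so client key events can be mapped to directions unchanged.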
def __init__(self, player_id, name, ws):
self._id = player_id
self.name = name
self.ws = ws
self.alive = False
self.direction = None
def new_snake(self, color):
self.color = color
self.grow = 0
self.score = 0
self.alive = True
self.snake = deque()
def render_new_snake(self):
# try to spawn snake at some distance from world's borders
distance = settings.INIT_LENGHT + 2
x = randint(distance, settings.FIELD_SIZE_X - distance)
y = randint(distance, settings.FIELD_SIZE_Y - distance)
self.direction = self.DIRECTIONS[randint(0, 3)]
# create snake from tail to head
render = []
pos = Position(x, y)
for i in range(0, settings.INIT_LENGHT):
self.snake.appendleft(pos)
if i == 0:
char = self.TAIL_CHAR
elif i == settings.INIT_LENGHT - 1:
char = self.HEAD_CHAR
else:
char = self.BODY_CHAR
render.append(Draw(pos.x, pos.y, char, self.color))
pos = self.next_position()
return render
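# Note (annotation added for clarity): INIT_LENGHT is presumably spelled
# this way in the project's settings module, so the misspelling has to be
# kept here for the attribute lookup to succeed.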
def next_position(self):
# next position of the snake's head
return Position(self.snake[0].x + self.direction.xdir,
self.snake[0].y + self.direction.ydir)
def render_move(self):
# moving snake to the next position
render = []
new_head = self.next_position()
self.snake.appendleft(new_head)
# draw head in the next position
render.append(Draw(new_head.x, new_head.y,
self.HEAD_CHAR, self.color))
# draw body in the old place of head
render.append(Draw(self.snake[1].x, self.snake[1].y,
self.BODY_CHAR, self.color))
# if we grow this turn, the tail remains in place
if self.grow > 0:
self.grow -= 1
else:
# otherwise the tail moves
old_tail = self.snake.pop()
render.append(Draw(old_tail.x, old_tail.y, " ", 0))
new_tail = self.snake[-1]
render.append(Draw(new_tail.x, new_tail.y,
self.TAIL_CHAR, self.color))
return render
def render_game_over(self):
render = []
# dead snake
for i, pos in enumerate(self.snake):
if i == 0:
render.append(Draw(pos.x, pos.y, self.DEAD_HEAD_CHAR, 0))
elif i == len(self.snake) - 1:
render.append(Draw(pos.x, pos.y, self.DEAD_TAIL_CHAR, 0))
else:
render.append(Draw(pos.x, pos.y, self.DEAD_BODY_CHAR, 0))
return render
def keypress(self, code):
if not self.alive:
return
direction = self.keymap.get(code)
if direction:
# do not move in the opposite direction
if not (self.direction and
direction.xdir == -self.direction.xdir and
direction.ydir == -self.direction.ydir):
self.direction = direction
| 31.084746
| 73
| 0.541985
|
from collections import deque
from random import randint
import settings
from datatypes import Vector, Position, Draw
class Player:
HEAD_CHAR = "@"
BODY_CHAR = "*"
TAIL_CHAR = "+"
DEAD_HEAD_CHAR = "x"
DEAD_BODY_CHAR = "*"
DEAD_TAIL_CHAR = "+"
UP = Vector(0, -1)
DOWN = Vector(0, 1)
LEFT = Vector(-1, 0)
RIGHT = Vector(1, 0)
DIRECTIONS = [UP, DOWN, LEFT, RIGHT]
keymap = {37: LEFT,
38: UP,
39: RIGHT,
40: DOWN
}
def __init__(self, player_id, name, ws):
self._id = player_id
self.name = name
self.ws = ws
self.alive = False
self.direction = None
def new_snake(self, color):
self.color = color
self.grow = 0
self.score = 0
self.alive = True
self.snake = deque()
def render_new_snake(self):
distance = settings.INIT_LENGHT + 2
x = randint(distance, settings.FIELD_SIZE_X - distance)
y = randint(distance, settings.FIELD_SIZE_Y - distance)
self.direction = self.DIRECTIONS[randint(0, 3)]
# create snake from tail to head
render = []
pos = Position(x, y)
for i in range(0, settings.INIT_LENGHT):
self.snake.appendleft(pos)
if i == 0:
char = self.TAIL_CHAR
elif i == settings.INIT_LENGHT - 1:
char = self.HEAD_CHAR
else:
char = self.BODY_CHAR
render.append(Draw(pos.x, pos.y, char, self.color))
pos = self.next_position()
return render
def next_position(self):
# next position of the snake's head
return Position(self.snake[0].x + self.direction.xdir,
self.snake[0].y + self.direction.ydir)
def render_move(self):
render = []
new_head = self.next_position()
self.snake.appendleft(new_head)
render.append(Draw(new_head.x, new_head.y,
self.HEAD_CHAR, self.color))
render.append(Draw(self.snake[1].x, self.snake[1].y,
self.BODY_CHAR, self.color))
if self.grow > 0:
self.grow -= 1
else:
old_tail = self.snake.pop()
render.append(Draw(old_tail.x, old_tail.y, " ", 0))
new_tail = self.snake[-1]
render.append(Draw(new_tail.x, new_tail.y,
self.TAIL_CHAR, self.color))
return render
def render_game_over(self):
render = []
for i, pos in enumerate(self.snake):
if i == 0:
render.append(Draw(pos.x, pos.y, self.DEAD_HEAD_CHAR, 0))
elif i == len(self.snake) - 1:
render.append(Draw(pos.x, pos.y, self.DEAD_TAIL_CHAR, 0))
else:
render.append(Draw(pos.x, pos.y, self.DEAD_BODY_CHAR, 0))
return render
def keypress(self, code):
if not self.alive:
return
direction = self.keymap.get(code)
if direction:
if not (self.direction and
direction.xdir == -self.direction.xdir and
direction.ydir == -self.direction.ydir):
self.direction = direction
| true
| true
|
1c48ce63cd1200fddbaf276b7685924b6e07921f
| 706
|
py
|
Python
|
ipm_library/ipm_library/exceptions.py
|
ijnek/ipm
|
dee4f2ac99f5d24bd0d2a8c9ff7c748b74727a2f
|
[
"Apache-2.0"
] | 3
|
2022-03-04T15:06:16.000Z
|
2022-03-15T04:00:18.000Z
|
ipm_library/ipm_library/exceptions.py
|
ijnek/ipm
|
dee4f2ac99f5d24bd0d2a8c9ff7c748b74727a2f
|
[
"Apache-2.0"
] | 4
|
2022-03-04T13:52:57.000Z
|
2022-03-27T00:59:08.000Z
|
ipm_library/ipm_library/exceptions.py
|
ijnek/ipm
|
dee4f2ac99f5d24bd0d2a8c9ff7c748b74727a2f
|
[
"Apache-2.0"
] | 2
|
2022-03-04T10:19:35.000Z
|
2022-03-15T01:05:00.000Z
|
# Copyright (c) 2022 Hamburg Bit-Bots
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class NoIntersectionError(Exception):
"""Raised if a point is not able to be projected onto the plane."""
pass
| 35.3
| 74
| 0.749292
|
class NoIntersectionError(Exception):
pass
| true
| true
|
1c48ced8318bfbf20b62fbc295652a0d570fd2fd
| 4,262
|
py
|
Python
|
examples/cifar_generator_cnn.py
|
vishalbelsare/hyperas
|
add2baeaa67a90cb456934395c3bb81ee431a08d
|
[
"MIT"
] | 2,289
|
2016-02-19T18:27:31.000Z
|
2022-03-31T07:25:09.000Z
|
examples/cifar_generator_cnn.py
|
vishalbelsare/hyperas
|
add2baeaa67a90cb456934395c3bb81ee431a08d
|
[
"MIT"
] | 278
|
2016-02-21T12:53:47.000Z
|
2022-03-19T17:37:41.000Z
|
examples/cifar_generator_cnn.py
|
vishalbelsare/hyperas
|
add2baeaa67a90cb456934395c3bb81ee431a08d
|
[
"MIT"
] | 375
|
2016-02-19T22:38:36.000Z
|
2022-02-14T15:48:49.000Z
|
from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import uniform
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.datasets import cifar10
from keras.utils import np_utils
def data():
nb_classes = 10
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# this will do preprocessing and realtime data augmentation
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(X_train)
return datagen, X_train, Y_train, X_test, Y_test
def model(datagen, X_train, Y_train, X_test, Y_test):
batch_size = 32
nb_epoch = 200
# input image dimensions
img_rows, img_cols = 32, 32
# the CIFAR10 images are RGB
img_channels = 3
model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same',
input_shape=X_train.shape[1:]))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
# fit the model on the batches generated by datagen.flow()
model.fit_generator(datagen.flow(X_train, Y_train,
batch_size=batch_size),
samples_per_epoch=X_train.shape[0],
nb_epoch=nb_epoch,
validation_data=(X_test, Y_test))
score, acc = model.evaluate(X_test, Y_test, verbose=0)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
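# Note (annotation added for clarity): the double-brace expressions such as
# Dropout({{uniform(0, 1)}}) are hyperas template placeholders, not plain
# Python; optim.minimize rewrites the source of model() and substitutes
# sampled hyperparameter values before executing it. That source rewriting
# is presumably also how names defined in data(), such as nb_classes, end
# up visible inside model().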
if __name__ == '__main__':
datagen, X_train, Y_train, X_test, Y_test = data()
best_run, best_model = optim.minimize(model=model,
data=data,
algo=tpe.suggest,
max_evals=5,
trials=Trials())
print("Evalutation of best performing model:")
print(best_model.evaluate(X_test, Y_test))
| 37.385965
| 94
| 0.658611
|
from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import uniform
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.datasets import cifar10
from keras.utils import np_utils
def data():
nb_classes = 10
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
vertical_flip=False)
datagen.fit(X_train)
return datagen, X_train, Y_train, X_test, Y_test
def model(datagen, X_train, Y_train, X_test, Y_test):
batch_size = 32
nb_epoch = 200
img_rows, img_cols = 32, 32
img_channels = 3
model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same',
input_shape=X_train.shape[1:]))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
# fit the model on the batches generated by datagen.flow()
model.fit_generator(datagen.flow(X_train, Y_train,
batch_size=batch_size),
samples_per_epoch=X_train.shape[0],
nb_epoch=nb_epoch,
validation_data=(X_test, Y_test))
score, acc = model.evaluate(X_test, Y_test, verbose=0)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
datagen, X_train, Y_train, X_test, Y_test = data()
best_run, best_model = optim.minimize(model=model,
data=data,
algo=tpe.suggest,
max_evals=5,
trials=Trials())
print("Evalutation of best performing model:")
print(best_model.evaluate(X_test, Y_test))
| true
| true
|
1c48cf233b933b90261ca7a69c9a0870d84e1bbe
| 1,369
|
py
|
Python
|
launch/coverage.launch.py
|
slaghuis/coverage_planner
|
4598f2d4aa5baa1ce8aa0078d105fd3d46003e1d
|
[
"Apache-2.0"
] | null | null | null |
launch/coverage.launch.py
|
slaghuis/coverage_planner
|
4598f2d4aa5baa1ce8aa0078d105fd3d46003e1d
|
[
"Apache-2.0"
] | null | null | null |
launch/coverage.launch.py
|
slaghuis/coverage_planner
|
4598f2d4aa5baa1ce8aa0078d105fd3d46003e1d
|
[
"Apache-2.0"
] | null | null | null |
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
ld = LaunchDescription()
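# Note (annotations added for clarity, not original code): the three nodes
# below all come from the coverage_planner package. angle_of_view is in
# radians (1.08559479 rad is roughly 62.2 degrees) and the image resolution
# parameters are in pixels; the planner's heights and overlap are assumed
# to be metres and a 0..1 fraction respectively.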
camera_model_node=Node(
package = 'coverage_planner',
name = 'camera_model_node',
executable = 'camera_model_node',
output="screen",
emulate_tty=True,
parameters = [
{"angle_of_view" : 1.08559479},
{"image_resolution_x" : 1920},
{"image_resolution_y" : 1080}
]
)
coverage_planner_node=Node(
package = 'coverage_planner',
name = 'coverage_planner_node',
executable = 'coverage_planner_node',
output="screen",
emulate_tty=True,
parameters = [
{"overlap" : 0.1},
{"minimum_height" : 5.0},
{"maximum_height" : 30.0}
]
)
photogrammetry_node=Node(
package = 'coverage_planner',
name = 'photogrammetry_node',
executable = 'photogrammetry_node',
output="screen",
emulate_tty=True,
parameters = [
{"images_folder" : "./"}
]
)
ld.add_action(camera_model_node)
ld.add_action(coverage_planner_node)
ld.add_action(photogrammetry_node)
return ld
| 27.38
| 67
| 0.592403
|
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
ld = LaunchDescription()
camera_model_node=Node(
package = 'coverage_planner',
name = 'camera_model_node',
executable = 'camera_model_node',
output="screen",
emulate_tty=True,
parameters = [
{"angle_of_view" : 1.08559479},
{"image_resolution_x" : 1920},
{"image_resolution_y" : 1080}
]
)
coverage_planner_node=Node(
package = 'coverage_planner',
name = 'coverage_planner_node',
executable = 'coverage_planner_node',
output="screen",
emulate_tty=True,
parameters = [
{"overlap" : 0.1},
{"minimum_height" : 5.0},
{"maximum_height" : 30.0}
]
)
photogrammetry_node=Node(
package = 'coverage_planner',
name = 'photogrammetry_node',
executable = 'photogrammetry_node',
output="screen",
emulate_tty=True,
parameters = [
{"images_folder" : "./"}
]
)
ld.add_action(camera_model_node)
ld.add_action(coverage_planner_node)
ld.add_action(photogrammetry_node)
return ld
| true
| true
|
1c48d153497615a5075c13da9738840fefede36e
| 400
|
py
|
Python
|
spacy/tests/regression/test_issue781.py
|
yuukos/spaCy
|
e4125383ed7221910ea955eae9b623c02bda64d8
|
[
"MIT"
] | 1
|
2017-11-18T08:53:26.000Z
|
2017-11-18T08:53:26.000Z
|
spacy/tests/regression/test_issue781.py
|
yuukos/spaCy
|
e4125383ed7221910ea955eae9b623c02bda64d8
|
[
"MIT"
] | null | null | null |
spacy/tests/regression/test_issue781.py
|
yuukos/spaCy
|
e4125383ed7221910ea955eae9b623c02bda64d8
|
[
"MIT"
] | 1
|
2018-08-25T03:09:50.000Z
|
2018-08-25T03:09:50.000Z
|
# coding: utf-8
from __future__ import unicode_literals
import pytest
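# Note (annotation added for clarity): `lemmatizer` below is a pytest
# fixture expected to be supplied by the surrounding test suite's conftest;
# it is not defined in this file.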
# Note: "chromosomes" worked previous the bug fix
@pytest.mark.parametrize('word,lemmas', [("chromosomes", ["chromosome"]), ("endosomes", ["endosome"]), ("colocalizes", ["colocalize", "colocaliz"])])
def test_issue781(lemmatizer, word, lemmas):
assert lemmatizer(word, 'noun', morphology={'number': 'plur'}) == set(lemmas)
| 36.363636
| 149
| 0.71
|
from __future__ import unicode_literals
import pytest
@pytest.mark.parametrize('word,lemmas', [("chromosomes", ["chromosome"]), ("endosomes", ["endosome"]), ("colocalizes", ["colocalize", "colocaliz"])])
def test_issue781(lemmatizer, word, lemmas):
assert lemmatizer(word, 'noun', morphology={'number': 'plur'}) == set(lemmas)
| true
| true
|
1c48d3841e5d7930b22a82350f5553fce4cc1d03
| 7,109
|
py
|
Python
|
pysnmp-with-texts/WWP-VOIP-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/WWP-VOIP-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/WWP-VOIP-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module WWP-VOIP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/WWP-VOIP-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:38:53 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Counter32, Integer32, ModuleIdentity, Bits, iso, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Gauge32, ObjectIdentity, Counter64, MibIdentifier, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Counter32", "Integer32", "ModuleIdentity", "Bits", "iso", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Gauge32", "ObjectIdentity", "Counter64", "MibIdentifier", "Unsigned32")
MacAddress, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "MacAddress", "DisplayString", "TextualConvention")
wwpModules, = mibBuilder.importSymbols("WWP-SMI", "wwpModules")
wwpVoipMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 6141, 2, 15))
wwpVoipMIB.setRevisions(('2001-04-03 17:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: wwpVoipMIB.setRevisionsDescriptions(('Initial creation.',))
if mibBuilder.loadTexts: wwpVoipMIB.setLastUpdated('200104031700Z')
if mibBuilder.loadTexts: wwpVoipMIB.setOrganization('World Wide Packets, Inc')
if mibBuilder.loadTexts: wwpVoipMIB.setContactInfo(' Mib Meister Postal: World Wide Packets P.O. Box 950 Veradale, WA 99037 USA Phone: +1 509 242 9000 Email: mib.meister@worldwidepackets.com')
if mibBuilder.loadTexts: wwpVoipMIB.setDescription('This MIB module is for Voice Over IP feature on WWP Products')
wwpVoipMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1))
wwpVoip = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1))
wwpVoipMIBNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 15, 2))
wwpVoipMIBNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 15, 2, 0))
wwpVoipMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 15, 3))
wwpVoipMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 15, 3, 1))
wwpVoipMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 15, 3, 2))
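# Note (annotation added for clarity): 1.3.6.1.4.1 is the IANA private
# enterprise arc, so 6141 is presumably World Wide Packets' enterprise
# number; everything below hangs off wwpVoipMIB at 6141.2.15.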
wwpVoipTable = MibTable((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1), )
if mibBuilder.loadTexts: wwpVoipTable.setStatus('current')
if mibBuilder.loadTexts: wwpVoipTable.setDescription('The conceptual table listing all the Voice Over Ip Entries.')
wwpVoipEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1, 1), ).setIndexNames((0, "WWP-VOIP-MIB", "wwpVoipIndex"))
if mibBuilder.loadTexts: wwpVoipEntry.setStatus('current')
if mibBuilder.loadTexts: wwpVoipEntry.setDescription('An entry in the wwpVoipTable.')
wwpVoipIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpVoipIndex.setStatus('current')
if mibBuilder.loadTexts: wwpVoipIndex.setDescription('Index for the Voip Entry.')
wwpVoipDownLoaderVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpVoipDownLoaderVersion.setStatus('current')
if mibBuilder.loadTexts: wwpVoipDownLoaderVersion.setDescription('The Downloader version for this VOIP entry.')
wwpVoipApplicationVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpVoipApplicationVersion.setStatus('current')
if mibBuilder.loadTexts: wwpVoipApplicationVersion.setDescription('The Application version for this VOIP entry.')
wwpVoipPortNum = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpVoipPortNum.setStatus('current')
if mibBuilder.loadTexts: wwpVoipPortNum.setDescription('The Port Number for the VOIP.')
wwpVoipIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1, 1, 5), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpVoipIpAddr.setStatus('current')
if mibBuilder.loadTexts: wwpVoipIpAddr.setDescription('The IP Address for the VOIP Entry.')
wwpVoipNumResets = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpVoipNumResets.setStatus('current')
if mibBuilder.loadTexts: wwpVoipNumResets.setDescription('The number of times the VOIP processor has been reset.')
wwpVoipCallAgentAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1, 1, 7), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpVoipCallAgentAddr.setStatus('current')
if mibBuilder.loadTexts: wwpVoipCallAgentAddr.setDescription('The IP address of the call agent to which this VOIP aplication is connected to.')
wwpVoipResetOp = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("none", 0), ("reset", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wwpVoipResetOp.setStatus('current')
if mibBuilder.loadTexts: wwpVoipResetOp.setDescription("This object resets the VOIP Application. A read on this object always returns 'none'.")
wwpVoipDiagFailNotification = NotificationType((1, 3, 6, 1, 4, 1, 6141, 2, 15, 2, 0, 1))
if mibBuilder.loadTexts: wwpVoipDiagFailNotification.setStatus('current')
if mibBuilder.loadTexts: wwpVoipDiagFailNotification.setDescription('A wwpVoipDiagFailNotification is sent if T2 VOIP ASIC fails diagnostics.')
mibBuilder.exportSymbols("WWP-VOIP-MIB", wwpVoipTable=wwpVoipTable, wwpVoipCallAgentAddr=wwpVoipCallAgentAddr, wwpVoipDownLoaderVersion=wwpVoipDownLoaderVersion, wwpVoipMIBCompliances=wwpVoipMIBCompliances, wwpVoipApplicationVersion=wwpVoipApplicationVersion, wwpVoipPortNum=wwpVoipPortNum, wwpVoipResetOp=wwpVoipResetOp, wwpVoipIndex=wwpVoipIndex, wwpVoipEntry=wwpVoipEntry, wwpVoipMIBNotificationPrefix=wwpVoipMIBNotificationPrefix, wwpVoipMIBConformance=wwpVoipMIBConformance, wwpVoipNumResets=wwpVoipNumResets, wwpVoipMIBNotifications=wwpVoipMIBNotifications, wwpVoipMIB=wwpVoipMIB, wwpVoipMIBObjects=wwpVoipMIBObjects, wwpVoip=wwpVoip, wwpVoipIpAddr=wwpVoipIpAddr, wwpVoipDiagFailNotification=wwpVoipDiagFailNotification, wwpVoipMIBGroups=wwpVoipMIBGroups, PYSNMP_MODULE_ID=wwpVoipMIB)
| 109.369231
| 790
| 0.775496
|
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Counter32, Integer32, ModuleIdentity, Bits, iso, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Gauge32, ObjectIdentity, Counter64, MibIdentifier, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Counter32", "Integer32", "ModuleIdentity", "Bits", "iso", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Gauge32", "ObjectIdentity", "Counter64", "MibIdentifier", "Unsigned32")
MacAddress, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "MacAddress", "DisplayString", "TextualConvention")
wwpModules, = mibBuilder.importSymbols("WWP-SMI", "wwpModules")
wwpVoipMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 6141, 2, 15))
wwpVoipMIB.setRevisions(('2001-04-03 17:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: wwpVoipMIB.setRevisionsDescriptions(('Initial creation.',))
if mibBuilder.loadTexts: wwpVoipMIB.setLastUpdated('200104031700Z')
if mibBuilder.loadTexts: wwpVoipMIB.setOrganization('World Wide Packets, Inc')
if mibBuilder.loadTexts: wwpVoipMIB.setContactInfo(' Mib Meister Postal: World Wide Packets P.O. Box 950 Veradale, WA 99037 USA Phone: +1 509 242 9000 Email: mib.meister@worldwidepackets.com')
if mibBuilder.loadTexts: wwpVoipMIB.setDescription('This MIB module is for the Voice over IP feature on WWP products.')
wwpVoipMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1))
wwpVoip = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1))
wwpVoipMIBNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 15, 2))
wwpVoipMIBNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 15, 2, 0))
wwpVoipMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 15, 3))
wwpVoipMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 15, 3, 1))
wwpVoipMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 15, 3, 2))
wwpVoipTable = MibTable((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1), )
if mibBuilder.loadTexts: wwpVoipTable.setStatus('current')
if mibBuilder.loadTexts: wwpVoipTable.setDescription('The conceptual table listing all the Voice over IP entries.')
wwpVoipEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1, 1), ).setIndexNames((0, "WWP-VOIP-MIB", "wwpVoipIndex"))
if mibBuilder.loadTexts: wwpVoipEntry.setStatus('current')
if mibBuilder.loadTexts: wwpVoipEntry.setDescription('An entry in the wwpVoipTable.')
wwpVoipIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpVoipIndex.setStatus('current')
if mibBuilder.loadTexts: wwpVoipIndex.setDescription('Index for the VOIP entry.')
wwpVoipDownLoaderVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpVoipDownLoaderVersion.setStatus('current')
if mibBuilder.loadTexts: wwpVoipDownLoaderVersion.setDescription('The Downloader version for this VOIP entry.')
wwpVoipApplicationVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpVoipApplicationVersion.setStatus('current')
if mibBuilder.loadTexts: wwpVoipApplicationVersion.setDescription('The application version for this VOIP entry.')
wwpVoipPortNum = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpVoipPortNum.setStatus('current')
if mibBuilder.loadTexts: wwpVoipPortNum.setDescription('The Port Number for the VOIP.')
wwpVoipIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1, 1, 5), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpVoipIpAddr.setStatus('current')
if mibBuilder.loadTexts: wwpVoipIpAddr.setDescription('The IP Address for the VOIP Entry.')
wwpVoipNumResets = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpVoipNumResets.setStatus('current')
if mibBuilder.loadTexts: wwpVoipNumResets.setDescription('The number of times the VOIP processor has been reset.')
wwpVoipCallAgentAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1, 1, 7), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpVoipCallAgentAddr.setStatus('current')
if mibBuilder.loadTexts: wwpVoipCallAgentAddr.setDescription('The IP address of the call agent to which this VOIP application is connected.')
wwpVoipResetOp = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 15, 1, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("none", 0), ("reset", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wwpVoipResetOp.setStatus('current')
if mibBuilder.loadTexts: wwpVoipResetOp.setDescription("This object resets the VOIP application. A read on this object always returns 'none'.")
wwpVoipDiagFailNotification = NotificationType((1, 3, 6, 1, 4, 1, 6141, 2, 15, 2, 0, 1))
if mibBuilder.loadTexts: wwpVoipDiagFailNotification.setStatus('current')
if mibBuilder.loadTexts: wwpVoipDiagFailNotification.setDescription('A wwpVoipDiagFailNotification is sent if T2 VOIP ASIC fails diagnostics.')
mibBuilder.exportSymbols("WWP-VOIP-MIB", wwpVoipTable=wwpVoipTable, wwpVoipCallAgentAddr=wwpVoipCallAgentAddr, wwpVoipDownLoaderVersion=wwpVoipDownLoaderVersion, wwpVoipMIBCompliances=wwpVoipMIBCompliances, wwpVoipApplicationVersion=wwpVoipApplicationVersion, wwpVoipPortNum=wwpVoipPortNum, wwpVoipResetOp=wwpVoipResetOp, wwpVoipIndex=wwpVoipIndex, wwpVoipEntry=wwpVoipEntry, wwpVoipMIBNotificationPrefix=wwpVoipMIBNotificationPrefix, wwpVoipMIBConformance=wwpVoipMIBConformance, wwpVoipNumResets=wwpVoipNumResets, wwpVoipMIBNotifications=wwpVoipMIBNotifications, wwpVoipMIB=wwpVoipMIB, wwpVoipMIBObjects=wwpVoipMIBObjects, wwpVoip=wwpVoip, wwpVoipIpAddr=wwpVoipIpAddr, wwpVoipDiagFailNotification=wwpVoipDiagFailNotification, wwpVoipMIBGroups=wwpVoipMIBGroups, PYSNMP_MODULE_ID=wwpVoipMIB)
| true
| true
|
1c48d3f7cece91094b32bb88862cd9d132609db8
| 2,541
|
py
|
Python
|
tests/template_tests/test_nodelist.py
|
fizista/django
|
16f3a6a4c7bab11644d11c2be029374e5095cb56
|
[
"BSD-3-Clause"
] | 2
|
2015-01-21T15:45:07.000Z
|
2015-02-21T02:38:13.000Z
|
tests/template_tests/test_nodelist.py
|
fizista/django
|
16f3a6a4c7bab11644d11c2be029374e5095cb56
|
[
"BSD-3-Clause"
] | null | null | null |
tests/template_tests/test_nodelist.py
|
fizista/django
|
16f3a6a4c7bab11644d11c2be029374e5095cb56
|
[
"BSD-3-Clause"
] | 1
|
2020-10-01T08:23:34.000Z
|
2020-10-01T08:23:34.000Z
|
from unittest import TestCase
from django.template import VariableNode, Context
from django.template.loader import get_template_from_string
from django.test import override_settings
class NodelistTest(TestCase):
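    # Each test compiles a template and checks that get_nodes_by_type() finds the
    # VariableNode nested inside the block tag.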
def test_for(self):
source = '{% for i in 1 %}{{ a }}{% endfor %}'
template = get_template_from_string(source)
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_if(self):
source = '{% if x %}{{ a }}{% endif %}'
template = get_template_from_string(source)
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_ifequal(self):
source = '{% ifequal x y %}{{ a }}{% endifequal %}'
template = get_template_from_string(source)
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_ifchanged(self):
source = '{% ifchanged x %}{{ a }}{% endifchanged %}'
template = get_template_from_string(source)
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
class ErrorIndexTest(TestCase):
"""
    Checks whether the index of an error is calculated correctly in the
    template debugger in for loops. Refs ticket #5831.
"""
@override_settings(DEBUG=True, TEMPLATE_DEBUG=True)
def test_correct_exception_index(self):
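        # Each case pairs a template source with the expected (start, end) offsets
        # of the tag that raises the error.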
tests = [
('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% endfor %}', (38, 56)),
('{% load bad_tag %}{% for i in range %}{% for j in range %}{% badsimpletag %}{% endfor %}{% endfor %}', (58, 76)),
('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% for j in range %}Hello{% endfor %}{% endfor %}', (38, 56)),
('{% load bad_tag %}{% for i in range %}{% for j in five %}{% badsimpletag %}{% endfor %}{% endfor %}', (38, 57)),
('{% load bad_tag %}{% for j in five %}{% badsimpletag %}{% endfor %}', (18, 37)),
]
context = Context({
'range': range(5),
'five': 5,
})
for source, expected_error_source_index in tests:
template = get_template_from_string(source)
try:
template.render(context)
except (RuntimeError, TypeError) as e:
error_source_index = e.django_template_source[1]
self.assertEqual(error_source_index,
expected_error_source_index)
| 41.655738
| 132
| 0.599764
|
from unittest import TestCase
from django.template import VariableNode, Context
from django.template.loader import get_template_from_string
from django.test import override_settings
class NodelistTest(TestCase):
def test_for(self):
source = '{% for i in 1 %}{{ a }}{% endfor %}'
template = get_template_from_string(source)
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_if(self):
source = '{% if x %}{{ a }}{% endif %}'
template = get_template_from_string(source)
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_ifequal(self):
source = '{% ifequal x y %}{{ a }}{% endifequal %}'
template = get_template_from_string(source)
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_ifchanged(self):
source = '{% ifchanged x %}{{ a }}{% endifchanged %}'
template = get_template_from_string(source)
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
class ErrorIndexTest(TestCase):
@override_settings(DEBUG=True, TEMPLATE_DEBUG=True)
def test_correct_exception_index(self):
tests = [
('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% endfor %}', (38, 56)),
('{% load bad_tag %}{% for i in range %}{% for j in range %}{% badsimpletag %}{% endfor %}{% endfor %}', (58, 76)),
('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% for j in range %}Hello{% endfor %}{% endfor %}', (38, 56)),
('{% load bad_tag %}{% for i in range %}{% for j in five %}{% badsimpletag %}{% endfor %}{% endfor %}', (38, 57)),
('{% load bad_tag %}{% for j in five %}{% badsimpletag %}{% endfor %}', (18, 37)),
]
context = Context({
'range': range(5),
'five': 5,
})
for source, expected_error_source_index in tests:
template = get_template_from_string(source)
try:
template.render(context)
except (RuntimeError, TypeError) as e:
error_source_index = e.django_template_source[1]
self.assertEqual(error_source_index,
expected_error_source_index)
| true
| true
|
1c48d500ecd549e2db64c6c379a7463a7076eef5
| 22,662
|
py
|
Python
|
tests/one_to_one/tests.py
|
downstreamimpact/django
|
6686238cdc5c826ca5aab39d771798ff98e90ae8
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 9
|
2020-09-30T16:32:05.000Z
|
2020-10-12T13:52:07.000Z
|
tests/one_to_one/tests.py
|
downstreamimpact/django
|
6686238cdc5c826ca5aab39d771798ff98e90ae8
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 3
|
2016-05-15T22:05:14.000Z
|
2019-11-02T15:58:14.000Z
|
tests/one_to_one/tests.py
|
downstreamimpact/django
|
6686238cdc5c826ca5aab39d771798ff98e90ae8
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 4
|
2019-11-07T01:22:16.000Z
|
2020-09-16T22:02:16.000Z
|
from django.db import IntegrityError, connection, transaction
from django.test import TestCase
from .models import (
Bar, Director, Favorites, HiddenPointer, ManualPrimaryKey, MultiModel,
Place, Pointer, RelatedModel, Restaurant, School, Target, ToFieldPointer,
UndergroundBar, Waiter,
)
class OneToOneTests(TestCase):
def setUp(self):
self.p1 = Place.objects.create(name='Demon Dogs', address='944 W. Fullerton')
self.p2 = Place.objects.create(name='Ace Hardware', address='1013 N. Ashland')
self.r1 = Restaurant.objects.create(place=self.p1, serves_hot_dogs=True, serves_pizza=False)
self.b1 = Bar.objects.create(place=self.p1, serves_cocktails=False)
def test_getter(self):
# A Restaurant can access its place.
self.assertEqual(repr(self.r1.place), '<Place: Demon Dogs the place>')
# A Place can access its restaurant, if available.
self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
# p2 doesn't have an associated restaurant.
with self.assertRaisesMessage(Restaurant.DoesNotExist, 'Place has no restaurant'):
self.p2.restaurant
# The exception raised on attribute access when a related object
# doesn't exist should be an instance of a subclass of `AttributeError`
# refs #21563
self.assertFalse(hasattr(self.p2, 'restaurant'))
def test_setter(self):
# Set the place using assignment notation. Because place is the primary
# key on Restaurant, the save will create a new restaurant
self.r1.place = self.p2
self.r1.save()
self.assertEqual(repr(self.p2.restaurant), '<Restaurant: Ace Hardware the restaurant>')
self.assertEqual(repr(self.r1.place), '<Place: Ace Hardware the place>')
self.assertEqual(self.p2.pk, self.r1.pk)
# Set the place back again, using assignment in the reverse direction.
self.p1.restaurant = self.r1
self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
r = Restaurant.objects.get(pk=self.p1.id)
self.assertEqual(repr(r.place), '<Place: Demon Dogs the place>')
def test_manager_all(self):
# Restaurant.objects.all() just returns the Restaurants, not the Places.
self.assertQuerysetEqual(Restaurant.objects.all(), [
'<Restaurant: Demon Dogs the restaurant>',
])
# Place.objects.all() returns all Places, regardless of whether they
# have Restaurants.
self.assertQuerysetEqual(Place.objects.order_by('name'), [
'<Place: Ace Hardware the place>',
'<Place: Demon Dogs the place>',
])
def test_manager_get(self):
def assert_get_restaurant(**params):
self.assertEqual(repr(Restaurant.objects.get(**params)),
'<Restaurant: Demon Dogs the restaurant>')
assert_get_restaurant(place__id__exact=self.p1.pk)
assert_get_restaurant(place__id=self.p1.pk)
assert_get_restaurant(place__exact=self.p1.pk)
assert_get_restaurant(place__exact=self.p1)
assert_get_restaurant(place=self.p1.pk)
assert_get_restaurant(place=self.p1)
assert_get_restaurant(pk=self.p1.pk)
assert_get_restaurant(place__pk__exact=self.p1.pk)
assert_get_restaurant(place__pk=self.p1.pk)
assert_get_restaurant(place__name__startswith="Demon")
def assert_get_place(**params):
self.assertEqual(repr(Place.objects.get(**params)),
'<Place: Demon Dogs the place>')
assert_get_place(restaurant__place__exact=self.p1.pk)
assert_get_place(restaurant__place__exact=self.p1)
assert_get_place(restaurant__place__pk=self.p1.pk)
assert_get_place(restaurant__exact=self.p1.pk)
assert_get_place(restaurant__exact=self.r1)
assert_get_place(restaurant__pk=self.p1.pk)
assert_get_place(restaurant=self.p1.pk)
assert_get_place(restaurant=self.r1)
assert_get_place(id__exact=self.p1.pk)
assert_get_place(pk=self.p1.pk)
def test_foreign_key(self):
# Add a Waiter to the Restaurant.
w = self.r1.waiter_set.create(name='Joe')
self.assertEqual(repr(w), '<Waiter: Joe the waiter at Demon Dogs the restaurant>')
# Query the waiters
def assert_filter_waiters(**params):
self.assertQuerysetEqual(Waiter.objects.filter(**params), [
'<Waiter: Joe the waiter at Demon Dogs the restaurant>'
])
assert_filter_waiters(restaurant__place__exact=self.p1.pk)
assert_filter_waiters(restaurant__place__exact=self.p1)
assert_filter_waiters(restaurant__place__pk=self.p1.pk)
assert_filter_waiters(restaurant__exact=self.r1.pk)
assert_filter_waiters(restaurant__exact=self.r1)
assert_filter_waiters(restaurant__pk=self.r1.pk)
assert_filter_waiters(restaurant=self.r1.pk)
assert_filter_waiters(restaurant=self.r1)
assert_filter_waiters(id__exact=w.pk)
assert_filter_waiters(pk=w.pk)
# Delete the restaurant; the waiter should also be removed
r = Restaurant.objects.get(pk=self.r1.pk)
r.delete()
self.assertEqual(Waiter.objects.count(), 0)
def test_multiple_o2o(self):
# One-to-one fields still work if you create your own primary key
o1 = ManualPrimaryKey(primary_key="abc123", name="primary")
o1.save()
o2 = RelatedModel(link=o1, name="secondary")
o2.save()
# You can have multiple one-to-one fields on a model, too.
x1 = MultiModel(link1=self.p1, link2=o1, name="x1")
x1.save()
self.assertEqual(repr(o1.multimodel), '<MultiModel: Multimodel x1>')
# This will fail because each one-to-one field must be unique (and
# link2=o1 was used for x1, above).
mm = MultiModel(link1=self.p2, link2=o1, name="x1")
with self.assertRaises(IntegrityError):
with transaction.atomic():
mm.save()
def test_unsaved_object(self):
"""
#10811 -- Assigning an unsaved object to a OneToOneField
should raise an exception.
"""
place = Place(name='User', address='London')
with self.assertRaises(Restaurant.DoesNotExist):
place.restaurant
msg = "save() prohibited to prevent data loss due to unsaved related object 'place'."
with self.assertRaisesMessage(ValueError, msg):
Restaurant.objects.create(place=place, serves_hot_dogs=True, serves_pizza=False)
# place should not cache restaurant
with self.assertRaises(Restaurant.DoesNotExist):
place.restaurant
def test_reverse_relationship_cache_cascade(self):
"""
Regression test for #9023: accessing the reverse relationship shouldn't
result in a cascading delete().
"""
bar = UndergroundBar.objects.create(place=self.p1, serves_cocktails=False)
# The bug in #9023: if you access the one-to-one relation *before*
# setting to None and deleting, the cascade happens anyway.
self.p1.undergroundbar
bar.place.name = 'foo'
bar.place = None
bar.save()
self.p1.delete()
self.assertEqual(Place.objects.all().count(), 1)
self.assertEqual(UndergroundBar.objects.all().count(), 1)
def test_create_models_m2m(self):
"""
Models are created via the m2m relation if the remote model has a
OneToOneField (#1064, #1506).
"""
f = Favorites(name='Fred')
f.save()
f.restaurants.set([self.r1])
self.assertQuerysetEqual(
f.restaurants.all(),
['<Restaurant: Demon Dogs the restaurant>']
)
def test_reverse_object_cache(self):
"""
The name of the cache for the reverse object is correct (#7173).
"""
self.assertEqual(self.p1.restaurant, self.r1)
self.assertEqual(self.p1.bar, self.b1)
def test_assign_none_reverse_relation(self):
p = Place.objects.get(name="Demon Dogs")
# Assigning None succeeds if field is null=True.
ug_bar = UndergroundBar.objects.create(place=p, serves_cocktails=False)
p.undergroundbar = None
self.assertIsNone(ug_bar.place)
ug_bar.save()
ug_bar.refresh_from_db()
self.assertIsNone(ug_bar.place)
def test_assign_none_null_reverse_relation(self):
p = Place.objects.get(name="Demon Dogs")
# Assigning None doesn't throw AttributeError if there isn't a related
# UndergroundBar.
p.undergroundbar = None
def test_assign_none_to_null_cached_reverse_relation(self):
p = Place.objects.get(name='Demon Dogs')
# Prime the relation's cache with a value of None.
with self.assertRaises(Place.undergroundbar.RelatedObjectDoesNotExist):
getattr(p, 'undergroundbar')
# Assigning None works if there isn't a related UndergroundBar and the
# reverse cache has a value of None.
p.undergroundbar = None
def test_assign_o2o_id_value(self):
b = UndergroundBar.objects.create(place=self.p1)
b.place_id = self.p2.pk
b.save()
self.assertEqual(b.place_id, self.p2.pk)
self.assertFalse(UndergroundBar.place.is_cached(b))
self.assertEqual(b.place, self.p2)
self.assertTrue(UndergroundBar.place.is_cached(b))
# Reassigning the same value doesn't clear a cached instance.
b.place_id = self.p2.pk
self.assertTrue(UndergroundBar.place.is_cached(b))
def test_assign_o2o_id_none(self):
b = UndergroundBar.objects.create(place=self.p1)
b.place_id = None
b.save()
self.assertIsNone(b.place_id)
self.assertFalse(UndergroundBar.place.is_cached(b))
self.assertIsNone(b.place)
self.assertTrue(UndergroundBar.place.is_cached(b))
def test_related_object_cache(self):
""" Regression test for #6886 (the related-object cache) """
# Look up the objects again so that we get "fresh" objects
p = Place.objects.get(name="Demon Dogs")
r = p.restaurant
        # Accessing the related object again returns exactly the same object
self.assertIs(p.restaurant, r)
# But if we kill the cache, we get a new object
del p._state.fields_cache['restaurant']
self.assertIsNot(p.restaurant, r)
# Reassigning the Restaurant object results in an immediate cache update
# We can't use a new Restaurant because that'll violate one-to-one, but
# with a new *instance* the is test below will fail if #6886 regresses.
r2 = Restaurant.objects.get(pk=r.pk)
p.restaurant = r2
self.assertIs(p.restaurant, r2)
# Assigning None succeeds if field is null=True.
ug_bar = UndergroundBar.objects.create(place=p, serves_cocktails=False)
ug_bar.place = None
self.assertIsNone(ug_bar.place)
# Assigning None will not fail: Place.restaurant is null=False
setattr(p, 'restaurant', None)
# You also can't assign an object of the wrong type here
msg = (
'Cannot assign "<Place: Demon Dogs the place>": '
'"Place.restaurant" must be a "Restaurant" instance.'
)
with self.assertRaisesMessage(ValueError, msg):
setattr(p, 'restaurant', p)
# Creation using keyword argument should cache the related object.
p = Place.objects.get(name="Demon Dogs")
r = Restaurant(place=p)
self.assertIs(r.place, p)
# Creation using keyword argument and unsaved related instance (#8070).
p = Place()
r = Restaurant(place=p)
self.assertIs(r.place, p)
# Creation using attname keyword argument and an id will cause the related
# object to be fetched.
p = Place.objects.get(name="Demon Dogs")
r = Restaurant(place_id=p.id)
self.assertIsNot(r.place, p)
self.assertEqual(r.place, p)
def test_filter_one_to_one_relations(self):
"""
Regression test for #9968
filtering reverse one-to-one relations with primary_key=True was
misbehaving. We test both (primary_key=True & False) cases here to
prevent any reappearance of the problem.
"""
target = Target.objects.create()
self.assertSequenceEqual(Target.objects.filter(pointer=None), [target])
self.assertSequenceEqual(Target.objects.exclude(pointer=None), [])
self.assertSequenceEqual(Target.objects.filter(second_pointer=None), [target])
self.assertSequenceEqual(Target.objects.exclude(second_pointer=None), [])
def test_o2o_primary_key_delete(self):
t = Target.objects.create(name='name')
Pointer.objects.create(other=t)
num_deleted, objs = Pointer.objects.filter(other__name='name').delete()
self.assertEqual(num_deleted, 1)
self.assertEqual(objs, {'one_to_one.Pointer': 1})
def test_save_nullable_o2o_after_parent(self):
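        # The bar picks up the place's primary key once both objects are saved in order.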
place = Place(name='Rose tattoo')
bar = UndergroundBar(place=place)
place.save()
bar.save()
bar.refresh_from_db()
self.assertEqual(bar.place, place)
def test_reverse_object_does_not_exist_cache(self):
"""
Regression for #13839 and #17439.
DoesNotExist on a reverse one-to-one relation is cached.
"""
p = Place(name='Zombie Cats', address='Not sure')
p.save()
with self.assertNumQueries(1):
with self.assertRaises(Restaurant.DoesNotExist):
p.restaurant
with self.assertNumQueries(0):
with self.assertRaises(Restaurant.DoesNotExist):
p.restaurant
def test_reverse_object_cached_when_related_is_accessed(self):
"""
Regression for #13839 and #17439.
The target of a one-to-one relation is cached
when the origin is accessed through the reverse relation.
"""
# Use a fresh object without caches
r = Restaurant.objects.get(pk=self.r1.pk)
p = r.place
with self.assertNumQueries(0):
self.assertEqual(p.restaurant, r)
def test_related_object_cached_when_reverse_is_accessed(self):
"""
Regression for #13839 and #17439.
The origin of a one-to-one relation is cached
when the target is accessed through the reverse relation.
"""
# Use a fresh object without caches
p = Place.objects.get(pk=self.p1.pk)
r = p.restaurant
with self.assertNumQueries(0):
self.assertEqual(r.place, p)
def test_reverse_object_cached_when_related_is_set(self):
"""
Regression for #13839 and #17439.
The target of a one-to-one relation is always cached.
"""
p = Place(name='Zombie Cats', address='Not sure')
p.save()
self.r1.place = p
self.r1.save()
with self.assertNumQueries(0):
self.assertEqual(p.restaurant, self.r1)
def test_reverse_object_cached_when_related_is_unset(self):
"""
Regression for #13839 and #17439.
The target of a one-to-one relation is always cached.
"""
b = UndergroundBar(place=self.p1, serves_cocktails=True)
b.save()
with self.assertNumQueries(0):
self.assertEqual(self.p1.undergroundbar, b)
b.place = None
b.save()
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
self.p1.undergroundbar
def test_get_reverse_on_unsaved_object(self):
"""
Regression for #18153 and #19089.
Accessing the reverse relation on an unsaved object
always raises an exception.
"""
p = Place()
# When there's no instance of the origin of the one-to-one
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
p.undergroundbar
UndergroundBar.objects.create()
# When there's one instance of the origin
# (p.undergroundbar used to return that instance)
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
p.undergroundbar
        # Several instances of the origin are only possible if the database allows
# inserting multiple NULL rows for a unique constraint
if connection.features.supports_nullable_unique_constraints:
UndergroundBar.objects.create()
# When there are several instances of the origin
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
p.undergroundbar
def test_set_reverse_on_unsaved_object(self):
"""
Writing to the reverse relation on an unsaved object
is impossible too.
"""
p = Place()
b = UndergroundBar.objects.create()
# Assigning a reverse relation on an unsaved object is allowed.
p.undergroundbar = b
# However saving the object is not allowed.
msg = "save() prohibited to prevent data loss due to unsaved related object 'place'."
with self.assertNumQueries(0):
with self.assertRaisesMessage(ValueError, msg):
b.save()
def test_nullable_o2o_delete(self):
u = UndergroundBar.objects.create(place=self.p1)
u.place_id = None
u.save()
self.p1.delete()
self.assertTrue(UndergroundBar.objects.filter(pk=u.pk).exists())
self.assertIsNone(UndergroundBar.objects.get(pk=u.pk).place)
def test_hidden_accessor(self):
"""
        When a related name ending in '+' is specified, no reverse accessor
        should be added to the related model.
"""
self.assertFalse(
hasattr(Target, HiddenPointer._meta.get_field('target').remote_field.get_accessor_name())
)
def test_related_object(self):
public_school = School.objects.create(is_public=True)
public_director = Director.objects.create(school=public_school, is_temp=False)
private_school = School.objects.create(is_public=False)
private_director = Director.objects.create(school=private_school, is_temp=True)
# Only one school is available via all() due to the custom default manager.
self.assertSequenceEqual(School.objects.all(), [public_school])
# Only one director is available via all() due to the custom default manager.
self.assertSequenceEqual(Director.objects.all(), [public_director])
self.assertEqual(public_director.school, public_school)
self.assertEqual(public_school.director, public_director)
        # Make sure the base manager is used so that the related object
        # is still accessible even if the default manager doesn't normally
        # allow it.
self.assertEqual(private_director.school, private_school)
        # Make sure the base manager is used so that a school can still access
        # its related director even if the default manager doesn't normally
        # allow it.
self.assertEqual(private_school.director, private_director)
School._meta.base_manager_name = 'objects'
School._meta._expire_cache()
try:
private_director = Director._base_manager.get(pk=private_director.pk)
with self.assertRaises(School.DoesNotExist):
private_director.school
finally:
School._meta.base_manager_name = None
School._meta._expire_cache()
Director._meta.base_manager_name = 'objects'
Director._meta._expire_cache()
try:
private_school = School._base_manager.get(pk=private_school.pk)
with self.assertRaises(Director.DoesNotExist):
private_school.director
finally:
Director._meta.base_manager_name = None
Director._meta._expire_cache()
def test_hasattr_related_object(self):
# The exception raised on attribute access when a related object
# doesn't exist should be an instance of a subclass of `AttributeError`
# refs #21563
self.assertFalse(hasattr(Director(), 'director'))
self.assertFalse(hasattr(School(), 'school'))
def test_update_one_to_one_pk(self):
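        # A queryset update() of the one-to-one column is reflected after refresh_from_db().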
p1 = Place.objects.create()
p2 = Place.objects.create()
r1 = Restaurant.objects.create(place=p1)
r2 = Restaurant.objects.create(place=p2)
w = Waiter.objects.create(restaurant=r1)
Waiter.objects.update(restaurant=r2)
w.refresh_from_db()
self.assertEqual(w.restaurant, r2)
def test_rel_pk_subquery(self):
r = Restaurant.objects.first()
q1 = Restaurant.objects.filter(place_id=r.pk)
# Subquery using primary key and a query against the
# same model works correctly.
q2 = Restaurant.objects.filter(place_id__in=q1)
self.assertSequenceEqual(q2, [r])
        # Subquery using 'pk__in' instead of 'place_id__in' works, too.
q2 = Restaurant.objects.filter(
pk__in=Restaurant.objects.filter(place__id=r.place.pk)
)
self.assertSequenceEqual(q2, [r])
q3 = Restaurant.objects.filter(place__in=Place.objects.all())
self.assertSequenceEqual(q3, [r])
q4 = Restaurant.objects.filter(place__in=Place.objects.filter(id=r.pk))
self.assertSequenceEqual(q4, [r])
def test_rel_pk_exact(self):
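        # Filtering pk__exact with a model instance coerces the instance to its primary key.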
r = Restaurant.objects.first()
r2 = Restaurant.objects.filter(pk__exact=r).first()
self.assertEqual(r, r2)
def test_primary_key_to_field_filter(self):
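        # Filtering by a model instance resolves through the to_field relation as
        # well as through pk__exact.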
target = Target.objects.create(name='foo')
pointer = ToFieldPointer.objects.create(target=target)
self.assertSequenceEqual(ToFieldPointer.objects.filter(target=target), [pointer])
self.assertSequenceEqual(ToFieldPointer.objects.filter(pk__exact=pointer), [pointer])
def test_cached_relation_invalidated_on_save(self):
"""
Model.save() invalidates stale OneToOneField relations after a primary
key assignment.
"""
self.assertEqual(self.b1.place, self.p1) # caches b1.place
self.b1.place_id = self.p2.pk
self.b1.save()
self.assertEqual(self.b1.place, self.p2)
| 41.278689
| 101
| 0.654532
|
from django.db import IntegrityError, connection, transaction
from django.test import TestCase
from .models import (
Bar, Director, Favorites, HiddenPointer, ManualPrimaryKey, MultiModel,
Place, Pointer, RelatedModel, Restaurant, School, Target, ToFieldPointer,
UndergroundBar, Waiter,
)
class OneToOneTests(TestCase):
def setUp(self):
self.p1 = Place.objects.create(name='Demon Dogs', address='944 W. Fullerton')
self.p2 = Place.objects.create(name='Ace Hardware', address='1013 N. Ashland')
self.r1 = Restaurant.objects.create(place=self.p1, serves_hot_dogs=True, serves_pizza=False)
self.b1 = Bar.objects.create(place=self.p1, serves_cocktails=False)
def test_getter(self):
self.assertEqual(repr(self.r1.place), '<Place: Demon Dogs the place>')
self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
with self.assertRaisesMessage(Restaurant.DoesNotExist, 'Place has no restaurant'):
self.p2.restaurant
# The exception raised on attribute access when a related object
# doesn't exist should be an instance of a subclass of `AttributeError`
self.assertFalse(hasattr(self.p2, 'restaurant'))
def test_setter(self):
self.r1.place = self.p2
self.r1.save()
self.assertEqual(repr(self.p2.restaurant), '<Restaurant: Ace Hardware the restaurant>')
self.assertEqual(repr(self.r1.place), '<Place: Ace Hardware the place>')
self.assertEqual(self.p2.pk, self.r1.pk)
self.p1.restaurant = self.r1
self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
r = Restaurant.objects.get(pk=self.p1.id)
self.assertEqual(repr(r.place), '<Place: Demon Dogs the place>')
def test_manager_all(self):
self.assertQuerysetEqual(Restaurant.objects.all(), [
'<Restaurant: Demon Dogs the restaurant>',
])
self.assertQuerysetEqual(Place.objects.order_by('name'), [
'<Place: Ace Hardware the place>',
'<Place: Demon Dogs the place>',
])
def test_manager_get(self):
def assert_get_restaurant(**params):
self.assertEqual(repr(Restaurant.objects.get(**params)),
'<Restaurant: Demon Dogs the restaurant>')
assert_get_restaurant(place__id__exact=self.p1.pk)
assert_get_restaurant(place__id=self.p1.pk)
assert_get_restaurant(place__exact=self.p1.pk)
assert_get_restaurant(place__exact=self.p1)
assert_get_restaurant(place=self.p1.pk)
assert_get_restaurant(place=self.p1)
assert_get_restaurant(pk=self.p1.pk)
assert_get_restaurant(place__pk__exact=self.p1.pk)
assert_get_restaurant(place__pk=self.p1.pk)
assert_get_restaurant(place__name__startswith="Demon")
def assert_get_place(**params):
self.assertEqual(repr(Place.objects.get(**params)),
'<Place: Demon Dogs the place>')
assert_get_place(restaurant__place__exact=self.p1.pk)
assert_get_place(restaurant__place__exact=self.p1)
assert_get_place(restaurant__place__pk=self.p1.pk)
assert_get_place(restaurant__exact=self.p1.pk)
assert_get_place(restaurant__exact=self.r1)
assert_get_place(restaurant__pk=self.p1.pk)
assert_get_place(restaurant=self.p1.pk)
assert_get_place(restaurant=self.r1)
assert_get_place(id__exact=self.p1.pk)
assert_get_place(pk=self.p1.pk)
def test_foreign_key(self):
w = self.r1.waiter_set.create(name='Joe')
self.assertEqual(repr(w), '<Waiter: Joe the waiter at Demon Dogs the restaurant>')
def assert_filter_waiters(**params):
self.assertQuerysetEqual(Waiter.objects.filter(**params), [
'<Waiter: Joe the waiter at Demon Dogs the restaurant>'
])
assert_filter_waiters(restaurant__place__exact=self.p1.pk)
assert_filter_waiters(restaurant__place__exact=self.p1)
assert_filter_waiters(restaurant__place__pk=self.p1.pk)
assert_filter_waiters(restaurant__exact=self.r1.pk)
assert_filter_waiters(restaurant__exact=self.r1)
assert_filter_waiters(restaurant__pk=self.r1.pk)
assert_filter_waiters(restaurant=self.r1.pk)
assert_filter_waiters(restaurant=self.r1)
assert_filter_waiters(id__exact=w.pk)
assert_filter_waiters(pk=w.pk)
r = Restaurant.objects.get(pk=self.r1.pk)
r.delete()
self.assertEqual(Waiter.objects.count(), 0)
def test_multiple_o2o(self):
o1 = ManualPrimaryKey(primary_key="abc123", name="primary")
o1.save()
o2 = RelatedModel(link=o1, name="secondary")
o2.save()
x1 = MultiModel(link1=self.p1, link2=o1, name="x1")
x1.save()
self.assertEqual(repr(o1.multimodel), '<MultiModel: Multimodel x1>')
mm = MultiModel(link1=self.p2, link2=o1, name="x1")
with self.assertRaises(IntegrityError):
with transaction.atomic():
mm.save()
def test_unsaved_object(self):
place = Place(name='User', address='London')
with self.assertRaises(Restaurant.DoesNotExist):
place.restaurant
msg = "save() prohibited to prevent data loss due to unsaved related object 'place'."
with self.assertRaisesMessage(ValueError, msg):
Restaurant.objects.create(place=place, serves_hot_dogs=True, serves_pizza=False)
with self.assertRaises(Restaurant.DoesNotExist):
place.restaurant
def test_reverse_relationship_cache_cascade(self):
bar = UndergroundBar.objects.create(place=self.p1, serves_cocktails=False)
self.p1.undergroundbar
bar.place.name = 'foo'
bar.place = None
bar.save()
self.p1.delete()
self.assertEqual(Place.objects.all().count(), 1)
self.assertEqual(UndergroundBar.objects.all().count(), 1)
def test_create_models_m2m(self):
f = Favorites(name='Fred')
f.save()
f.restaurants.set([self.r1])
self.assertQuerysetEqual(
f.restaurants.all(),
['<Restaurant: Demon Dogs the restaurant>']
)
def test_reverse_object_cache(self):
self.assertEqual(self.p1.restaurant, self.r1)
self.assertEqual(self.p1.bar, self.b1)
def test_assign_none_reverse_relation(self):
p = Place.objects.get(name="Demon Dogs")
ug_bar = UndergroundBar.objects.create(place=p, serves_cocktails=False)
p.undergroundbar = None
self.assertIsNone(ug_bar.place)
ug_bar.save()
ug_bar.refresh_from_db()
self.assertIsNone(ug_bar.place)
def test_assign_none_null_reverse_relation(self):
p = Place.objects.get(name="Demon Dogs")
p.undergroundbar = None
def test_assign_none_to_null_cached_reverse_relation(self):
p = Place.objects.get(name='Demon Dogs')
with self.assertRaises(Place.undergroundbar.RelatedObjectDoesNotExist):
getattr(p, 'undergroundbar')
        # Assigning None works if there isn't a related UndergroundBar.
p.undergroundbar = None
def test_assign_o2o_id_value(self):
b = UndergroundBar.objects.create(place=self.p1)
b.place_id = self.p2.pk
b.save()
self.assertEqual(b.place_id, self.p2.pk)
self.assertFalse(UndergroundBar.place.is_cached(b))
self.assertEqual(b.place, self.p2)
self.assertTrue(UndergroundBar.place.is_cached(b))
b.place_id = self.p2.pk
self.assertTrue(UndergroundBar.place.is_cached(b))
def test_assign_o2o_id_none(self):
b = UndergroundBar.objects.create(place=self.p1)
b.place_id = None
b.save()
self.assertIsNone(b.place_id)
self.assertFalse(UndergroundBar.place.is_cached(b))
self.assertIsNone(b.place)
self.assertTrue(UndergroundBar.place.is_cached(b))
def test_related_object_cache(self):
# Look up the objects again so that we get "fresh" objects
p = Place.objects.get(name="Demon Dogs")
r = p.restaurant
        # Accessing the related object again returns exactly the same object
self.assertIs(p.restaurant, r)
# But if we kill the cache, we get a new object
del p._state.fields_cache['restaurant']
self.assertIsNot(p.restaurant, r)
# Reassigning the Restaurant object results in an immediate cache update
# We can't use a new Restaurant because that'll violate one-to-one, but
# with a new *instance* the is test below will fail if #6886 regresses.
r2 = Restaurant.objects.get(pk=r.pk)
p.restaurant = r2
self.assertIs(p.restaurant, r2)
# Assigning None succeeds if field is null=True.
ug_bar = UndergroundBar.objects.create(place=p, serves_cocktails=False)
ug_bar.place = None
self.assertIsNone(ug_bar.place)
# Assigning None will not fail: Place.restaurant is null=False
setattr(p, 'restaurant', None)
# You also can't assign an object of the wrong type here
msg = (
'Cannot assign "<Place: Demon Dogs the place>": '
'"Place.restaurant" must be a "Restaurant" instance.'
)
with self.assertRaisesMessage(ValueError, msg):
setattr(p, 'restaurant', p)
p = Place.objects.get(name="Demon Dogs")
r = Restaurant(place=p)
self.assertIs(r.place, p)
p = Place()
r = Restaurant(place=p)
self.assertIs(r.place, p)
p = Place.objects.get(name="Demon Dogs")
r = Restaurant(place_id=p.id)
self.assertIsNot(r.place, p)
self.assertEqual(r.place, p)
def test_filter_one_to_one_relations(self):
target = Target.objects.create()
self.assertSequenceEqual(Target.objects.filter(pointer=None), [target])
self.assertSequenceEqual(Target.objects.exclude(pointer=None), [])
self.assertSequenceEqual(Target.objects.filter(second_pointer=None), [target])
self.assertSequenceEqual(Target.objects.exclude(second_pointer=None), [])
def test_o2o_primary_key_delete(self):
t = Target.objects.create(name='name')
Pointer.objects.create(other=t)
num_deleted, objs = Pointer.objects.filter(other__name='name').delete()
self.assertEqual(num_deleted, 1)
self.assertEqual(objs, {'one_to_one.Pointer': 1})
def test_save_nullable_o2o_after_parent(self):
place = Place(name='Rose tattoo')
bar = UndergroundBar(place=place)
place.save()
bar.save()
bar.refresh_from_db()
self.assertEqual(bar.place, place)
def test_reverse_object_does_not_exist_cache(self):
p = Place(name='Zombie Cats', address='Not sure')
p.save()
with self.assertNumQueries(1):
with self.assertRaises(Restaurant.DoesNotExist):
p.restaurant
with self.assertNumQueries(0):
with self.assertRaises(Restaurant.DoesNotExist):
p.restaurant
def test_reverse_object_cached_when_related_is_accessed(self):
r = Restaurant.objects.get(pk=self.r1.pk)
p = r.place
with self.assertNumQueries(0):
self.assertEqual(p.restaurant, r)
def test_related_object_cached_when_reverse_is_accessed(self):
p = Place.objects.get(pk=self.p1.pk)
r = p.restaurant
with self.assertNumQueries(0):
self.assertEqual(r.place, p)
def test_reverse_object_cached_when_related_is_set(self):
p = Place(name='Zombie Cats', address='Not sure')
p.save()
self.r1.place = p
self.r1.save()
with self.assertNumQueries(0):
self.assertEqual(p.restaurant, self.r1)
def test_reverse_object_cached_when_related_is_unset(self):
b = UndergroundBar(place=self.p1, serves_cocktails=True)
b.save()
with self.assertNumQueries(0):
self.assertEqual(self.p1.undergroundbar, b)
b.place = None
b.save()
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
self.p1.undergroundbar
def test_get_reverse_on_unsaved_object(self):
p = Place()
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
p.undergroundbar
UndergroundBar.objects.create()
# When there's one instance of the origin
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
p.undergroundbar
if connection.features.supports_nullable_unique_constraints:
UndergroundBar.objects.create()
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
p.undergroundbar
def test_set_reverse_on_unsaved_object(self):
p = Place()
b = UndergroundBar.objects.create()
p.undergroundbar = b
msg = "save() prohibited to prevent data loss due to unsaved related object 'place'."
with self.assertNumQueries(0):
with self.assertRaisesMessage(ValueError, msg):
b.save()
def test_nullable_o2o_delete(self):
u = UndergroundBar.objects.create(place=self.p1)
u.place_id = None
u.save()
self.p1.delete()
self.assertTrue(UndergroundBar.objects.filter(pk=u.pk).exists())
self.assertIsNone(UndergroundBar.objects.get(pk=u.pk).place)
def test_hidden_accessor(self):
self.assertFalse(
hasattr(Target, HiddenPointer._meta.get_field('target').remote_field.get_accessor_name())
)
def test_related_object(self):
public_school = School.objects.create(is_public=True)
public_director = Director.objects.create(school=public_school, is_temp=False)
private_school = School.objects.create(is_public=False)
private_director = Director.objects.create(school=private_school, is_temp=True)
self.assertSequenceEqual(School.objects.all(), [public_school])
self.assertSequenceEqual(Director.objects.all(), [public_director])
self.assertEqual(public_director.school, public_school)
self.assertEqual(public_school.director, public_director)
self.assertEqual(private_director.school, private_school)
        # Make sure the base manager is used so that a school can still access
        # its related director even if the default manager doesn't allow it.
self.assertEqual(private_school.director, private_director)
School._meta.base_manager_name = 'objects'
School._meta._expire_cache()
try:
private_director = Director._base_manager.get(pk=private_director.pk)
with self.assertRaises(School.DoesNotExist):
private_director.school
finally:
School._meta.base_manager_name = None
School._meta._expire_cache()
Director._meta.base_manager_name = 'objects'
Director._meta._expire_cache()
try:
private_school = School._base_manager.get(pk=private_school.pk)
with self.assertRaises(Director.DoesNotExist):
private_school.director
finally:
Director._meta.base_manager_name = None
Director._meta._expire_cache()
def test_hasattr_related_object(self):
# refs #21563
self.assertFalse(hasattr(Director(), 'director'))
self.assertFalse(hasattr(School(), 'school'))
def test_update_one_to_one_pk(self):
p1 = Place.objects.create()
p2 = Place.objects.create()
r1 = Restaurant.objects.create(place=p1)
r2 = Restaurant.objects.create(place=p2)
w = Waiter.objects.create(restaurant=r1)
Waiter.objects.update(restaurant=r2)
w.refresh_from_db()
self.assertEqual(w.restaurant, r2)
def test_rel_pk_subquery(self):
r = Restaurant.objects.first()
q1 = Restaurant.objects.filter(place_id=r.pk)
# Subquery using primary key and a query against the
# same model works correctly.
q2 = Restaurant.objects.filter(place_id__in=q1)
self.assertSequenceEqual(q2, [r])
        # Subquery using 'pk__in' instead of 'place_id__in' works, too.
q2 = Restaurant.objects.filter(
pk__in=Restaurant.objects.filter(place__id=r.place.pk)
)
self.assertSequenceEqual(q2, [r])
q3 = Restaurant.objects.filter(place__in=Place.objects.all())
self.assertSequenceEqual(q3, [r])
q4 = Restaurant.objects.filter(place__in=Place.objects.filter(id=r.pk))
self.assertSequenceEqual(q4, [r])
def test_rel_pk_exact(self):
r = Restaurant.objects.first()
r2 = Restaurant.objects.filter(pk__exact=r).first()
self.assertEqual(r, r2)
def test_primary_key_to_field_filter(self):
target = Target.objects.create(name='foo')
pointer = ToFieldPointer.objects.create(target=target)
self.assertSequenceEqual(ToFieldPointer.objects.filter(target=target), [pointer])
self.assertSequenceEqual(ToFieldPointer.objects.filter(pk__exact=pointer), [pointer])
def test_cached_relation_invalidated_on_save(self):
self.assertEqual(self.b1.place, self.p1) # caches b1.place
self.b1.place_id = self.p2.pk
self.b1.save()
self.assertEqual(self.b1.place, self.p2)
| true
| true
|
1c48d5ad03091b3ee673df43f7d507922eb3e256
| 1,060
|
py
|
Python
|
kubernetes/test/test_extensions_v1beta1_ingress_list.py
|
redjohn/python
|
5e512ff564c244c50cab780d821542ed56aa965a
|
[
"Apache-2.0"
] | 1
|
2019-04-14T23:51:35.000Z
|
2019-04-14T23:51:35.000Z
|
kubernetes/test/test_extensions_v1beta1_ingress_list.py
|
redjohn/python
|
5e512ff564c244c50cab780d821542ed56aa965a
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_extensions_v1beta1_ingress_list.py
|
redjohn/python
|
5e512ff564c244c50cab780d821542ed56aa965a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.extensions_v1beta1_ingress_list import ExtensionsV1beta1IngressList
class TestExtensionsV1beta1IngressList(unittest.TestCase):
""" ExtensionsV1beta1IngressList unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testExtensionsV1beta1IngressList(self):
"""
Test ExtensionsV1beta1IngressList
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.extensions_v1beta1_ingress_list.ExtensionsV1beta1IngressList()
pass
if __name__ == '__main__':
unittest.main()
| 23.555556
| 105
| 0.735849
|
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.extensions_v1beta1_ingress_list import ExtensionsV1beta1IngressList
class TestExtensionsV1beta1IngressList(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testExtensionsV1beta1IngressList(self):
pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c48d5f9f9521e09c32013b687664465b262cec1
| 2,552
|
py
|
Python
|
csgomenumaker/command/navstate/vertfolder.py
|
citrusCS/csgo-menu-maker
|
60e055b4b6f61c7081fc231da47be51eb6e1d47f
|
[
"MIT"
] | 152
|
2019-03-11T23:05:34.000Z
|
2022-03-11T08:09:21.000Z
|
csgomenumaker/command/navstate/vertfolder.py
|
citrusCS/csgo-menu-maker
|
60e055b4b6f61c7081fc231da47be51eb6e1d47f
|
[
"MIT"
] | 6
|
2019-03-12T11:22:09.000Z
|
2020-06-23T05:53:45.000Z
|
csgomenumaker/command/navstate/vertfolder.py
|
citrusCS/csgo-menu-maker
|
60e055b4b6f61c7081fc231da47be51eb6e1d47f
|
[
"MIT"
] | 15
|
2019-03-12T06:52:29.000Z
|
2021-08-30T18:26:34.000Z
|
from ..compound import Compound
from ..placeholder import Placeholder
from ..primitive import Primitive
from .navstate import NavState
from .horz import Horz
class VertFolder(NavState):
"""
    A vertical state transitioner that also holds recursive children.
VertFolder instances are toggled between by pressing Back/Fire in the UI.
"""
def __init__(self, parent):
NavState.__init__(self, parent)
self.cls = "nav-vert-folder"
# self.dummy is a navstatehorz which serves as the UI element and
# inward transition for this navstate.
self.dummy = Horz(self)
self.dummy.dummy = True
self.actions["fire"].hook = Compound(self.actions["fire"])
self.actions["fire_back"] = Placeholder(
self.actions["fire"].hook,
self.root.globals["void"]
)
self.actions["entry"].hook.children.append(
self.dummy.actions["entry"].hook
)
self.dummy.actions["fire"].hook = self.actions["fire"]
def join_children(self):
"""
Set the neighbors of each child selection, so that make_realiases() can
be run on them.
"""
# Bind dummy to self neighbors.
self.dummy.neighbors["up"] = self.neighbors["up"]
self.dummy.neighbors["down"] = self.neighbors["down"]
self.dummy.neighbors["left"] = self.dummy
self.dummy.neighbors["right"] = self.dummy
self.dummy.neighbors["back"] = self.neighbors["back"]
if len(self.selections):
self.actions["fire_back"].hook = \
self.selections[0].actions["entry"]
        # Set up the fire action, i.e. set up the enter button press.
max = len(self.selections)
# Bind children selections to each other and self.
for i, ch in enumerate(self.selections):
ch.neighbors["up"] = self.selections[(i - 1) % max]
ch.neighbors["down"] = self.selections[(i + 1) % max]
ch.neighbors["back"] = self.dummy
ch.join_children()
def make_realiases(self):
"""
        Make all of the realiases on this instance's children. This function is
called recursively.
"""
self.dummy.make_realiases()
for ch in self.selections:
ch.make_realiases()
def get_path(self):
"""
Return a path suitable for UI printing.
"""
if self.parent is self.root:
return "/"
else:
return self.parent.get_path()+self.ui_name+"/"
| 32.717949
| 79
| 0.59953
|
from ..compound import Compound
from ..placeholder import Placeholder
from ..primitive import Primitive
from .navstate import NavState
from .horz import Horz
class VertFolder(NavState):
def __init__(self, parent):
NavState.__init__(self, parent)
self.cls = "nav-vert-folder"
self.dummy = Horz(self)
self.dummy.dummy = True
self.actions["fire"].hook = Compound(self.actions["fire"])
self.actions["fire_back"] = Placeholder(
self.actions["fire"].hook,
self.root.globals["void"]
)
self.actions["entry"].hook.children.append(
self.dummy.actions["entry"].hook
)
self.dummy.actions["fire"].hook = self.actions["fire"]
def join_children(self):
self.dummy.neighbors["up"] = self.neighbors["up"]
self.dummy.neighbors["down"] = self.neighbors["down"]
self.dummy.neighbors["left"] = self.dummy
self.dummy.neighbors["right"] = self.dummy
self.dummy.neighbors["back"] = self.neighbors["back"]
if len(self.selections):
self.actions["fire_back"].hook = \
self.selections[0].actions["entry"]
max = len(self.selections)
for i, ch in enumerate(self.selections):
ch.neighbors["up"] = self.selections[(i - 1) % max]
ch.neighbors["down"] = self.selections[(i + 1) % max]
ch.neighbors["back"] = self.dummy
ch.join_children()
def make_realiases(self):
self.dummy.make_realiases()
for ch in self.selections:
ch.make_realiases()
def get_path(self):
if self.parent is self.root:
return "/"
else:
return self.parent.get_path()+self.ui_name+"/"
| true
| true
|
1c48d60e2e51be10bc73f9a18624f1051be55abb
| 2,225
|
py
|
Python
|
test_gym.py
|
Ferch42/PyDSRL
|
bd9ea3e739c837db0db5052f7db23476fa21c472
|
[
"MIT"
] | null | null | null |
test_gym.py
|
Ferch42/PyDSRL
|
bd9ea3e739c837db0db5052f7db23476fa21c472
|
[
"MIT"
] | null | null | null |
test_gym.py
|
Ferch42/PyDSRL
|
bd9ea3e739c837db0db5052f7db23476fa21c472
|
[
"MIT"
] | null | null | null |
'''Random-action tester for gym environments'''
import argparse
import pprint
import gym
from gym_recording.wrappers import TraceRecordingWrapper
import os
import cross_circle_gym # Required, registers the environments.
class RandomAgent(object):
"""The world's simplest agent!"""
def __init__(self, action_space, switch_action_every=1):
self.action_space = action_space
self.switch_action_every = switch_action_every
self.idx = 0
self.action = None
def act(self, observation, reward, done):
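        # Resample a random action every switch_action_every calls; otherwise repeat the last one.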
if self.idx % self.switch_action_every == 0:
self.action = self.action_space.sample()
self.idx = 0
self.idx += 1
return self.action
class Filter(object):
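    # Modulo filter: __call__(n) is True when n is a multiple of m, so Filter(1) keeps everything.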
def __init__(self, m):
self.m = m
def __call__(self, n):
return n % self.m == 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=None)
parser.add_argument('directory')
parser.add_argument('--n-steps', type=int, default=100)
parser.add_argument('--n-episodes', type=int, default=100)
parser.add_argument('--switch-action-every', type=int, default=1)
parser.add_argument('env_id', nargs='?', default='CrossCircle-MixedRand-v0')
args = parser.parse_args()
os.makedirs(args.directory, exist_ok=True)
with open(os.path.join(args.directory, 'config.txt'), 'w') as f:
f.write(pprint.pformat(args))
env = gym.make(args.env_id)
os.makedirs(args.directory, exist_ok=True)
env = TraceRecordingWrapper(
env, directory=args.directory, episode_filter=Filter(1), frame_filter=Filter(1))
env.seed(0)
agent = RandomAgent(env.action_space, args.switch_action_every)
reward = 0
done = False
ob = env.reset()
for episode in range(args.n_episodes):
env.reset()
# env.render()
for step in range(args.n_steps):
action = agent.act(ob, reward, done)
ob, reward, done, info = env.step(action)
# env.render()
print('Action:', action, 'Reward:', reward)
if done:
break
env.close()
| 29.276316
| 89
| 0.622022
|
import argparse
import pprint
import gym
from gym_recording.wrappers import TraceRecordingWrapper
import os
import cross_circle_gym
class RandomAgent(object):
def __init__(self, action_space, switch_action_every=1):
self.action_space = action_space
self.switch_action_every = switch_action_every
self.idx = 0
self.action = None
def act(self, observation, reward, done):
if self.idx % self.switch_action_every == 0:
self.action = self.action_space.sample()
self.idx = 0
self.idx += 1
return self.action
class Filter(object):
def __init__(self, m):
self.m = m
def __call__(self, n):
return n % self.m == 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=None)
parser.add_argument('directory')
parser.add_argument('--n-steps', type=int, default=100)
parser.add_argument('--n-episodes', type=int, default=100)
parser.add_argument('--switch-action-every', type=int, default=1)
parser.add_argument('env_id', nargs='?', default='CrossCircle-MixedRand-v0')
args = parser.parse_args()
os.makedirs(args.directory, exist_ok=True)
with open(os.path.join(args.directory, 'config.txt'), 'w') as f:
f.write(pprint.pformat(args))
env = gym.make(args.env_id)
os.makedirs(args.directory, exist_ok=True)
env = TraceRecordingWrapper(
env, directory=args.directory, episode_filter=Filter(1), frame_filter=Filter(1))
env.seed(0)
agent = RandomAgent(env.action_space, args.switch_action_every)
reward = 0
done = False
ob = env.reset()
for episode in range(args.n_episodes):
env.reset()
for step in range(args.n_steps):
action = agent.act(ob, reward, done)
ob, reward, done, info = env.step(action)
print('Action:', action, 'Reward:', reward)
if done:
break
env.close()
| true
| true
|
1c48d64f5158e5505be85364f278ac2439138204
| 46
|
py
|
Python
|
imagepy/tools/Measure/angle2_tol.py
|
Pad0y/imagepy
|
23f41b64ade02f94b566b0d23a4b6459c1a1578d
|
[
"BSD-4-Clause"
] | null | null | null |
imagepy/tools/Measure/angle2_tol.py
|
Pad0y/imagepy
|
23f41b64ade02f94b566b0d23a4b6459c1a1578d
|
[
"BSD-4-Clause"
] | null | null | null |
imagepy/tools/Measure/angle2_tol.py
|
Pad0y/imagepy
|
23f41b64ade02f94b566b0d23a4b6459c1a1578d
|
[
"BSD-4-Clause"
] | null | null | null |
from sciapp.action import SlopeTool as Plugin
| 23
| 45
| 0.847826
|
from sciapp.action import SlopeTool as Plugin
| true
| true
|
1c48d7fa080b1b6f933ed8da341f10f785b48c3d
| 15,598
|
py
|
Python
|
main.py
|
dinhhungGM/Telegram-Bot
|
f7250a505138c1a1957f5dd92da63e36e4bd70c4
|
[
"MIT"
] | null | null | null |
main.py
|
dinhhungGM/Telegram-Bot
|
f7250a505138c1a1957f5dd92da63e36e4bd70c4
|
[
"MIT"
] | null | null | null |
main.py
|
dinhhungGM/Telegram-Bot
|
f7250a505138c1a1957f5dd92da63e36e4bd70c4
|
[
"MIT"
] | null | null | null |
# --------------------------------------------- #
# Plugin Name : TelegramAirdropBot #
# Author Name : fabston #
# File Name : main.py #
# --------------------------------------------- #
import re
import ssl
from io import BytesIO
from time import gmtime, strftime
import pymysql
import telebot
from aiohttp import web
from telebot import types
from telebot.types import InlineKeyboardButton, InlineKeyboardMarkup
import config
WEBHOOK_HOST = config.host
WEBHOOK_PORT = 8443 # 443, 80, 88 or 8443 (port needs to be 'open')
WEBHOOK_LISTEN = "0.0.0.0"  # On some VPSes you may need to put the IP address here.
WEBHOOK_SSL_CERT = "./webhook_cert.pem" # Path to the ssl certificate
WEBHOOK_SSL_PRIV = "./webhook_pkey.pem" # Path to the ssl private key
WEBHOOK_URL_BASE = "https://{}:{}".format(WEBHOOK_HOST, WEBHOOK_PORT)
WEBHOOK_URL_PATH = "/{}/".format(config.api_token)
bot = telebot.TeleBot(config.api_token)
app = web.Application()
def get_connection():
connection = pymysql.connect(
host=config.mysql_host,
user=config.mysql_user,
password=config.mysql_pw,
port=config.mysql_port,
db=config.mysql_db,
charset="utf8mb4",
cursorclass=pymysql.cursors.DictCursor,
autocommit=True,
)
return connection
def create_tables():
connection = get_connection()
with connection.cursor() as cursor:
table_name = "users"
try:
cursor.execute(
" CREATE TABLE `"
+ table_name
+ "` ( `user_id` int(12) DEFAULT NULL, `address` varchar(42) DEFAULT NULL, `address_change_status` tinyint DEFAULT 0, `captcha` tinyint DEFAULT NULL )"
)
print("Database tables created.")
return create_tables
except:
pass
def get_airdrop_wallets():
connection = get_connection()
with connection.cursor() as cursor:
sql = "SELECT address FROM users WHERE address IS NOT NULL"
cursor.execute(sql)
tmp = []
for user in cursor.fetchall():
tmp.append(user["address"])
return tmp
def get_airdrop_users():
connection = get_connection()
with connection.cursor() as cursor:
sql = "SELECT user_id FROM users WHERE address IS NOT NULL"
cursor.execute(sql)
tmp = []
for user in cursor.fetchall():
tmp.append(user["user_id"])
return tmp
default_keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
default_keyboard.row(types.KeyboardButton("🚀 Join Airdrop"))
airdrop_keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
airdrop_keyboard.row(types.KeyboardButton("💼 View Wallet Address"))
def cancel_button():
markup = InlineKeyboardMarkup()
markup.add(InlineKeyboardButton("Cancel Operation", callback_data="cancel_input"))
return markup
def update_wallet_address_button(message):
connection = get_connection()
with connection.cursor() as cursor:
sql = "SELECT address_change_status FROM users WHERE user_id = %s"
cursor.execute(sql, message.chat.id)
address_changes = cursor.fetchone()["address_change_status"]
markup = InlineKeyboardMarkup()
markup.add(
InlineKeyboardButton(
f"Update Address ({address_changes}/{config.wallet_changes})",
callback_data="edit_wallet_address",
)
)
return markup
@bot.message_handler(
func=lambda message: message.chat.type == "private", commands=["start"]
)
def handle_text(message):
connection = get_connection()
with connection.cursor() as cursor:
bot.send_chat_action(message.chat.id, "typing")
sql = "SELECT EXISTS(SELECT user_id FROM users WHERE user_id = %s)"
cursor.execute(sql, message.chat.id)
result = cursor.fetchone()
if not list(result.values())[0]:
sql = "INSERT INTO users(user_id) VALUES (%s)"
cursor.execute(sql, message.chat.id)
if message.chat.id in airdrop_users:
bot.send_message(
message.chat.id,
config.texts["start_2"].format(message.from_user.first_name)
+ "[» Source Code](https://github.com/fabston/Telegram-Airdrop-Bot).",
parse_mode="Markdown",
disable_web_page_preview=True,
reply_markup=airdrop_keyboard,
)
elif not config.airdrop_live:
bot.send_message(
message.chat.id,
config.texts["airdrop_start"]
+ "[» Source Code](https://github.com/fabston/Telegram-Airdrop-Bot).",
parse_mode="Markdown",
disable_web_page_preview=True,
)
elif len(airdrop_users) >= config.airdrop_cap:
bot.send_message(
message.chat.id,
config.texts["airdrop_max_cap"]
+ "[» Source Code](https://github.com/fabston/Telegram-Airdrop-Bot).",
parse_mode="Markdown",
disable_web_page_preview=True,
)
else:
bot.send_message(
message.chat.id,
config.texts["start_1"].format(message.from_user.first_name)
+ "[» Source Code](https://github.com/fabston/Telegram-Airdrop-Bot).",
parse_mode="Markdown",
disable_web_page_preview=True,
reply_markup=default_keyboard,
)
@bot.message_handler(
func=lambda message: message.chat.type == "private"
and message.from_user.id not in airdrop_users
and message.text == "🚀 Join Airdrop"
)
def handle_text(message):
bot.send_chat_action(message.chat.id, "typing")
if not config.airdrop_live:
bot.send_message(
message.chat.id,
config.texts["airdrop_start"],
parse_mode="Markdown",
disable_web_page_preview=True,
)
else:
if len(airdrop_users) >= config.airdrop_cap:
bot.send_message(
message.chat.id,
config.texts["airdrop_max_cap"],
parse_mode="Markdown",
reply_markup=telebot.types.ReplyKeyboardRemove(),
)
else:
bot.send_message(
message.chat.id,
config.texts["airdrop_address"],
parse_mode="Markdown",
disable_web_page_preview=True,
reply_markup=telebot.types.ReplyKeyboardRemove(),
)
bot.register_next_step_handler(message, address_check)
@bot.message_handler(
func=lambda message: message.chat.type == "private"
and message.from_user.id in airdrop_users
and message.text == "💼 View Wallet Address"
)
def handle_text(message):
connection = get_connection()
with connection.cursor() as cursor:
sql = "SELECT address FROM users WHERE user_id = %s"
cursor.execute(sql, message.chat.id)
data = cursor.fetchall()
bot.send_message(
message.chat.id,
text="Your tokens will be sent to:\n\n`{0}`".format(data[0]["address"]),
parse_mode="Markdown",
disable_web_page_preview=True,
reply_markup=update_wallet_address_button(message),
)
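# Next-step handler: validates the wallet address a user sends after tapping
# "Join Airdrop", stores it, and re-registers itself until the input is valid
# or the user cancels.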
def address_check(message):
bot.send_chat_action(message.chat.id, "typing")
connection = get_connection()
with connection.cursor() as cursor:
if len(airdrop_users) >= config.airdrop_cap:
bot.send_message(
message.chat.id, config.texts["airdrop_max_cap"], parse_mode="Markdown"
)
bot.clear_step_handler(message)
elif message.text in airdrop_wallets:
msg = bot.reply_to(
message,
config.texts["airdrop_walletused"],
parse_mode="Markdown",
reply_markup=cancel_button(),
)
bot.register_next_step_handler(msg, address_check)
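        # NOTE: the pattern below only checks for exactly 42 characters; it
        # does not verify a 0x-prefixed hexadecimal address.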
elif message.content_type == "text" and re.match(
r"^(?=.{42}$).*", message.text
):
sql = "UPDATE users SET address = %s WHERE user_id = %s"
cursor.execute(sql, (message.text, message.chat.id))
bot.reply_to(
message,
config.texts["airdrop_confirmation"],
parse_mode="Markdown",
reply_markup=airdrop_keyboard,
)
airdrop_wallets.append(message.text)
airdrop_users.append(message.chat.id)
try:
bot.send_message(
config.log_channel,
"🎈 *#Airdrop_Entry ({0}):*\n"
" • User: [{1}](tg://user?id={2}) (#id{2})\n"
" • Address: `{3}`\n"
" • Time: `{4} UTC`".format(
len(airdrop_users),
bot.get_chat(message.chat.id).first_name,
message.chat.id,
message.text,
strftime("%Y-%m-%d %H:%M:%S", gmtime()),
),
parse_mode="Markdown",
disable_web_page_preview=True,
)
except:
pass
else:
msg = bot.reply_to(
message,
"❌ Invalid $ETH address. Try again:",
parse_mode="Markdown",
reply_markup=cancel_button(),
)
bot.register_next_step_handler(msg, address_check)
def address_check_update(message, old_address):
bot.send_chat_action(message.chat.id, "typing")
connection = get_connection()
with connection.cursor() as cursor:
if message.text in airdrop_wallets:
msg = bot.reply_to(
message, config.texts["airdrop_walletused"], parse_mode="Markdown"
)
bot.register_next_step_handler(msg, address_check_update, old_address)
elif message.content_type == "text" and re.match(
r"^(?=.{42}$).*", message.text
):
sql = "UPDATE users SET address = %s, address_change_status = address_change_status + 1 WHERE user_id = %s"
cursor.execute(sql, (message.text, message.chat.id))
bot.reply_to(
message, config.texts["airdrop_wallet_update"], parse_mode="Markdown"
)
airdrop_wallets.append(message.text)
try:
bot.send_message(
config.log_channel,
"📝 *#Address_Updated:*\n"
" • User: [{1}](tg://user?id={2}) (#id{2})\n"
" • Old Address: `{3}`\n"
" • New Address: `{4}`\n"
" • Time: `{5} UTC`".format(
len(airdrop_wallets),
bot.get_chat(message.chat.id).first_name,
message.chat.id,
old_address,
message.text,
strftime("%Y-%m-%d %H:%M:%S", gmtime()),
),
parse_mode="Markdown",
disable_web_page_preview=True,
)
except:
pass
else:
msg = bot.reply_to(
message,
"❌ Invalid address. Try again:",
parse_mode="Markdown",
reply_markup=cancel_button(),
)
bot.register_next_step_handler(msg, address_check_update, old_address)
@bot.message_handler(
func=lambda message: message.chat.id in config.admins, commands=["airdroplist"]
)
def handle_text(message):
bot.send_chat_action(message.chat.id, "upload_document")
connection = get_connection()
with connection.cursor() as cursor:
sql = "SELECT address FROM users"
cursor.execute(sql)
airdrop = "AIRDROP ({}):\n\n".format(len(airdrop_users))
for user in cursor.fetchall():
if user["address"] is not None:
address = user["address"]
airdrop += "{}\n".format(address)
with BytesIO(str.encode(airdrop)) as output:
output.name = "AIRDROP.txt"
bot.send_document(
message.chat.id,
output,
caption="Here's the list with all airdrop addresses.",
)
return
@bot.callback_query_handler(func=lambda call: True)
def callback_query(call):
if call.data == "cancel_input":
bot.delete_message(
chat_id=call.message.chat.id, message_id=call.message.message_id
)
if len(airdrop_users) >= config.airdrop_cap:
bot.send_message(
call.message.chat.id,
"✅ Operation canceled.\n\nℹ️ The airdrop reached its max cap.",
)
elif call.message.chat.id in airdrop_users:
bot.send_message(
call.message.chat.id,
"✅ Operation canceled.",
reply_markup=airdrop_keyboard,
)
else:
bot.send_message(
call.message.chat.id,
"✅ Operation canceled.",
reply_markup=default_keyboard,
)
bot.clear_step_handler_by_chat_id(chat_id=call.message.chat.id)
elif call.data == "edit_wallet_address":
connection = get_connection()
with connection.cursor() as cursor:
sql = "SELECT address, address_change_status FROM users WHERE user_id = %s"
cursor.execute(sql, call.message.chat.id)
data = cursor.fetchone()
if data["address_change_status"] != config.wallet_changes:
address = data["address"]
bot.edit_message_text(
chat_id=call.message.chat.id,
message_id=call.message.message_id,
text="Please send your new address:",
parse_mode="Markdown",
disable_web_page_preview=True,
)
bot.register_next_step_handler(
call.message, address_check_update, address
)
else:
bot.answer_callback_query(
call.id,
"⚠️ You can't change your address anymore.",
show_alert=True,
)
create_tables()
airdrop_users = get_airdrop_users()
airdrop_wallets = get_airdrop_wallets()
bot.enable_save_next_step_handlers(delay=2)
bot.load_next_step_handlers()
# Remove any existing webhook first; setting a new one can fail if a previous webhook is still registered.
bot.remove_webhook()
# Set webhook
bot.set_webhook(
url=WEBHOOK_URL_BASE + WEBHOOK_URL_PATH, certificate=open(WEBHOOK_SSL_CERT, "r")
)
# Build ssl context
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain(WEBHOOK_SSL_CERT, WEBHOOK_SSL_PRIV)
# Process webhook calls
async def handle(request):
if request.match_info.get("token") == bot.token:
request_body_dict = await request.json()
update = telebot.types.Update.de_json(request_body_dict)
bot.process_new_updates([update])
return web.Response()
else:
return web.Response(status=403)
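# A quick manual check of this endpoint (host and token are placeholders; -k
# skips verification of the self-signed certificate):
#   curl -k -X POST "https://<host>:8443/<token>/" \
#        -H "Content-Type: application/json" -d '{"update_id": 1}'
# A request whose {token} path segment does not match bot.token gets HTTP 403.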
app.router.add_post("/{token}/", handle)
# Start aiohttp server
web.run_app(
app,
host="0.0.0.0",
port=WEBHOOK_PORT,
ssl_context=context,
)
| 35.369615
| 170
| 0.57174
|
import re
import ssl
from io import BytesIO
from time import gmtime, strftime
import pymysql
import telebot
from aiohttp import web
from telebot import types
from telebot.types import InlineKeyboardButton, InlineKeyboardMarkup
import config
WEBHOOK_HOST = config.host
WEBHOOK_PORT = 8443
WEBHOOK_LISTEN = "0.0.0.0"
WEBHOOK_SSL_CERT = "./webhook_cert.pem"
WEBHOOK_SSL_PRIV = "./webhook_pkey.pem"
WEBHOOK_URL_BASE = "https://{}:{}".format(WEBHOOK_HOST, WEBHOOK_PORT)
WEBHOOK_URL_PATH = "/{}/".format(config.api_token)
bot = telebot.TeleBot(config.api_token)
app = web.Application()
def get_connection():
connection = pymysql.connect(
host=config.mysql_host,
user=config.mysql_user,
password=config.mysql_pw,
port=config.mysql_port,
db=config.mysql_db,
charset="utf8mb4",
cursorclass=pymysql.cursors.DictCursor,
autocommit=True,
)
return connection
def create_tables():
connection = get_connection()
with connection.cursor() as cursor:
table_name = "users"
try:
cursor.execute(
" CREATE TABLE `"
+ table_name
+ "` ( `user_id` int(12) DEFAULT NULL, `address` varchar(42) DEFAULT NULL, `address_change_status` tinyint DEFAULT 0, `captcha` tinyint DEFAULT NULL )"
)
print("Database tables created.")
return create_tables
except:
pass
def get_airdrop_wallets():
connection = get_connection()
with connection.cursor() as cursor:
sql = "SELECT address FROM users WHERE address IS NOT NULL"
cursor.execute(sql)
tmp = []
for user in cursor.fetchall():
tmp.append(user["address"])
return tmp
def get_airdrop_users():
connection = get_connection()
with connection.cursor() as cursor:
sql = "SELECT user_id FROM users WHERE address IS NOT NULL"
cursor.execute(sql)
tmp = []
for user in cursor.fetchall():
tmp.append(user["user_id"])
return tmp
default_keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
default_keyboard.row(types.KeyboardButton("🚀 Join Airdrop"))
airdrop_keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
airdrop_keyboard.row(types.KeyboardButton("💼 View Wallet Address"))
def cancel_button():
markup = InlineKeyboardMarkup()
markup.add(InlineKeyboardButton("Cancel Operation", callback_data="cancel_input"))
return markup
def update_wallet_address_button(message):
connection = get_connection()
with connection.cursor() as cursor:
sql = "SELECT address_change_status FROM users WHERE user_id = %s"
cursor.execute(sql, message.chat.id)
address_changes = cursor.fetchone()["address_change_status"]
markup = InlineKeyboardMarkup()
markup.add(
InlineKeyboardButton(
f"Update Address ({address_changes}/{config.wallet_changes})",
callback_data="edit_wallet_address",
)
)
return markup
@bot.message_handler(
func=lambda message: message.chat.type == "private", commands=["start"]
)
def handle_text(message):
connection = get_connection()
with connection.cursor() as cursor:
bot.send_chat_action(message.chat.id, "typing")
sql = "SELECT EXISTS(SELECT user_id FROM users WHERE user_id = %s)"
cursor.execute(sql, message.chat.id)
result = cursor.fetchone()
if not list(result.values())[0]:
sql = "INSERT INTO users(user_id) VALUES (%s)"
cursor.execute(sql, message.chat.id)
if message.chat.id in airdrop_users:
bot.send_message(
message.chat.id,
config.texts["start_2"].format(message.from_user.first_name)
+ "[» Source Code](https://github.com/fabston/Telegram-Airdrop-Bot).",
parse_mode="Markdown",
disable_web_page_preview=True,
reply_markup=airdrop_keyboard,
)
elif not config.airdrop_live:
bot.send_message(
message.chat.id,
config.texts["airdrop_start"]
+ "[» Source Code](https://github.com/fabston/Telegram-Airdrop-Bot).",
parse_mode="Markdown",
disable_web_page_preview=True,
)
elif len(airdrop_users) >= config.airdrop_cap:
bot.send_message(
message.chat.id,
config.texts["airdrop_max_cap"]
+ "[» Source Code](https://github.com/fabston/Telegram-Airdrop-Bot).",
parse_mode="Markdown",
disable_web_page_preview=True,
)
else:
bot.send_message(
message.chat.id,
config.texts["start_1"].format(message.from_user.first_name)
+ "[» Source Code](https://github.com/fabston/Telegram-Airdrop-Bot).",
parse_mode="Markdown",
disable_web_page_preview=True,
reply_markup=default_keyboard,
)
@bot.message_handler(
func=lambda message: message.chat.type == "private"
and message.from_user.id not in airdrop_users
and message.text == "🚀 Join Airdrop"
)
def handle_text(message):
bot.send_chat_action(message.chat.id, "typing")
if not config.airdrop_live:
bot.send_message(
message.chat.id,
config.texts["airdrop_start"],
parse_mode="Markdown",
disable_web_page_preview=True,
)
else:
if len(airdrop_users) >= config.airdrop_cap:
bot.send_message(
message.chat.id,
config.texts["airdrop_max_cap"],
parse_mode="Markdown",
reply_markup=telebot.types.ReplyKeyboardRemove(),
)
else:
bot.send_message(
message.chat.id,
config.texts["airdrop_address"],
parse_mode="Markdown",
disable_web_page_preview=True,
reply_markup=telebot.types.ReplyKeyboardRemove(),
)
bot.register_next_step_handler(message, address_check)
@bot.message_handler(
func=lambda message: message.chat.type == "private"
and message.from_user.id in airdrop_users
and message.text == "💼 View Wallet Address"
)
def handle_text(message):
connection = get_connection()
with connection.cursor() as cursor:
sql = "SELECT address FROM users WHERE user_id = %s"
cursor.execute(sql, message.chat.id)
data = cursor.fetchall()
bot.send_message(
message.chat.id,
text="Your tokens will be sent to:\n\n`{0}`".format(data[0]["address"]),
parse_mode="Markdown",
disable_web_page_preview=True,
reply_markup=update_wallet_address_button(message),
)
def address_check(message):
bot.send_chat_action(message.chat.id, "typing")
connection = get_connection()
with connection.cursor() as cursor:
if len(airdrop_users) >= config.airdrop_cap:
bot.send_message(
message.chat.id, config.texts["airdrop_max_cap"], parse_mode="Markdown"
)
bot.clear_step_handler(message)
elif message.text in airdrop_wallets:
msg = bot.reply_to(
message,
config.texts["airdrop_walletused"],
parse_mode="Markdown",
reply_markup=cancel_button(),
)
bot.register_next_step_handler(msg, address_check)
elif message.content_type == "text" and re.match(
r"^(?=.{42}$).*", message.text
):
sql = "UPDATE users SET address = %s WHERE user_id = %s"
cursor.execute(sql, (message.text, message.chat.id))
bot.reply_to(
message,
config.texts["airdrop_confirmation"],
parse_mode="Markdown",
reply_markup=airdrop_keyboard,
)
airdrop_wallets.append(message.text)
airdrop_users.append(message.chat.id)
try:
bot.send_message(
config.log_channel,
"🎈 *#Airdrop_Entry ({0}):*\n"
" • User: [{1}](tg://user?id={2}) (#id{2})\n"
" • Address: `{3}`\n"
" • Time: `{4} UTC`".format(
len(airdrop_users),
bot.get_chat(message.chat.id).first_name,
message.chat.id,
message.text,
strftime("%Y-%m-%d %H:%M:%S", gmtime()),
),
parse_mode="Markdown",
disable_web_page_preview=True,
)
except:
pass
else:
msg = bot.reply_to(
message,
"❌ Invalid $ETH address. Try again:",
parse_mode="Markdown",
reply_markup=cancel_button(),
)
bot.register_next_step_handler(msg, address_check)
def address_check_update(message, old_address):
bot.send_chat_action(message.chat.id, "typing")
connection = get_connection()
with connection.cursor() as cursor:
if message.text in airdrop_wallets:
msg = bot.reply_to(
message, config.texts["airdrop_walletused"], parse_mode="Markdown"
)
bot.register_next_step_handler(msg, address_check_update, old_address)
elif message.content_type == "text" and re.match(
r"^(?=.{42}$).*", message.text
):
sql = "UPDATE users SET address = %s, address_change_status = address_change_status + 1 WHERE user_id = %s"
cursor.execute(sql, (message.text, message.chat.id))
bot.reply_to(
message, config.texts["airdrop_wallet_update"], parse_mode="Markdown"
)
airdrop_wallets.append(message.text)
try:
bot.send_message(
config.log_channel,
"📝 *#Address_Updated:*\n"
" • User: [{1}](tg://user?id={2}) (#id{2})\n"
" • Old Address: `{3}`\n"
" • New Address: `{4}`\n"
" • Time: `{5} UTC`".format(
len(airdrop_wallets),
bot.get_chat(message.chat.id).first_name,
message.chat.id,
old_address,
message.text,
strftime("%Y-%m-%d %H:%M:%S", gmtime()),
),
parse_mode="Markdown",
disable_web_page_preview=True,
)
except:
pass
else:
msg = bot.reply_to(
message,
"❌ Invalid address. Try again:",
parse_mode="Markdown",
reply_markup=cancel_button(),
)
bot.register_next_step_handler(msg, address_check_update, old_address)
@bot.message_handler(
func=lambda message: message.chat.id in config.admins, commands=["airdroplist"]
)
def handle_text(message):
bot.send_chat_action(message.chat.id, "upload_document")
connection = get_connection()
with connection.cursor() as cursor:
sql = "SELECT address FROM users"
cursor.execute(sql)
airdrop = "AIRDROP ({}):\n\n".format(len(airdrop_users))
for user in cursor.fetchall():
if user["address"] is not None:
address = user["address"]
airdrop += "{}\n".format(address)
with BytesIO(str.encode(airdrop)) as output:
output.name = "AIRDROP.txt"
bot.send_document(
message.chat.id,
output,
caption="Here's the list with all airdrop addresses.",
)
return
@bot.callback_query_handler(func=lambda call: True)
def callback_query(call):
if call.data == "cancel_input":
bot.delete_message(
chat_id=call.message.chat.id, message_id=call.message.message_id
)
if len(airdrop_users) >= config.airdrop_cap:
bot.send_message(
call.message.chat.id,
"✅ Operation canceled.\n\nℹ️ The airdrop reached its max cap.",
)
elif call.message.chat.id in airdrop_users:
bot.send_message(
call.message.chat.id,
"✅ Operation canceled.",
reply_markup=airdrop_keyboard,
)
else:
bot.send_message(
call.message.chat.id,
"✅ Operation canceled.",
reply_markup=default_keyboard,
)
bot.clear_step_handler_by_chat_id(chat_id=call.message.chat.id)
elif call.data == "edit_wallet_address":
connection = get_connection()
with connection.cursor() as cursor:
sql = "SELECT address, address_change_status FROM users WHERE user_id = %s"
cursor.execute(sql, call.message.chat.id)
data = cursor.fetchone()
if data["address_change_status"] != config.wallet_changes:
address = data["address"]
bot.edit_message_text(
chat_id=call.message.chat.id,
message_id=call.message.message_id,
text="Please send your new address:",
parse_mode="Markdown",
disable_web_page_preview=True,
)
bot.register_next_step_handler(
call.message, address_check_update, address
)
else:
bot.answer_callback_query(
call.id,
"⚠️ You can't change your address anymore.",
show_alert=True,
)
create_tables()
airdrop_users = get_airdrop_users()
airdrop_wallets = get_airdrop_wallets()
bot.enable_save_next_step_handlers(delay=2)
bot.load_next_step_handlers()
bot.remove_webhook()
bot.set_webhook(
url=WEBHOOK_URL_BASE + WEBHOOK_URL_PATH, certificate=open(WEBHOOK_SSL_CERT, "r")
)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain(WEBHOOK_SSL_CERT, WEBHOOK_SSL_PRIV)
async def handle(request):
if request.match_info.get("token") == bot.token:
request_body_dict = await request.json()
update = telebot.types.Update.de_json(request_body_dict)
bot.process_new_updates([update])
return web.Response()
else:
return web.Response(status=403)
app.router.add_post("/{token}/", handle)
web.run_app(
app,
host="0.0.0.0",
port=WEBHOOK_PORT,
ssl_context=context,
)
| true
| true
|
1c48d8e95699fa6a9ffe7920b5539dd0a6b34075
| 1,433
|
py
|
Python
|
nanome/_internal/_structure/_workspace.py
|
rramji/nanome-lib
|
2806598af31cfb4bb6e16366f0b300d2ddcc9c13
|
[
"MIT"
] | null | null | null |
nanome/_internal/_structure/_workspace.py
|
rramji/nanome-lib
|
2806598af31cfb4bb6e16366f0b300d2ddcc9c13
|
[
"MIT"
] | null | null | null |
nanome/_internal/_structure/_workspace.py
|
rramji/nanome-lib
|
2806598af31cfb4bb6e16366f0b300d2ddcc9c13
|
[
"MIT"
] | null | null | null |
from nanome.util import Vector3, Quaternion, Logs
from . import _Base
class _Workspace(_Base):
@classmethod
def _create(cls):
return cls()
def __init__(self):
self._position = Vector3()
self._rotation = Quaternion()
        self._scale = Vector3(0.02, 0.02, 0.02)
self._complexes = []
def _add_complex(self, complex):
self._complexes.append(complex)
complex._parent = self
def _remove_complex(self, complex):
self._complexes.remove(complex)
complex._parent = None
@Logs.deprecated()
def get_atom_iterator(self):
iterator = _Workspace.AtomIterator(self)
return iter(iterator)
class AtomIterator(object):
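        # Yields every atom of every complex in the workspace, in order.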
def __init__(self, workspace):
self._workspace = workspace
def __iter__(self):
self._complexes = iter(self._workspace.complexes)
self._update_iter()
return self
def __next__(self):
while True:
try:
return next(self._moleculeAtom)
except StopIteration:
self._update_iter()
def _update_iter(self):
while True:
complex = next(self._complexes)
try:
self._moleculeAtom = complex.get_atom_iterator()
break
except StopIteration:
pass
| 27.557692
| 68
| 0.563852
|
from nanome.util import Vector3, Quaternion, Logs
from . import _Base
class _Workspace(_Base):
@classmethod
def _create(cls):
return cls()
def __init__(self):
self._position = Vector3()
self._rotation = Quaternion()
        self._scale = Vector3(0.02, 0.02, 0.02)
self._complexes = []
def _add_complex(self, complex):
self._complexes.append(complex)
complex._parent = self
def _remove_complex(self, complex):
self._complexes.remove(complex)
complex._parent = None
@Logs.deprecated()
def get_atom_iterator(self):
iterator = _Workspace.AtomIterator(self)
return iter(iterator)
class AtomIterator(object):
def __init__(self, workspace):
self._workspace = workspace
def __iter__(self):
self._complexes = iter(self._workspace.complexes)
self._update_iter()
return self
def __next__(self):
while True:
try:
return next(self._moleculeAtom)
except StopIteration:
self._update_iter()
def _update_iter(self):
while True:
complex = next(self._complexes)
try:
self._moleculeAtom = complex.get_atom_iterator()
break
except StopIteration:
pass
| true
| true
|
1c48d9151eed8e1af38217f84b6e4a7624f59829
| 49,261
|
py
|
Python
|
tensorflow/python/ops/variables.py
|
ml-resources/tensorflow
|
4ecd72b68cd70c3930551aebbf0c80badc301d28
|
[
"Apache-2.0"
] | 1
|
2019-06-19T08:43:26.000Z
|
2019-06-19T08:43:26.000Z
|
tensorflow/python/ops/variables.py
|
liudgit/tensorflow
|
4ecd72b68cd70c3930551aebbf0c80badc301d28
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/ops/variables.py
|
liudgit/tensorflow
|
4ecd72b68cd70c3930551aebbf0c80badc301d28
|
[
"Apache-2.0"
] | 1
|
2019-06-19T08:43:23.000Z
|
2019-06-19T08:43:23.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variable class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import variable_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
class Variable(object):
"""See the [Variables How To](../../how_tos/variables/index.md) for a high
level overview.
A variable maintains state in the graph across calls to `run()`. You add a
variable to the graph by constructing an instance of the class `Variable`.
The `Variable()` constructor requires an initial value for the variable,
which can be a `Tensor` of any type and shape. The initial value defines the
type and shape of the variable. After construction, the type and shape of
the variable are fixed. The value can be changed using one of the assign
methods.
If you want to change the shape of a variable later you have to use an
`assign` Op with `validate_shape=False`.
Just like any `Tensor`, variables created with `Variable()` can be used as
inputs for other Ops in the graph. Additionally, all the operators
overloaded for the `Tensor` class are carried over to variables, so you can
also add nodes to the graph by just doing arithmetic on variables.
```python
import tensorflow as tf
# Create a variable.
w = tf.Variable(<initial-value>, name=<optional-name>)
# Use the variable in the graph like any Tensor.
y = tf.matmul(w, ...another variable or tensor...)
# The overloaded operators are available too.
z = tf.sigmoid(w + y)
# Assign a new value to the variable with `assign()` or a related method.
w.assign(w + 1.0)
w.assign_add(1.0)
```
When you launch the graph, variables have to be explicitly initialized before
you can run Ops that use their value. You can initialize a variable by
running its *initializer op*, restoring the variable from a save file, or
simply running an `assign` Op that assigns a value to the variable. In fact,
the variable *initializer op* is just an `assign` Op that assigns the
variable's initial value to the variable itself.
```python
# Launch the graph in a session.
with tf.Session() as sess:
# Run the variable initializer.
sess.run(w.initializer)
# ...you now can run ops that use the value of 'w'...
```
The most common initialization pattern is to use the convenience function
`global_variables_initializer()` to add an Op to the graph that initializes
all the variables. You then run that Op after launching the graph.
```python
# Add an Op to initialize global variables.
init_op = tf.global_variables_initializer()
# Launch the graph in a session.
with tf.Session() as sess:
# Run the Op that initializes global variables.
sess.run(init_op)
# ...you can now run any Op that uses variable values...
```
If you need to create a variable with an initial value dependent on another
variable, use the other variable's `initialized_value()`. This ensures that
variables are initialized in the right order.
All variables are automatically collected in the graph where they are
created. By default, the constructor adds the new variable to the graph
collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function
`global_variables()` returns the contents of that collection.
When building a machine learning model it is often convenient to distinguish
between variables holding the trainable model parameters and other variables
such as a `global step` variable used to count training steps. To make this
easier, the variable constructor supports a `trainable=<bool>` parameter. If
`True`, the new variable is also added to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`. The convenience function
`trainable_variables()` returns the contents of this collection. The
various `Optimizer` classes use this collection as the default list of
variables to optimize.
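  For example, a minimal sketch:
  ```python
  w = tf.Variable(tf.zeros([10]), name="w")            # trainable by default
  step = tf.Variable(0, trainable=False, name="step")  # global variable only
  assert step not in tf.trainable_variables()
  ```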
Creating a variable.
@@__init__
@@initialized_value
Changing a variable value.
@@assign
@@assign_add
@@assign_sub
@@scatter_sub
@@count_up_to
@@eval
Properties.
@@name
@@dtype
@@get_shape
@@device
@@initializer
@@graph
@@op
"""
# TODO(touts): Add @@value and @@ref in the docstring above once they are
# ready for consumption.
def __init__(self,
initial_value=None,
trainable=True,
collections=None,
validate_shape=True,
caching_device=None,
name=None,
variable_def=None,
dtype=None,
expected_shape=None,
import_scope=None):
"""Creates a new variable with value `initial_value`.
The new variable is added to the graph collections listed in `collections`,
which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
If `trainable` is `True` the variable is also added to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`.
This constructor creates both a `variable` Op and an `assign` Op to set the
variable to its initial value.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
variable_def: `VariableDef` protocol buffer. If not `None`, recreates
the Variable object with its contents. `variable_def` and the other
arguments are mutually exclusive.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
expected_shape: A TensorShape. If set, initial_value is expected
to have this shape.
import_scope: Optional `string`. Name scope to add to the
`Variable.` Only used when initializing from protocol buffer.
Raises:
ValueError: If both `variable_def` and initial_value are specified.
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
"""
if variable_def:
# If variable_def is provided, recreates the variable from its fields.
if initial_value:
raise ValueError("variable_def and initial_value are mutually "
"exclusive.")
self._init_from_proto(variable_def, import_scope=import_scope)
else:
# Create from initial_value.
self._init_from_args(
initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
expected_shape=expected_shape)
def __str__(self):
return str(self._snapshot)
def _init_from_args(self,
initial_value=None,
trainable=True,
collections=None,
validate_shape=True,
caching_device=None,
name=None,
dtype=None,
expected_shape=None):
"""Creates a new variable from arguments.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called.
(Note that initializer functions from init_ops.py must first be bound
to a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If None, either the datatype will be kept (if initial_value is
a Tensor) or float32 will be used (if it is a Python object convertible
to a Tensor).
expected_shape: Deprecated. Ignored.
Raises:
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
"""
_ = expected_shape
if initial_value is None:
raise ValueError("initial_value must be specified.")
init_from_fn = callable(initial_value)
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if not isinstance(collections, (list, tuple, set)):
raise ValueError(
"collections argument to Variable constructor must be a list, tuple, "
"or set. Got %s of type %s" % (collections, type(collections)))
if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
with ops.control_dependencies(None):
with ops.name_scope(name, "Variable", [] if init_from_fn else
[initial_value]) as name:
if init_from_fn:
# Use attr_scope and device(None) to simulate the behavior of
# colocate_with when the variable we want to colocate with doesn't
# yet exist.
true_name = ops._name_from_scope_name(name)
attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(
s=[compat.as_bytes("loc:@%s" % true_name)]))
# pylint: disable=protected-access
with ops.get_default_graph()._attr_scope({"_class": attr}):
with ops.name_scope("Initializer"), ops.device(None):
self._initial_value = ops.convert_to_tensor(
initial_value(), name="initial_value", dtype=dtype)
shape = (self._initial_value.get_shape()
if validate_shape else tensor_shape.unknown_shape())
self._variable = state_ops.variable_op_v2(
shape,
self._initial_value.dtype.base_dtype,
name=name)
# Or get the initial value from a Tensor or Python object.
else:
self._initial_value = ops.convert_to_tensor(
initial_value, name="initial_value", dtype=dtype)
shape = (self._initial_value.get_shape()
if validate_shape else tensor_shape.unknown_shape())
# In this case, the variable op can't be created until after the
# initial_value has been converted to a Tensor with a known type.
self._variable = state_ops.variable_op_v2(
shape,
self._initial_value.dtype.base_dtype,
name=name)
# Manually overrides the variable's shape with the initial value's.
if validate_shape:
initial_value_shape = self._initial_value.get_shape()
if not initial_value_shape.is_fully_defined():
raise ValueError("initial_value must have a shape specified: %s" %
self._initial_value)
# Assigns initial value.
self._initializer_op = state_ops.assign(
self._variable, self._initial_value,
validate_shape=validate_shape).op
# TODO(vrv): Change this class to not take caching_device, but
# to take the op to colocate the snapshot with, so we can use
# colocation rather than devices.
if caching_device is not None:
with ops.device(caching_device):
self._snapshot = array_ops.identity(self._variable, name="read")
else:
with ops.colocate_with(self._variable.op):
self._snapshot = array_ops.identity(self._variable, name="read")
ops.add_to_collections(collections, self)
self._caching_device = caching_device
self._save_slice_info = None
def _init_from_proto(self, variable_def, import_scope=None):
"""Creates a new variable from `VariableDef` protocol buffer.
Args:
variable_def: `VariableDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(variable_def, variable_pb2.VariableDef)
# Create from variable_def.
g = ops.get_default_graph()
self._variable = g.as_graph_element(
ops.prepend_name_scope(variable_def.variable_name,
import_scope=import_scope))
self._initializer_op = g.as_graph_element(
ops.prepend_name_scope(variable_def.initializer_name,
import_scope=import_scope))
self._snapshot = g.as_graph_element(
ops.prepend_name_scope(variable_def.snapshot_name,
import_scope=import_scope))
if variable_def.HasField("save_slice_info_def"):
self._save_slice_info = Variable.SaveSliceInfo(
save_slice_info_def=variable_def.save_slice_info_def)
else:
self._save_slice_info = None
self._caching_device = None
def _as_graph_element(self):
"""Conversion function for Graph.as_graph_element()."""
return self._variable
def _AsTensor(self): # pylint: disable=invalid-name
"""Converts this variable to a Tensor.
See [`value()`](#Variable.value).
Returns:
A `Tensor` containing the value of the variable.
"""
return self._snapshot
def __iter__(self):
"""Dummy method to prevent iteration. Do not call.
NOTE(mrry): If we register __getitem__ as an overloaded operator,
Python will valiantly attempt to iterate over the variable's Tensor from 0
to infinity. Declaring this method prevents this unintended behavior.
Raises:
TypeError: when invoked.
"""
raise TypeError("'Variable' object is not iterable.")
def value(self):
"""Returns the last snapshot of this variable.
You usually do not need to call this method as all ops that need the value
of the variable call it automatically through a `convert_to_tensor()` call.
Returns a `Tensor` which holds the value of the variable. You can not
assign a new value to this tensor as it is not a reference to the variable.
To avoid copies, if the consumer of the returned value is on the same device
as the variable, this actually returns the live value of the variable, not
a copy. Updates to the variable are seen by the consumer. If the consumer
is on a different device it will get a copy of the variable.
Returns:
A `Tensor` containing the value of the variable.
"""
return self._snapshot
def read_value(self):
"""Returns the value of this variable, read in the current context.
Can be different from value() if it's on another device, with control
dependencies, etc.
Returns:
A `Tensor` containing the value of the variable.
"""
return array_ops.identity(self._variable, name="read")
def _ref(self):
"""Returns a reference to this variable.
You usually do not need to call this method as all ops that need a reference
to the variable call it automatically.
Returns is a `Tensor` which holds a reference to the variable. You can
assign a new value to the variable by passing the tensor to an assign op.
See [`value()`](#Variable.value) if you want to get the value of the
variable.
Returns:
A `Tensor` that is a reference to the variable.
"""
return self._variable
def set_shape(self, shape):
"""Overrides the shape for this variable.
Args:
shape: the `TensorShape` representing the overridden shape.
"""
self._ref().set_shape(shape)
self.value().set_shape(shape)
def eval(self, session=None):
"""In a session, computes and returns the value of this variable.
This is not a graph construction method, it does not add ops to the graph.
This convenience method requires a session where the graph containing this
variable has been launched. If no session is passed, the default session is
used. See the [Session class](../../api_docs/python/client.md#Session) for
more information on launching a graph and on sessions.
```python
v = tf.Variable([1, 2])
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
# Usage passing the session explicitly.
print(v.eval(sess))
# Usage with the default session. The 'with' block
# above makes 'sess' the default session.
print(v.eval())
```
Args:
session: The session to use to evaluate this variable. If
none, the default session is used.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
return self._variable.eval(session=session)
def initialized_value(self):
"""Returns the value of the initialized variable.
You should use this instead of the variable itself to initialize another
variable with a value that depends on the value of this variable.
Beware of using initialized_value except during initialization:
initialized_value causes the Variable's initializer op to be run, so running
this op resets the variable to the initial value.
```python
# Initialize 'v' with a random tensor.
v = tf.Variable(tf.truncated_normal([10, 40]))
# Use `initialized_value` to guarantee that `v` has been
# initialized before its value is used to initialize `w`.
# The random values are picked only once.
w = tf.Variable(v.initialized_value() * 2.0)
```
Returns:
A `Tensor` holding the value of this variable after its initializer
has run.
"""
with ops.control_dependencies(None):
with ops.control_dependencies([self._initializer_op]):
# TODO(vrv): Change this class to not take caching_device, but
# to take the op to colocate the snapshot with, so we can use
# colocation rather than devices.
if self._caching_device is not None:
with ops.device(self._caching_device):
return array_ops.identity(self._variable)
else:
with ops.colocate_with(self._variable.op):
return array_ops.identity(self._variable)
@property
def initial_value(self):
"""Returns the Tensor used as the initial value for the variable.
Note that this is different from `initialized_value()` which runs
the op that initializes the variable before returning its value.
This method returns the tensor that is used by the op that initializes
the variable.
Returns:
A `Tensor`.
"""
return self._initial_value
def assign(self, value, use_locking=False):
"""Assigns a new value to the variable.
This is essentially a shortcut for `assign(self, value)`.
Args:
value: A `Tensor`. The new value for this variable.
use_locking: If `True`, use locking during the assignment.
Returns:
A `Tensor` that will hold the new value of this variable after
the assignment has completed.
"""
return state_ops.assign(self._variable, value, use_locking=use_locking)
def assign_add(self, delta, use_locking=False):
"""Adds a value to this variable.
This is essentially a shortcut for `assign_add(self, delta)`.
Args:
delta: A `Tensor`. The value to add to this variable.
use_locking: If `True`, use locking during the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the addition has completed.
"""
return state_ops.assign_add(self._variable, delta, use_locking=use_locking)
def assign_sub(self, delta, use_locking=False):
"""Subtracts a value from this variable.
This is essentially a shortcut for `assign_sub(self, delta)`.
Args:
delta: A `Tensor`. The value to subtract from this variable.
use_locking: If `True`, use locking during the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the subtraction has completed.
"""
return state_ops.assign_sub(self._variable, delta, use_locking=use_locking)
def scatter_sub(self, sparse_delta, use_locking=False):
"""Subtracts `IndexedSlices` from this variable.
This is essentially a shortcut for `scatter_sub(self, sparse_delta.indices,
sparse_delta.values)`.
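    For example, a minimal sketch:
    ```python
    v = tf.Variable(tf.ones([4, 2]))
    delta = tf.IndexedSlices(values=tf.ones([2, 2]),
                             indices=tf.constant([0, 2]))
    update = v.scatter_sub(delta)  # rows 0 and 2 become zero once `update` runs
    ```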
Args:
sparse_delta: `IndexedSlices` to be subtracted from this variable.
use_locking: If `True`, use locking during the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered subtraction has completed.
Raises:
ValueError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, ops.IndexedSlices):
raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
return state_ops.scatter_sub(
self._variable,
sparse_delta.indices,
sparse_delta.values,
use_locking=use_locking)
def count_up_to(self, limit):
"""Increments this variable until it reaches `limit`.
When that Op is run it tries to increment the variable by `1`. If
incrementing the variable would bring it above `limit` then the Op raises
the exception `OutOfRangeError`.
If no error is raised, the Op outputs the value of the variable before
the increment.
This is essentially a shortcut for `count_up_to(self, limit)`.
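    For example, a minimal sketch:
    ```python
    counter = tf.Variable(0, name="counter")
    increment = counter.count_up_to(10)
    # Each run of `increment` returns the pre-increment value; once the
    # variable reaches the limit, running it raises an `OutOfRangeError`.
    ```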
Args:
limit: value at which incrementing the variable raises an error.
Returns:
A `Tensor` that will hold the variable value before the increment. If no
other Op modifies this variable, the values produced will all be
distinct.
"""
return state_ops.count_up_to(self._variable, limit=limit)
def load(self, value, session=None):
"""Load new value into this variable
Writes new value to variable's memory. Doesn't add ops to the graph.
This convenience method requires a session where the graph containing this
variable has been launched. If no session is passed, the default session is
used. See the [Session class](../../api_docs/python/client.md#Session) for
more information on launching a graph and on sessions.
```python
v = tf.Variable([1, 2])
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
# Usage passing the session explicitly.
v.load([2, 3], sess)
print(v.eval(sess)) # prints [2 3]
# Usage with the default session. The 'with' block
# above makes 'sess' the default session.
v.load([3, 4], sess)
print(v.eval()) # prints [3 4]
```
Args:
value: New variable value
session: The session to use to evaluate this variable. If
none, the default session is used.
Raises:
ValueError: Session is not passed and no default session
"""
session = session or ops.get_default_session()
if session is None:
raise ValueError(
"Either session argument should be provided or default session "
"should be established")
session.run(self._initializer_op, {self._initializer_op.inputs[1]: value})
# Conversion to tensor.
@staticmethod
def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name
"""Utility function for converting a Variable to a Tensor."""
_ = name
if dtype and not dtype.is_compatible_with(v.dtype):
raise ValueError(
"Incompatible type conversion requested to type '%s' for variable "
"of type '%s'" % (dtype.name, v.dtype.name))
if as_ref:
return v._ref() # pylint: disable=protected-access
else:
return v.value()
@staticmethod
def _OverloadAllOperators(): # pylint: disable=invalid-name
"""Register overloads for all operators."""
for operator in ops.Tensor.OVERLOADABLE_OPERATORS:
Variable._OverloadOperator(operator)
# For slicing, bind getitem differently than a tensor (use SliceHelperVar
# instead)
# pylint: disable=protected-access
setattr(Variable, "__getitem__", array_ops._SliceHelperVar)
@staticmethod
def _OverloadOperator(operator): # pylint: disable=invalid-name
"""Defer an operator overload to `ops.Tensor`.
We pull the operator out of ops.Tensor dynamically to avoid ordering issues.
Args:
operator: string. The operator name.
"""
def _run_op(a, *args):
# pylint: disable=protected-access
return getattr(ops.Tensor, operator)(a._AsTensor(), *args)
# Propagate __doc__ to wrapper
try:
_run_op.__doc__ = getattr(ops.Tensor, operator).__doc__
except AttributeError:
pass
setattr(Variable, operator, _run_op)
# NOTE(mrry): This enables the Variable's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Variable class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Variables interact
# with ndarrays.
__array_priority__ = 100
@property
def name(self):
"""The name of this variable."""
return self._variable.name
@property
def initializer(self):
"""The initializer operation for this variable."""
return self._initializer_op
@property
def device(self):
"""The device of this variable."""
return self._variable.device
@property
def dtype(self):
"""The `DType` of this variable."""
return self._variable.dtype
@property
def op(self):
"""The `Operation` of this variable."""
return self._variable.op
@property
def graph(self):
"""The `Graph` of this variable."""
return self._variable.graph
def get_shape(self):
"""The `TensorShape` of this variable.
Returns:
A `TensorShape`.
"""
return self._variable.get_shape()
def to_proto(self, export_scope=None):
"""Converts a `Variable` to a `VariableDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `VariableDef` protocol buffer, or `None` if the `Variable` is not
in the specified name scope.
"""
if (export_scope is None or
self._variable.name.startswith(export_scope)):
var_def = variable_pb2.VariableDef()
var_def.variable_name = ops.strip_name_scope(
self._variable.name, export_scope)
var_def.initializer_name = ops.strip_name_scope(
self.initializer.name, export_scope)
var_def.snapshot_name = ops.strip_name_scope(
self._snapshot.name, export_scope)
if self._save_slice_info:
var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto(
export_scope=export_scope))
return var_def
else:
return None
@staticmethod
def from_proto(variable_def, import_scope=None):
"""Returns a `Variable` object created from `variable_def`."""
return Variable(variable_def=variable_def,
import_scope=import_scope)
class SaveSliceInfo(object):
"""Information on how to save this Variable as a slice.
Provides internal support for saving variables as slices of a larger
variable. This API is not public and is subject to change.
Available properties:
* full_name
* full_shape
* var_offset
* var_shape
"""
def __init__(self,
full_name=None,
full_shape=None,
var_offset=None,
var_shape=None,
save_slice_info_def=None,
import_scope=None):
"""Create a `SaveSliceInfo`.
Args:
full_name: Name of the full variable of which this `Variable` is a
slice.
full_shape: Shape of the full variable, as a list of int.
var_offset: Offset of this `Variable` into the full variable, as a
list of int.
var_shape: Shape of this `Variable`, as a list of int.
save_slice_info_def: `SaveSliceInfoDef` protocol buffer. If not `None`,
          recreates the SaveSliceInfo object from its contents.
`save_slice_info_def` and other arguments are mutually
exclusive.
import_scope: Optional `string`. Name scope to add. Only used
when initializing from protocol buffer.
"""
if save_slice_info_def:
assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)
self.full_name = ops.prepend_name_scope(
save_slice_info_def.full_name, import_scope=import_scope)
self.full_shape = [i for i in save_slice_info_def.full_shape]
self.var_offset = [i for i in save_slice_info_def.var_offset]
self.var_shape = [i for i in save_slice_info_def.var_shape]
else:
self.full_name = full_name
self.full_shape = full_shape
self.var_offset = var_offset
self.var_shape = var_shape
@property
def spec(self):
"""Computes the spec string used for saving."""
full_shape_str = " ".join(["%d" % d for d in self.full_shape]) + " "
sl_spec = ":".join([
"%d,%d" % (o, s) for o, s in zip(self.var_offset, self.var_shape)
])
return full_shape_str + sl_spec
def to_proto(self, export_scope=None):
"""Returns a SaveSliceInfoDef() proto.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `SaveSliceInfoDef` protocol buffer, or None if the `Variable` is not
in the specified name scope.
"""
if (export_scope is None or
self.full_name.startswith(export_scope)):
save_slice_info_def = variable_pb2.SaveSliceInfoDef()
save_slice_info_def.full_name = ops.strip_name_scope(
self.full_name, export_scope)
for i in self.full_shape:
save_slice_info_def.full_shape.append(i)
for i in self.var_offset:
save_slice_info_def.var_offset.append(i)
for i in self.var_shape:
save_slice_info_def.var_shape.append(i)
return save_slice_info_def
else:
return None
def _set_save_slice_info(self, save_slice_info):
"""Sets the slice info for this `Variable`.
Args:
save_slice_info: A `Variable.SaveSliceInfo` object.
"""
self._save_slice_info = save_slice_info
def _get_save_slice_info(self):
return self._save_slice_info
class PartitionedVariable(object):
"""A container for partitioned `Variable` objects."""
class PartitionedVariableIterator(object):
"""An iterator that allows accessing the underlying `Variable` objects.
This iterator is necessary to control order of access when Variables
are not partitioned in a standard way along a single axis.
Allows e.g. `list(partitioned_variable)` to return a proper list.
"""
def __init__(self, partitioned_variable):
self._ix = 0
self._partitioned_variable = partitioned_variable
def __iter__(self):
return self
def __next__(self): # For python3 compatibility.
return self.next()
def next(self):
# pylint: disable=protected-access
if self._ix >= len(self._partitioned_variable._variable_list):
raise StopIteration()
variable = self._partitioned_variable._variable_list[self._ix]
# pylint: enable=protected-access
self._ix += 1
return variable
def __init__(self, name, shape, dtype, variable_list, partitions):
"""Creates a new partitioned variable wrapper.
Variables passed via the variable_list must contain a save_slice_info
    field. Concatenation and iteration are in lexicographic order according
to the var_offset property of the save_slice_info.
Args:
name: String. Overall name of the variables.
shape: List of integers. Overall shape of the variables.
dtype: Type of the variables.
variable_list: List of `Variable` that comprise this partitioned variable.
partitions: List of integers. Number of partitions for each dimension.
Raises:
TypeError: If `variable_list` is not a list of `Variable` objects, or
`partitions` is not a list.
ValueError: If `variable_list` is empty, or the `Variable` shape
information does not match `shape`, or `partitions` has invalid values.
"""
if not isinstance(variable_list, (list, tuple)):
raise TypeError(
"variable_list is not a list or tuple: %s" % variable_list)
if not isinstance(partitions, (list, tuple)):
raise TypeError("partitions is not a list or tuple: %s" % partitions)
if not all([p >= 1 for p in partitions]):
raise ValueError("partition values must be positive: %s" % partitions)
if not variable_list:
raise ValueError("variable_list may not be empty")
    # pylint: disable=protected-access
    if not all(v._get_save_slice_info() is not None for v in variable_list):
      raise ValueError(
          "All variables must have a save_slice_info available: %s"
          % [v.name for v in variable_list])
    if len(shape) != len(partitions):
      raise ValueError("len(shape) != len(partitions): %s vs. %s"
                       % (shape, partitions))
    if not all(v._get_save_slice_info().full_shape == shape
               for v in variable_list):
      raise ValueError(
          "All variables' full shapes must match shape: %s; "
          "but full shapes were: %s"
          % (shape, str([v._get_save_slice_info().full_shape
                         for v in variable_list])))
    # Sort the variable_list lexicographically according to var offset value.
    self._variable_list = sorted(
        variable_list, key=lambda v: v._get_save_slice_info().var_offset)
    # pylint: enable=protected-access
self._name = name
self._shape = shape
self._dtype = dtype
self._partitions = partitions
self._as_tensor = None
def __iter__(self):
"""Return an iterable for accessing the underlying partition Variables."""
return self.PartitionedVariableIterator(self)
def __len__(self):
num_partition_axes = len(self._partition_axes())
if num_partition_axes > 1:
raise ValueError("Cannot get a length for %d > 1 partition axes"
% num_partition_axes)
return len(self._variable_list)
def _partition_axes(self):
if all([p == 1 for p in self._partitions]):
return [0]
else:
return [i for i, p in enumerate(self._partitions) if p > 1]
def _concat(self):
"""Returns the overall concatenated value as a `Tensor`.
This is different from using the partitioned variable directly as a tensor
(through tensor conversion and `as_tensor`) in that it creates a new set of
operations that keeps the control dependencies from its scope.
Returns:
`Tensor` containing the concatenated value.
"""
if len(self._variable_list) == 1:
with ops.name_scope(None):
return array_ops.identity(self._variable_list[0], name=self._name)
partition_axes = self._partition_axes()
if len(partition_axes) > 1:
raise NotImplementedError(
"Cannot concatenate along more than one dimension: %s. "
"Multi-axis partition concat is not supported" % str(partition_axes))
partition_ix = partition_axes[0]
with ops.name_scope(self._name + "/ConcatPartitions/"):
concatenated = array_ops.concat(self._variable_list, partition_ix)
with ops.name_scope(None):
return array_ops.identity(concatenated, name=self._name)
def as_tensor(self):
"""Returns the overall concatenated value as a `Tensor`.
The returned tensor will not inherit the control dependencies from the scope
where the value is used, which is similar to getting the value of
`Variable`.
Returns:
`Tensor` containing the concatenated value.
"""
with ops.control_dependencies(None):
return self._concat()
@staticmethod
def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):
# pylint: disable=invalid-name
_ = name
if dtype is not None and not dtype.is_compatible_with(v.dtype):
raise ValueError(
"Incompatible type conversion requested to type '%s' for variable "
"of type '%s'" % (dtype.name, v.dtype.name))
if as_ref:
raise NotImplementedError(
"PartitionedVariable doesn't support being used as a reference.")
else:
return v.as_tensor()
@property
def name(self):
return self._name
@property
def dtype(self):
return self._dtype
def get_shape(self):
return self._shape
def _get_variable_list(self):
return self._variable_list
def _get_partitions(self):
return self._partitions
def assign(self, value, use_locking=False):
_ = value, use_locking
raise NotImplementedError(
"assign() has not been implemented for PartitionedVariable.")
def global_variables():
"""Returns global variables.
Global variables are variables that are shared across machines in a
distributed environment. The `Variable()` constructor or `get_variable()`
automatically adds new variables to the graph collection
`GraphKeys.GLOBAL_VARIABLES`.
This convenience function returns the contents of that collection.
An alternative to global variables are local variables. See
[`tf.local_variables()`](../../api_docs/python/state_ops.md#local_variables)
Returns:
A list of `Variable` objects.
"""
return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
@deprecated("2017-03-02", "Please use tf.global_variables instead.")
def all_variables():
"""See `tf.global_variables`."""
return global_variables()
def _all_saveable_objects():
"""Returns all variables and `SaveableObject`s that must be checkpointed.
Returns:
A list of `Variable` and `SaveableObject` to be checkpointed
"""
# TODO(andreasst): make this function public once things are settled.
return (ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) +
ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS))
def local_variables():
"""Returns local variables.
Local variables are per-process variables, usually not saved/restored to
checkpoint, and used for temporary or intermediate values.
For example, they can be used as counters for metrics computation or
number of epochs this machine has read data.
The `tf.contrib.framework.local_variable()` function automatically adds the
new variable to `GraphKeys.LOCAL_VARIABLES`.
This convenience function returns the contents of that collection.
An alternative to local variables are global variables. See
[`tf.global_variables()`](../../api_docs/python/state_ops.md#global_variables)
Returns:
A list of local `Variable` objects.
"""
return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
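# Hedged sketch (not part of the original file): a variable created with an
# explicit collections argument lands in local_variables() rather than
# global_variables(). Defined for illustration only; never called here.
def _example_local_vs_global():
  v = Variable(0, collections=[ops.GraphKeys.LOCAL_VARIABLES],
               name="epoch_counter")
  assert v in local_variables()
  assert v not in global_variables()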
def model_variables():
"""Returns all variables in the MODEL_VARIABLES collection.
Returns:
A list of local Variable objects.
"""
return ops.get_collection(ops.GraphKeys.MODEL_VARIABLES)
def trainable_variables():
"""Returns all variables created with `trainable=True`.
When passed `trainable=True`, the `Variable()` constructor automatically
adds new variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`. This convenience function returns the
contents of that collection.
Returns:
A list of Variable objects.
"""
return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
def moving_average_variables():
"""Returns all variables that maintain their moving averages.
If an `ExponentialMovingAverage` object is created and the `apply()`
method is called on a list of variables, these variables will
be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.
This convenience function returns the contents of that collection.
Returns:
A list of Variable objects.
"""
return ops.get_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES)
def variables_initializer(var_list, name="init"):
"""Returns an Op that initializes a list of variables.
After you launch the graph in a session, you can run the returned Op to
initialize all the variables in `var_list`. This Op runs all the
initializers of the variables in `var_list` in parallel.
Calling `initialize_variables()` is equivalent to passing the list of
initializers to `Group()`.
If `var_list` is empty, however, the function still returns an Op that can
be run. That Op just has no effect.
Args:
var_list: List of `Variable` objects to initialize.
name: Optional name for the returned operation.
Returns:
An Op that runs the initializers of all the specified variables.
"""
if var_list:
return control_flow_ops.group(*[v.initializer for v in var_list], name=name)
return control_flow_ops.no_op(name=name)
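# Hedged usage sketch (assumption, not in the original file): initializing
# only a chosen subset of variables; `sess` is a hypothetical tf.Session.
def _example_initialize_subset(sess):
  v = Variable(0, name="counter")
  w = Variable(1, name="weight")
  sess.run(variables_initializer([v, w], name="init_subset"))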
@deprecated("2017-03-02", "Use `tf.variables_initializer` instead.")
def initialize_variables(var_list, name="init"):
"""See `tf.variables_initializer`."""
return variables_initializer(var_list, name=name)
def global_variables_initializer():
"""Returns an Op that initializes global variables.
This is just a shortcut for `variables_initializer(global_variables())`
Returns:
An Op that initializes global variables in the graph.
"""
return variables_initializer(global_variables())
@deprecated("2017-03-02", "Use `tf.global_variables_initializer` instead.")
def initialize_all_variables():
"""See `tf.global_variables_initializer`."""
return global_variables_initializer()
def local_variables_initializer():
"""Returns an Op that initializes all local variables.
This is just a shortcut for `variables_initializer(local_variables())`
Returns:
An Op that initializes all local variables in the graph.
"""
return variables_initializer(local_variables())
@deprecated("2017-03-02", "Use `tf.local_variables_initializer` instead.")
def initialize_local_variables():
"""See `tf.local_variables_initializer`."""
return local_variables_initializer()
def is_variable_initialized(variable):
"""Tests if a variable has been initialized.
Args:
variable: A `Variable`.
Returns:
A scalar boolean Tensor: `True` if the variable has been
initialized, `False` otherwise.
"""
return state_ops.is_variable_initialized(variable)
def assert_variables_initialized(var_list=None):
"""Returns an Op to check if variables are initialized.
NOTE: This function is obsolete and will be removed in 6 months. Please
change your implementation to use `report_uninitialized_variables()`.
When run, the returned Op will raise the exception `FailedPreconditionError`
if any of the variables has not yet been initialized.
Note: This function is implemented by trying to fetch the values of the
variables. If one of the variables is not initialized a message may be
logged by the C++ runtime. This is expected.
Args:
var_list: List of `Variable` objects to check. Defaults to the
value of `global_variables().`
Returns:
An Op, or None if there are no variables.
"""
if var_list is None:
var_list = global_variables() + local_variables()
# Backwards compatibility for old-style variables. TODO(touts): remove.
if not var_list:
var_list = []
for op in ops.get_default_graph().get_operations():
if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
var_list.append(op.outputs[0])
if not var_list:
return None
else:
ranks = []
for var in var_list:
with ops.colocate_with(var.op):
ranks.append(array_ops.rank_internal(var, optimize=False))
if len(ranks) == 1:
return ranks[0]
else:
return array_ops.stack(ranks)
def report_uninitialized_variables(var_list=None,
name="report_uninitialized_variables"):
"""Adds ops to list the names of uninitialized variables.
When run, it returns a 1-D tensor containing the names of uninitialized
variables if there are any, or an empty array if there are none.
Args:
var_list: List of `Variable` objects to check. Defaults to the
value of `global_variables() + local_variables()`
name: Optional name of the `Operation`.
Returns:
A 1-D tensor containing names of the uninitialized variables, or an empty
1-D tensor if there are no variables or no uninitialized variables.
"""
if var_list is None:
var_list = global_variables() + local_variables()
# Backwards compatibility for old-style variables. TODO(touts): remove.
if not var_list:
var_list = []
for op in ops.get_default_graph().get_operations():
if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
var_list.append(op.outputs[0])
with ops.name_scope(name):
if not var_list:
# Return an empty tensor so we only need to check for returned tensor
# size being 0 as an indication of model ready.
return array_ops.constant([], dtype=dtypes.string)
else:
# Get a 1-D boolean tensor listing whether each variable is initialized.
variables_mask = math_ops.logical_not(
array_ops.stack(
[state_ops.is_variable_initialized(v) for v in var_list]))
# Get a 1-D string tensor containing all the variable names.
variable_names_tensor = array_ops.constant([s.op.name for s in var_list])
# Return a 1-D tensor containing all the names of uninitialized variables.
return array_ops.boolean_mask(variable_names_tensor, variables_mask)
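# Hedged usage sketch (assumption): a readiness probe built on
# report_uninitialized_variables() -- the model is ready iff the returned
# array of names is empty. `sess` is a hypothetical tf.Session.
def _example_is_model_ready(sess):
  return len(sess.run(report_uninitialized_variables())) == 0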
# pylint: disable=protected-access
ops.register_tensor_conversion_function(Variable,
Variable._TensorConversionFunction)
Variable._OverloadAllOperators()
ops.register_tensor_conversion_function(
PartitionedVariable, PartitionedVariable._TensorConversionFunction)
# pylint: enable=protected-access
ops.register_dense_tensor_like_type(Variable)
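# Illustrative note (not part of the original file): because of the conversion
# functions registered above, a Variable can be passed anywhere a Tensor is
# expected and is converted implicitly via its snapshot.
def _example_implicit_conversion():
  v = Variable(1.0, name="scale")
  return math_ops.multiply(v, 2.0)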
| 37.010518
| 104
| 0.692982
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import variable_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
class Variable(object):
def __init__(self,
initial_value=None,
trainable=True,
collections=None,
validate_shape=True,
caching_device=None,
name=None,
variable_def=None,
dtype=None,
expected_shape=None,
import_scope=None):
if variable_def:
if initial_value:
raise ValueError("variable_def and initial_value are mutually "
"exclusive.")
self._init_from_proto(variable_def, import_scope=import_scope)
else:
self._init_from_args(
initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
expected_shape=expected_shape)
def __str__(self):
return str(self._snapshot)
def _init_from_args(self,
initial_value=None,
trainable=True,
collections=None,
validate_shape=True,
caching_device=None,
name=None,
dtype=None,
expected_shape=None):
_ = expected_shape
if initial_value is None:
raise ValueError("initial_value must be specified.")
init_from_fn = callable(initial_value)
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if not isinstance(collections, (list, tuple, set)):
raise ValueError(
"collections argument to Variable constructor must be a list, tuple, "
"or set. Got %s of type %s" % (collections, type(collections)))
if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
with ops.control_dependencies(None):
with ops.name_scope(name, "Variable", [] if init_from_fn else
[initial_value]) as name:
if init_from_fn:
true_name = ops._name_from_scope_name(name)
attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(
s=[compat.as_bytes("loc:@%s" % true_name)]))
# pylint: disable=protected-access
with ops.get_default_graph()._attr_scope({"_class": attr}):
with ops.name_scope("Initializer"), ops.device(None):
self._initial_value = ops.convert_to_tensor(
initial_value(), name="initial_value", dtype=dtype)
shape = (self._initial_value.get_shape()
if validate_shape else tensor_shape.unknown_shape())
self._variable = state_ops.variable_op_v2(
shape,
self._initial_value.dtype.base_dtype,
name=name)
# Or get the initial value from a Tensor or Python object.
else:
self._initial_value = ops.convert_to_tensor(
initial_value, name="initial_value", dtype=dtype)
shape = (self._initial_value.get_shape()
if validate_shape else tensor_shape.unknown_shape())
# In this case, the variable op can't be created until after the
# initial_value has been converted to a Tensor with a known type.
self._variable = state_ops.variable_op_v2(
shape,
self._initial_value.dtype.base_dtype,
name=name)
if validate_shape:
initial_value_shape = self._initial_value.get_shape()
if not initial_value_shape.is_fully_defined():
raise ValueError("initial_value must have a shape specified: %s" %
self._initial_value)
self._initializer_op = state_ops.assign(
self._variable, self._initial_value,
validate_shape=validate_shape).op
if caching_device is not None:
with ops.device(caching_device):
self._snapshot = array_ops.identity(self._variable, name="read")
else:
with ops.colocate_with(self._variable.op):
self._snapshot = array_ops.identity(self._variable, name="read")
ops.add_to_collections(collections, self)
self._caching_device = caching_device
self._save_slice_info = None
def _init_from_proto(self, variable_def, import_scope=None):
assert isinstance(variable_def, variable_pb2.VariableDef)
g = ops.get_default_graph()
self._variable = g.as_graph_element(
ops.prepend_name_scope(variable_def.variable_name,
import_scope=import_scope))
self._initializer_op = g.as_graph_element(
ops.prepend_name_scope(variable_def.initializer_name,
import_scope=import_scope))
self._snapshot = g.as_graph_element(
ops.prepend_name_scope(variable_def.snapshot_name,
import_scope=import_scope))
if variable_def.HasField("save_slice_info_def"):
self._save_slice_info = Variable.SaveSliceInfo(
save_slice_info_def=variable_def.save_slice_info_def)
else:
self._save_slice_info = None
self._caching_device = None
def _as_graph_element(self):
return self._variable
def _AsTensor(self):
  return self._snapshot
def __iter__(self):
raise TypeError("'Variable' object is not iterable.")
def value(self):
return self._snapshot
def read_value(self):
return array_ops.identity(self._variable, name="read")
def _ref(self):
return self._variable
def set_shape(self, shape):
self._ref().set_shape(shape)
self.value().set_shape(shape)
def eval(self, session=None):
return self._variable.eval(session=session)
def initialized_value(self):
with ops.control_dependencies(None):
with ops.control_dependencies([self._initializer_op]):
if self._caching_device is not None:
with ops.device(self._caching_device):
return array_ops.identity(self._variable)
else:
with ops.colocate_with(self._variable.op):
return array_ops.identity(self._variable)
@property
def initial_value(self):
return self._initial_value
def assign(self, value, use_locking=False):
return state_ops.assign(self._variable, value, use_locking=use_locking)
def assign_add(self, delta, use_locking=False):
return state_ops.assign_add(self._variable, delta, use_locking=use_locking)
def assign_sub(self, delta, use_locking=False):
return state_ops.assign_sub(self._variable, delta, use_locking=use_locking)
def scatter_sub(self, sparse_delta, use_locking=False):
if not isinstance(sparse_delta, ops.IndexedSlices):
raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
return state_ops.scatter_sub(
self._variable,
sparse_delta.indices,
sparse_delta.values,
use_locking=use_locking)
def count_up_to(self, limit):
return state_ops.count_up_to(self._variable, limit=limit)
def load(self, value, session=None):
session = session or ops.get_default_session()
if session is None:
raise ValueError(
"Either session argument should be provided or default session "
"should be established")
session.run(self._initializer_op, {self._initializer_op.inputs[1]: value})
@staticmethod
  def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):
    _ = name
    if dtype and not dtype.is_compatible_with(v.dtype):
      raise ValueError(
          "Incompatible type conversion requested to type '%s' for variable "
          "of type '%s'" % (dtype.name, v.dtype.name))
    if as_ref:
      return v._ref()
    else:
      return v.value()
@staticmethod
  def _OverloadAllOperators():
    for operator in ops.Tensor.OVERLOADABLE_OPERATORS:
      Variable._OverloadOperator(operator)
    setattr(Variable, "__getitem__", array_ops._SliceHelperVar)
@staticmethod
def _OverloadOperator(operator):
def _run_op(a, *args):
return getattr(ops.Tensor, operator)(a._AsTensor(), *args)
try:
_run_op.__doc__ = getattr(ops.Tensor, operator).__doc__
except AttributeError:
pass
setattr(Variable, operator, _run_op)
# NOTE(mrry): This enables the Variable's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Variable class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
__array_priority__ = 100
@property
def name(self):
return self._variable.name
@property
def initializer(self):
return self._initializer_op
@property
def device(self):
return self._variable.device
@property
def dtype(self):
return self._variable.dtype
@property
def op(self):
return self._variable.op
@property
def graph(self):
return self._variable.graph
def get_shape(self):
return self._variable.get_shape()
def to_proto(self, export_scope=None):
if (export_scope is None or
self._variable.name.startswith(export_scope)):
var_def = variable_pb2.VariableDef()
var_def.variable_name = ops.strip_name_scope(
self._variable.name, export_scope)
var_def.initializer_name = ops.strip_name_scope(
self.initializer.name, export_scope)
var_def.snapshot_name = ops.strip_name_scope(
self._snapshot.name, export_scope)
if self._save_slice_info:
var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto(
export_scope=export_scope))
return var_def
else:
return None
@staticmethod
def from_proto(variable_def, import_scope=None):
return Variable(variable_def=variable_def,
import_scope=import_scope)
class SaveSliceInfo(object):
def __init__(self,
full_name=None,
full_shape=None,
var_offset=None,
var_shape=None,
save_slice_info_def=None,
import_scope=None):
if save_slice_info_def:
assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)
self.full_name = ops.prepend_name_scope(
save_slice_info_def.full_name, import_scope=import_scope)
self.full_shape = [i for i in save_slice_info_def.full_shape]
self.var_offset = [i for i in save_slice_info_def.var_offset]
self.var_shape = [i for i in save_slice_info_def.var_shape]
else:
self.full_name = full_name
self.full_shape = full_shape
self.var_offset = var_offset
self.var_shape = var_shape
@property
def spec(self):
full_shape_str = " ".join(["%d" % d for d in self.full_shape]) + " "
sl_spec = ":".join([
"%d,%d" % (o, s) for o, s in zip(self.var_offset, self.var_shape)
])
return full_shape_str + sl_spec
def to_proto(self, export_scope=None):
if (export_scope is None or
self.full_name.startswith(export_scope)):
save_slice_info_def = variable_pb2.SaveSliceInfoDef()
save_slice_info_def.full_name = ops.strip_name_scope(
self.full_name, export_scope)
for i in self.full_shape:
save_slice_info_def.full_shape.append(i)
for i in self.var_offset:
save_slice_info_def.var_offset.append(i)
for i in self.var_shape:
save_slice_info_def.var_shape.append(i)
return save_slice_info_def
else:
return None
def _set_save_slice_info(self, save_slice_info):
self._save_slice_info = save_slice_info
def _get_save_slice_info(self):
return self._save_slice_info
class PartitionedVariable(object):
class PartitionedVariableIterator(object):
def __init__(self, partitioned_variable):
self._ix = 0
self._partitioned_variable = partitioned_variable
def __iter__(self):
return self
def __next__(self):  # For python3 compatibility.
  return self.next()
def next(self):
if self._ix >= len(self._partitioned_variable._variable_list):
raise StopIteration()
variable = self._partitioned_variable._variable_list[self._ix]
self._ix += 1
return variable
def __init__(self, name, shape, dtype, variable_list, partitions):
if not isinstance(variable_list, (list, tuple)):
raise TypeError(
"variable_list is not a list or tuple: %s" % variable_list)
if not isinstance(partitions, (list, tuple)):
raise TypeError("partitions is not a list or tuple: %s" % partitions)
if not all([p >= 1 for p in partitions]):
raise ValueError("partition values must be positive: %s" % partitions)
if not variable_list:
raise ValueError("variable_list may not be empty")
    if not all([v._get_save_slice_info() is not None for v in variable_list]):
      raise ValueError(
          "All variables must have a save_slice_info available: %s"
          % [v.name for v in variable_list])
    if len(shape) != len(partitions):
      raise ValueError("len(shape) != len(partitions): %s vs. %s"
                       % (shape, partitions))
    if not all([v._get_save_slice_info().full_shape == shape
                for v in variable_list]):
      raise ValueError(
          "All variables' full shapes must match shape: %s; "
          "but full shapes were: %s"
          % (shape, str([v._get_save_slice_info().full_shape
                         for v in variable_list])))
    self._variable_list = sorted(
        variable_list, key=lambda v: v._get_save_slice_info().var_offset)
self._name = name
self._shape = shape
self._dtype = dtype
self._partitions = partitions
self._as_tensor = None
def __iter__(self):
return self.PartitionedVariableIterator(self)
def __len__(self):
num_partition_axes = len(self._partition_axes())
if num_partition_axes > 1:
raise ValueError("Cannot get a length for %d > 1 partition axes"
% num_partition_axes)
return len(self._variable_list)
def _partition_axes(self):
if all([p == 1 for p in self._partitions]):
return [0]
else:
return [i for i, p in enumerate(self._partitions) if p > 1]
def _concat(self):
if len(self._variable_list) == 1:
with ops.name_scope(None):
return array_ops.identity(self._variable_list[0], name=self._name)
partition_axes = self._partition_axes()
if len(partition_axes) > 1:
raise NotImplementedError(
"Cannot concatenate along more than one dimension: %s. "
"Multi-axis partition concat is not supported" % str(partition_axes))
partition_ix = partition_axes[0]
with ops.name_scope(self._name + "/ConcatPartitions/"):
concatenated = array_ops.concat(self._variable_list, partition_ix)
with ops.name_scope(None):
return array_ops.identity(concatenated, name=self._name)
def as_tensor(self):
with ops.control_dependencies(None):
return self._concat()
@staticmethod
def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):
# pylint: disable=invalid-name
_ = name
if dtype is not None and not dtype.is_compatible_with(v.dtype):
raise ValueError(
"Incompatible type conversion requested to type '%s' for variable "
"of type '%s'" % (dtype.name, v.dtype.name))
if as_ref:
raise NotImplementedError(
"PartitionedVariable doesn't support being used as a reference.")
else:
return v.as_tensor()
@property
def name(self):
return self._name
@property
def dtype(self):
return self._dtype
def get_shape(self):
return self._shape
def _get_variable_list(self):
return self._variable_list
def _get_partitions(self):
return self._partitions
def assign(self, value, use_locking=False):
_ = value, use_locking
raise NotImplementedError(
"assign() has not been implemented for PartitionedVariable.")
def global_variables():
return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
@deprecated("2017-03-02", "Please use tf.global_variables instead.")
def all_variables():
return global_variables()
def _all_saveable_objects():
return (ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) +
ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS))
def local_variables():
return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
def model_variables():
return ops.get_collection(ops.GraphKeys.MODEL_VARIABLES)
def trainable_variables():
return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
def moving_average_variables():
return ops.get_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES)
def variables_initializer(var_list, name="init"):
if var_list:
return control_flow_ops.group(*[v.initializer for v in var_list], name=name)
return control_flow_ops.no_op(name=name)
@deprecated("2017-03-02", "Use `tf.variables_initializer` instead.")
def initialize_variables(var_list, name="init"):
return variables_initializer(var_list, name=name)
def global_variables_initializer():
return variables_initializer(global_variables())
@deprecated("2017-03-02", "Use `tf.global_variables_initializer` instead.")
def initialize_all_variables():
return global_variables_initializer()
def local_variables_initializer():
return variables_initializer(local_variables())
@deprecated("2017-03-02", "Use `tf.local_variables_initializer` instead.")
def initialize_local_variables():
return local_variables_initializer()
def is_variable_initialized(variable):
return state_ops.is_variable_initialized(variable)
def assert_variables_initialized(var_list=None):
if var_list is None:
var_list = global_variables() + local_variables()
if not var_list:
var_list = []
for op in ops.get_default_graph().get_operations():
if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
var_list.append(op.outputs[0])
if not var_list:
return None
else:
ranks = []
for var in var_list:
with ops.colocate_with(var.op):
ranks.append(array_ops.rank_internal(var, optimize=False))
if len(ranks) == 1:
return ranks[0]
else:
return array_ops.stack(ranks)
def report_uninitialized_variables(var_list=None,
name="report_uninitialized_variables"):
if var_list is None:
var_list = global_variables() + local_variables()
if not var_list:
var_list = []
for op in ops.get_default_graph().get_operations():
if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
var_list.append(op.outputs[0])
with ops.name_scope(name):
if not var_list:
return array_ops.constant([], dtype=dtypes.string)
else:
variables_mask = math_ops.logical_not(
array_ops.stack(
[state_ops.is_variable_initialized(v) for v in var_list]))
variable_names_tensor = array_ops.constant([s.op.name for s in var_list])
return array_ops.boolean_mask(variable_names_tensor, variables_mask)
ops.register_tensor_conversion_function(Variable,
Variable._TensorConversionFunction)
Variable._OverloadAllOperators()
ops.register_tensor_conversion_function(
PartitionedVariable, PartitionedVariable._TensorConversionFunction)
ops.register_dense_tensor_like_type(Variable)
| true
| true
|
1c48d95179ea72c37d482d0eb02137ad50a8ff23
| 8,888
|
py
|
Python
|
mysqloperator/controller/group_monitor.py
|
sjmudd/mysql-operator
|
415dd8eae02a8909c2b85c4653b34525c74e388a
|
[
"Apache-2.0"
] | 206
|
2021-05-28T16:45:10.000Z
|
2022-03-31T03:08:15.000Z
|
mysqloperator/controller/group_monitor.py
|
sjmudd/mysql-operator
|
415dd8eae02a8909c2b85c4653b34525c74e388a
|
[
"Apache-2.0"
] | 6
|
2021-06-20T05:52:28.000Z
|
2022-03-14T14:08:41.000Z
|
mysqloperator/controller/group_monitor.py
|
sjmudd/mysql-operator
|
415dd8eae02a8909c2b85c4653b34525c74e388a
|
[
"Apache-2.0"
] | 37
|
2021-06-12T11:36:43.000Z
|
2022-03-26T07:32:16.000Z
|
# Copyright (c) 2020, 2021, Oracle and/or its affiliates.
#
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
#
from logging import Logger
from typing import Callable, Optional, TYPE_CHECKING, Tuple
from mysqloperator.controller.innodbcluster.cluster_api import InnoDBCluster
from mysqloperator.controller.shellutils import RetryLoop
from . import shellutils
import threading
import time
import select
import mysqlsh
mysql = mysqlsh.mysql
mysqlx = mysqlsh.mysqlx
k_connect_retry_interval = 10
class MonitoredCluster:
def __init__(self, cluster: InnoDBCluster,
account: Tuple[str, str],
handler: Callable[[InnoDBCluster, list, bool], None]):
self.cluster = cluster
self.account = account
self.session = None
self.target = None
self.target_not_primary = None
self.last_connect_attempt = 0
self.last_primary_id = None
self.last_view_id = None
self.handler = handler
@property
def name(self) -> str:
return self.cluster.name
@property
def namespace(self) -> str:
return self.cluster.namespace
def ensure_connected(self) -> Optional['mysqlx.Session']:
# TODO run a ping every X seconds
if not self.session and (not self.last_connect_attempt or time.time() - self.last_connect_attempt > k_connect_retry_interval):
print(
f"GroupMonitor: Trying to connect to a member of cluster {self.cluster.namespace}/{self.cluster.name}")
self.last_connect_attempt = time.time()
self.session = None
self.connect_to_primary()
# force a refresh after we connect so we don't miss anything
# that happened while we were out
if self.session:
print(
f"GroupMonitor: Connect member of {self.cluster.namespace}/{self.cluster.name} OK {self.session}")
self.on_view_change(None)
else:
print(
f"GroupMonitor: Connect to member of {self.cluster.namespace}/{self.cluster.name} failed")
return self.session
def connect_to_primary(self) -> None:
while True:
session, is_primary = self.find_primary()
if not is_primary:
if session:
print(
f"GroupMonitor: Could not connect to PRIMARY of cluster {self.cluster.namespace}/{self.cluster.name}")
else:
print(
f"GroupMonitor: Could not connect to PRIMARY nor SECONDARY of cluster {self.cluster.namespace}/{self.cluster.name}")
if session:
try:
# extend the server-side wait timeout for the next command to a full day
session.run_sql(
f"set session mysqlx_wait_timeout = {24*60*60}")
session._enable_notices(["GRViewChanged"])
co = shellutils.parse_uri(session.uri)
self.target = f"{co['host']}:{co['port']}"
self.target_not_primary = not is_primary
self.session = session
except mysqlsh.Error as e:
if mysql.ErrorCode.CR_MAX_ERROR >= e.code >= mysql.ErrorCode.CR_MIN_ERROR:
# Try again if the server we were connected to is gone
continue
else:
raise
else:
self.session = None
break
def find_primary(self) -> Tuple[Optional['mysqlx.Session'], bool]:
not_primary = None
pods = self.cluster.get_pods()
# Try to find the PRIMARY the easy way
for pod in pods:
member_info = pod.get_membership_info()
if member_info and member_info.get("role") == "PRIMARY":
session = self.try_connect(pod)
if session:
s = shellutils.jump_to_primary(session, self.account)
if s:
if s != session:
session.close()
return s, True
else:
not_primary = session
# Try to connect to anyone and find the primary from there
for pod in pods:
session = self.try_connect(pod)
if session:
s = shellutils.jump_to_primary(session, self.account)
if s:
if s != session:
session.close()
return s, True
else:
not_primary = session
return not_primary, False
def try_connect(self, pod) -> Optional['mysqlx.Session']:
try:
session = mysqlx.get_session(pod.xendpoint_co)
except mysqlsh.Error as e:
print(f"GroupMonitor: Error connecting to {pod.xendpoint}: {e}")
return None
return session
def handle_notice(self) -> None:
while 1:
try:
# TODO hack to force unexpected async notice to be read, xsession should read packets itself
self.session.run_sql("select 1")
notice = self.session._fetch_notice()
if not notice:
break
print(f"GOT NOTICE {notice}")
self.on_view_change(notice.get("view_id"))
if not self.session:
break
except mysqlsh.Error as e:
print(
f"GroupMonitor: Error fetching notice: dest={self.target} error={e}")
self.session.close()
self.session = None
break
def on_view_change(self, view_id: Optional[str]) -> None:
members = shellutils.query_members(self.session)
self.handler(self.cluster, members, view_id != self.last_view_id)
self.last_view_id = view_id
primary = None
force_reconnect = False
for member_id, role, status, view_id, endpoint, version in members:
if self.last_primary_id == member_id and role != "PRIMARY":
force_reconnect = True
break
if role == "PRIMARY" and not primary:
primary = member_id
self.last_primary_id = primary
# force reconnection if the PRIMARY changed or we're not connected to the PRIMARY
if self.target_not_primary or force_reconnect:
print(
f"GroupMonitor: PRIMARY changed for {self.cluster.namespace}/{self.cluster.name}")
if self.session:
self.session.close()
self.session = None
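# Illustrative sketch (assumption, not in the original source): the shape of a
# membership handler consumed by on_view_change() above -- each row of
# `members` unpacks as (member_id, role, status, view_id, endpoint, version).
def _example_membership_handler(cluster, members, view_changed):
    for member_id, role, status, view_id, endpoint, version in members:
        print(f"{cluster.namespace}/{cluster.name} {endpoint}: {role}/{status}")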
# TODO change this to a per cluster kopf.daemon?
class GroupMonitor(threading.Thread):
def __init__(self):
super().__init__(daemon=True, name="group-monitor")
self.clusters = []
self.stopped = False
def monitor_cluster(self, cluster: InnoDBCluster,
handler: Callable[[InnoDBCluster, list, bool], None],
logger: Logger) -> None:
for c in self.clusters:
if c.name == cluster.name and c.namespace == cluster.namespace:
return
# We could get called here before the Secret is ready
account = RetryLoop(logger).call(cluster.get_admin_account)
target = MonitoredCluster(cluster, account, handler)
self.clusters.append(target)
print(f"Added monitor for {cluster.namespace}/{cluster.name}")
def remove_cluster(self, cluster: InnoDBCluster) -> None:
for c in self.clusters:
if c.name == cluster.name and c.namespace == cluster.namespace:
self.clusters.remove(c)
break
def run(self) -> None:
last_ping = time.time()
while not self.stopped:
session_fds_to_cluster = {}
for cluster in self.clusters:
cluster.ensure_connected()
if cluster.session:
session_fds_to_cluster[cluster.session._get_socket_fd()] = cluster
# wait for 1s at most so that newly added sessions don't wait long
# TODO replace poll_sessions() with something to get the session fd
# - do the poll loop in python
# - add a socket_pair() to allow interrupting the poll when a new
# cluster is added and increase the timeout
ready, _, _ = select.select(session_fds_to_cluster.keys(), [], [], 1)  # timeout in seconds
for fd in ready:
session_fds_to_cluster[fd].handle_notice()
def stop(self) -> None:
self.stopped = True
g_group_monitor = GroupMonitor()
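# Hedged usage sketch (not part of the original module): starting the
# singleton monitor and registering a cluster. `my_cluster` and `my_logger`
# are hypothetical; the handler follows the (cluster, members, view_changed)
# signature expected by monitor_cluster().
def _example_start_monitoring(my_cluster, my_logger):
    def on_members_changed(cluster, members, view_changed):
        if view_changed:
            print(f"{cluster.namespace}/{cluster.name}: group view changed")
    if not g_group_monitor.is_alive():
        g_group_monitor.start()
    g_group_monitor.monitor_cluster(my_cluster, on_members_changed, my_logger)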
| 37.344538
| 140
| 0.57212
|
from logging import Logger
from typing import Callable, Optional, TYPE_CHECKING, Tuple
from mysqloperator.controller.innodbcluster.cluster_api import InnoDBCluster
from mysqloperator.controller.shellutils import RetryLoop
from . import shellutils
import threading
import time
import select
import mysqlsh
mysql = mysqlsh.mysql
mysqlx = mysqlsh.mysqlx
k_connect_retry_interval = 10
class MonitoredCluster:
def __init__(self, cluster: InnoDBCluster,
account: Tuple[str, str],
handler: Callable[[InnoDBCluster, list, bool], None]):
self.cluster = cluster
self.account = account
self.session = None
self.target = None
self.target_not_primary = None
self.last_connect_attempt = 0
self.last_primary_id = None
self.last_view_id = None
self.handler = handler
@property
def name(self) -> str:
return self.cluster.name
@property
def namespace(self) -> str:
return self.cluster.namespace
def ensure_connected(self) -> Optional['mysqlx.Session']:
if not self.session and (not self.last_connect_attempt or time.time() - self.last_connect_attempt > k_connect_retry_interval):
print(
f"GroupMonitor: Trying to connect to a member of cluster {self.cluster.namespace}/{self.cluster.name}")
self.last_connect_attempt = time.time()
self.session = None
self.connect_to_primary()
# force a refresh after we connect so we don't miss anything that happened while we were out
if self.session:
print(
f"GroupMonitor: Connect member of {self.cluster.namespace}/{self.cluster.name} OK {self.session}")
self.on_view_change(None)
else:
print(
f"GroupMonitor: Connect to member of {self.cluster.namespace}/{self.cluster.name} failed")
return self.session
def connect_to_primary(self) -> None:
while True:
session, is_primary = self.find_primary()
if not is_primary:
if session:
print(
f"GroupMonitor: Could not connect to PRIMARY of cluster {self.cluster.namespace}/{self.cluster.name}")
else:
print(
f"GroupMonitor: Could not connect to PRIMARY nor SECONDARY of cluster {self.cluster.namespace}/{self.cluster.name}")
if session:
try:
# extend the server-side wait timeout for the next command to a full day
session.run_sql(
f"set session mysqlx_wait_timeout = {24*60*60}")
session._enable_notices(["GRViewChanged"])
co = shellutils.parse_uri(session.uri)
self.target = f"{co['host']}:{co['port']}"
self.target_not_primary = not is_primary
self.session = session
except mysqlsh.Error as e:
if mysql.ErrorCode.CR_MAX_ERROR >= e.code >= mysql.ErrorCode.CR_MIN_ERROR:
# Try again if the server we were connected to is gone
continue
else:
raise
else:
self.session = None
break
def find_primary(self) -> Tuple[Optional['mysqlx.Session'], bool]:
not_primary = None
pods = self.cluster.get_pods()
# Try to find the PRIMARY the easy way
for pod in pods:
member_info = pod.get_membership_info()
if member_info and member_info.get("role") == "PRIMARY":
session = self.try_connect(pod)
if session:
s = shellutils.jump_to_primary(session, self.account)
if s:
if s != session:
session.close()
return s, True
else:
not_primary = session
# Try to connect to anyone and find the primary from there
for pod in pods:
session = self.try_connect(pod)
if session:
s = shellutils.jump_to_primary(session, self.account)
if s:
if s != session:
session.close()
return s, True
else:
not_primary = session
return not_primary, False
def try_connect(self, pod) -> Optional['mysqlx.Session']:
try:
session = mysqlx.get_session(pod.xendpoint_co)
except mysqlsh.Error as e:
print(f"GroupMonitor: Error connecting to {pod.xendpoint}: {e}")
return None
return session
def handle_notice(self) -> None:
while 1:
try:
# TODO hack to force unexpected async notice to be read, xsession should read packets itself
self.session.run_sql("select 1")
notice = self.session._fetch_notice()
if not notice:
break
print(f"GOT NOTICE {notice}")
self.on_view_change(notice.get("view_id"))
if not self.session:
break
except mysqlsh.Error as e:
print(
f"GroupMonitor: Error fetching notice: dest={self.target} error={e}")
self.session.close()
self.session = None
break
def on_view_change(self, view_id: Optional[str]) -> None:
members = shellutils.query_members(self.session)
self.handler(self.cluster, members, view_id != self.last_view_id)
self.last_view_id = view_id
primary = None
force_reconnect = False
for member_id, role, status, view_id, endpoint, version in members:
if self.last_primary_id == member_id and role != "PRIMARY":
force_reconnect = True
break
if role == "PRIMARY" and not primary:
primary = member_id
self.last_primary_id = primary
# force reconnection if the PRIMARY changed or we're not connected to the PRIMARY
if self.target_not_primary or force_reconnect:
print(
f"GroupMonitor: PRIMARY changed for {self.cluster.namespace}/{self.cluster.name}")
if self.session:
self.session.close()
self.session = None
class GroupMonitor(threading.Thread):
def __init__(self):
super().__init__(daemon=True, name="group-monitor")
self.clusters = []
self.stopped = False
def monitor_cluster(self, cluster: InnoDBCluster,
handler: Callable[[InnoDBCluster, list, bool], None],
logger: Logger) -> None:
for c in self.clusters:
if c.name == cluster.name and c.namespace == cluster.namespace:
return
account = RetryLoop(logger).call(cluster.get_admin_account)
target = MonitoredCluster(cluster, account, handler)
self.clusters.append(target)
print(f"Added monitor for {cluster.namespace}/{cluster.name}")
def remove_cluster(self, cluster: InnoDBCluster) -> None:
for c in self.clusters:
if c.name == cluster.name and c.namespace == cluster.namespace:
self.clusters.remove(c)
break
def run(self) -> None:
last_ping = time.time()
while not self.stopped:
session_fds_to_cluster = {}
for cluster in self.clusters:
cluster.ensure_connected()
if cluster.session:
session_fds_to_cluster[cluster.session._get_socket_fd()] = cluster
# TODO replace poll_sessions() with something to get the session fd
# - do the poll loop in python
# - add a socket_pair() to allow interrupting the poll when a new
# cluster is added and increase the timeout
ready, _, _ = select.select(session_fds_to_cluster.keys(), [], [], 1)  # timeout in seconds
for fd in ready:
session_fds_to_cluster[fd].handle_notice()
def stop(self) -> None:
self.stopped = True
g_group_monitor = GroupMonitor()
| true
| true
|
1c48da8dfc2b9932134f31843ace90d77afe7978
| 1,684
|
py
|
Python
|
main02_ceres_data.py
|
timothyfisherphd/CRISPR_Cancer_Chromatin_State_Activity
|
91cbd8519baaeccab404574d61e21dbf0ea1f26f
|
[
"MIT"
] | null | null | null |
main02_ceres_data.py
|
timothyfisherphd/CRISPR_Cancer_Chromatin_State_Activity
|
91cbd8519baaeccab404574d61e21dbf0ea1f26f
|
[
"MIT"
] | null | null | null |
main02_ceres_data.py
|
timothyfisherphd/CRISPR_Cancer_Chromatin_State_Activity
|
91cbd8519baaeccab404574d61e21dbf0ea1f26f
|
[
"MIT"
] | null | null | null |
## Generating Ceres Data
from collections import defaultdict
import pandas as pd
mainDicticionary=defaultdict(list)
stateDictionary=defaultdict(list)
countScoreDictionary=defaultdict(int)
sumScoreDictionary=defaultdict(int)
meanScoreDictionary=defaultdict(int)
n = 0
with open('/Users/timothyfisher/Desktop/Ernst_Lab/UNIX/Updated_Dataset/ceres.overlapsComparsionValues.tab.bed', 'r') as dictList:
for line in dictList:
chromosome, start, end, state, score, strand, signal, end2, color = line.strip().split()
score = float(score)
stateDictionary[state].append(score)
n += 1
# NOTE: this reopens the *input* BED file in write mode, so the per-state
# summary below overwrites the original overlap data.
with open('/Users/timothyfisher/Desktop/Ernst_Lab/UNIX/Updated_Dataset/ceres.overlapsComparsionValues.tab.bed', 'w') as outfile:
for state in stateDictionary:
countScoreDictionary[state] = len(stateDictionary[state])
sumScoreDictionary[state]= sum(stateDictionary[state])
meanScoreDictionary[state]= sumScoreDictionary[state]/countScoreDictionary[state]
mainDicticionary[state].append(stateDictionary)
mainDicticionary[state].append(countScoreDictionary)
mainDicticionary[state].append(sumScoreDictionary)
mainDicticionary[state].append(meanScoreDictionary)
outfile.write(state+','+str(meanScoreDictionary[state])+'\n')
print(countScoreDictionary.items())
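# Worked mini-example (illustrative values, not from the dataset): the mean
# score for a state is simply sum/count over its overlapping CERES scores.
demo_scores = [-0.4, -0.1, 0.2]
assert abs(sum(demo_scores) / len(demo_scores) - (-0.1)) < 1e-9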
import numpy as np
with open('ceres_std_errs.csv','w') as f:
for state, l in stateDictionary.items():
print('{}\t{}'.format(state,np.std(l)), file=f)
# numpy is already imported above; write the per-state overlap counts
with open('ceres_length.csv', 'w') as f:
    for state, count in countScoreDictionary.items():
        print('{}\t{}'.format(state, count), file=f)
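# Illustrative sketch (assumption about the input layout): the parser above
# expects 9 whitespace-separated BED columns, with the CERES score in column 5.
example_line = "chr1\t1000\t2000\tE1_Active\t-0.25\t.\t0.0\t2000\t255,0,0"
chromosome, start, end, state, score, strand, signal, end2, color = example_line.split()
assert state == "E1_Active" and float(score) == -0.25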
| 37.422222
| 129
| 0.723278
|
from collections import defaultdict
import pandas as pd
mainDicticionary=defaultdict(list)
stateDictionary=defaultdict(list)
countScoreDictionary=defaultdict(int)
sumScoreDictionary=defaultdict(int)
meanScoreDictionary=defaultdict(int)
n = 0
with open('/Users/timothyfisher/Desktop/Ernst_Lab/UNIX/Updated_Dataset/ceres.overlapsComparsionValues.tab.bed', 'r') as dictList:
for line in dictList:
chromosome, start, end, state, score, strand, signal, end2, color = line.strip().split()
score = float(score)
stateDictionary[state].append(score)
n += 1
with open('/Users/timothyfisher/Desktop/Ernst_Lab/UNIX/Updated_Dataset/ceres.overlapsComparsionValues.tab.bed', 'w') as outfile:
for state in stateDictionary:
countScoreDictionary[state] = len(stateDictionary[state])
sumScoreDictionary[state]= sum(stateDictionary[state])
meanScoreDictionary[state]= sumScoreDictionary[state]/countScoreDictionary[state]
mainDicticionary[state].append(stateDictionary)
mainDicticionary[state].append(countScoreDictionary)
mainDicticionary[state].append(sumScoreDictionary)
mainDicticionary[state].append(meanScoreDictionary)
outfile.write(state+','+str(meanScoreDictionary[state])+'\n')
print(countScoreDictionary.items())
import numpy as np
with open('ceres_std_errs.csv','w') as f:
for state, l in stateDictionary.items():
print('{}\t{}'.format(state,np.std(l)), file=f)
with open('ceres_length.csv', 'w') as f:
    for state, count in countScoreDictionary.items():
        print('{}\t{}'.format(state, count), file=f)
| true
| true
|
1c48db3dfcc306004b247938a6279a060b4dde3d
| 9,875
|
py
|
Python
|
eslearn/GUI/easylearn_main_run.py
|
dongmengshi/easylearn
|
df528aaa69c3cf61f5459a04671642eb49421dfb
|
[
"MIT"
] | null | null | null |
eslearn/GUI/easylearn_main_run.py
|
dongmengshi/easylearn
|
df528aaa69c3cf61f5459a04671642eb49421dfb
|
[
"MIT"
] | null | null | null |
eslearn/GUI/easylearn_main_run.py
|
dongmengshi/easylearn
|
df528aaa69c3cf61f5459a04671642eb49421dfb
|
[
"MIT"
] | 1
|
2021-01-11T08:21:35.000Z
|
2021-01-11T08:21:35.000Z
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Main GUI of the easylearn
# Author: Chao Li <lichao19870617@gmail.com>
# License: MIT
"""
import sys
import os
import json
from PyQt5.QtWidgets import (QApplication, QMainWindow, QMessageBox, QFileDialog,
                             QInputDialog, QLineEdit)
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtGui import QIcon, QPixmap
from eslearn.stylesheets.PyQt5_stylesheets import PyQt5_stylesheets
from easylearn_main_gui import Ui_MainWindow
from easylearn_data_loading_run import EasylearnDataLoadingRun
class EasylearnMainGUI(QMainWindow, Ui_MainWindow):
"""This class is used to display the main GUI of the easylearn.
"""
def __init__(self):
QMainWindow.__init__(self)
Ui_MainWindow.__init__(self)
self.setupUi(self)
self.working_directory = ""
self.textBrowser.setText("Hi~, I'm easylearn. I hope I can help you finish this project successfully\n")
# Set appearance
self.set_logo()
self.set_skin()
# Connecting to functions
self.select_working_directory.triggered.connect(self.select_workingdir_fun)
self.create_configuration_file.triggered.connect(self.initialize_configuration_fun)
self.choose_configuration_file.triggered.connect(self.load_configuration_fun)
self.data_loading.clicked.connect(self.data_loading_fun)
self.feature_engineering.clicked.connect(self.feature_engineering_fun)
self.machine_learning.clicked.connect(self.machine_learning_fun)
self.model_evaluation.clicked.connect(self.model_evaluation_fun)
self.statistical_analysis.clicked.connect(self.statistical_analysis_fun)
self.run.clicked.connect(self.run_fun)
self.quit.clicked.connect(self.closeEvent_button)
# Skins
self.skins = {"Dark": "style_Dark", "Black": "style_black", "DarkOrange": "style_DarkOrange",
"Gray": "style_gray", "Blue": "style_blue", "Navy": "style_navy", "Classic": "style_Classic"}
self.actionDark.triggered.connect(self.set_skin)
self.actionBlack.triggered.connect(self.set_skin)
self.actionDarkOrange.triggered.connect(self.set_skin)
self.actionGray.triggered.connect(self.set_skin)
self.actionBlue.triggered.connect(self.set_skin)
self.actionNavy.triggered.connect(self.set_skin)
self.actionClassic.triggered.connect(self.set_skin)
def set_logo(self):
qss_logo = """#logo{background-color: black;
border: 2px solid white;
border-radius: 20px;
border-image: url('../logo/logo-lower.jpg');
}
#logo:hover {border-radius: 0px;}
"""
self.logo.setStyleSheet(qss_logo)
self.setWindowTitle('easylearn')
self.setWindowIcon(QIcon('../logo/logo-upper.jpg'))
# Run Icon
self.run.setIcon(QIcon("../logo/run.png"));
self.run.setIconSize(QPixmap("../logo/run.png").size());
self.run.resize(QPixmap("../logo/run.png").size());
# Close Icon
self.quit.setIcon(QIcon("../logo/close.png"));
self.quit.setIconSize(QPixmap("../logo/close.png").size());
self.quit.resize(QPixmap("../logo/close.png").size());
def set_skin(self):
"""Set a appearance for easylearn (skin, etc).
"""
sender = self.sender()
if sender:
if (sender.text() in list(self.skins.keys())):
self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style=self.skins[sender.text()]))
if sender.text() == "Classic":
self.setStyleSheet("")
else:
self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style="style_Dark"))
else:
self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style="style_Dark"))
def select_workingdir_fun(self):
"""
This function is used to select the working directory, then change to that directory.
"""
# If a working directory was selected previously, use it as the initial directory.
if self.working_directory == "":
self.working_directory = QFileDialog.getExistingDirectory(self, "Select a working_directory", os.getcwd())
self.textBrowser.setText("Current working directory is " + self.working_directory + "\n")
else:
self.working_directory = QFileDialog.getExistingDirectory(self, "Select a working_directory", self.working_directory)
self.textBrowser.setText("Current working directory is " + self.working_directory + "\n")
# If already choose a working directory, change directory to the working directory
if self.working_directory != "":
os.chdir(self.working_directory)
def initialize_configuration_fun(self):
"""Create file to save settings
This function will add the configuration_file to self
"""
if self.working_directory != "":
configuration_file_name, ok = QInputDialog.getText(self, "Initialize configuration", "Please name the configuration file:", QLineEdit.Normal, "configuration_file.json")
self.configuration_file = os.path.join(self.working_directory, configuration_file_name)
with open(self.configuration_file, 'w') as configuration_file:
config = {"data_loading": {}, "feature_engineering": {}, "machine_learning": {}, "model_evaluation": {}, "statistical_analysis": {}}
config = json.dumps(config)
configuration_file.write(config)
config_message = "Configuration file is " + self.configuration_file
self.textBrowser.setText(config_message)
else:
QMessageBox.warning( self, 'Warning', f'Please choose a working directory first! (press button at the top left corner)')
def load_configuration_fun(self):
"""Load configuration
"""
self.configuration_file, filetype = QFileDialog.getOpenFileName(self,
"Select configuration file",
os.getcwd(), "Text Files (*.json);;All Files (*);;")
# Read configuration_file if already selected
if self.configuration_file != "":
# TODO: fix the Chinese character encoding problem
with open(self.configuration_file, 'r') as config:
self.configuration = config.read()
# Check the configuration is valid JSON, then transform the configuration to dict
# If the configuration is not valid JSON, then give configuration and configuration_file to ""
try:
self.configuration = json.loads(self.configuration)
self.textBrowser.setText("Configuration file is " + self.configuration_file)
except json.decoder.JSONDecodeError:
QMessageBox.warning( self, 'Warning', f'{self.configuration_file} is not valid JSON')
self.configuration_file = ""
else:
QMessageBox.warning( self, 'Warning', 'Configuration file was not selected')
def data_loading_fun(self):
"""This function is called when data_loading button is clicked.
Then, this function will process the data loading.
"""
print('data_loading_fun')
# Note: this rebinds self.data_loading (initially the button widget) to the dialog.
self.data_loading = EasylearnDataLoadingRun(self.working_directory)
self.data_loading.show()
def feature_engineering_fun(self):
"""This function is called when feature_engineering button is clicked.
Then, this function will process the feature_engineering.
"""
print('feature_engineering_fun')
def machine_learning_fun(self):
"""This function is called when machine_learning button is clicked.
Then, this function will process the machine learning.
"""
print('machine_learning_fun')
def model_evaluation_fun(self):
"""This function is called when model_evaluation button is clicked.
Then, this function will process the model evaluation.
"""
print('model_evaluation_fun')
def statistical_analysis_fun(self):
"""This function is called when data_loading button is clicked.
Then, this function will process the data loading.
"""
print('statistical_analysis_fun')
def save_workflow_fun(self):
"""This function is called when data_loading button is clicked.
Then, this function will process the data loading.
"""
print('save_workflow_fun')
def run_fun(self):
"""This function is called when data_loading button is clicked.
Then, this function will process the data loading.
"""
print('run_fun')
def closeEvent(self, event):
"""This function is called when exit icon of the window is clicked.
This function make sure the program quit safely.
"""
# Set qss to make sure the QMessageBox can be seen
reply = QMessageBox.question(self, 'Quit',"Are you sure to quit?",
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
def closeEvent_button(self, event):
"""This function is called when quit button is clicked.
This function make sure the program quit safely.
"""
# Set qss to make sure the QMessageBox can be seen
reply = QMessageBox.question(self, 'Quit',"Are you sure to quit?",
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
QCoreApplication.quit()
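# Illustrative constant (mirrors the dict written by initialize_configuration_fun
# above; not used elsewhere in the original file): the empty configuration
# skeleton that a new project starts from.
EXAMPLE_CONFIG_SKELETON = {
    "data_loading": {},
    "feature_engineering": {},
    "machine_learning": {},
    "model_evaluation": {},
    "statistical_analysis": {},
}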
if __name__=='__main__':
app=QApplication(sys.argv)
md=EasylearnMainGUI()
md.show()
sys.exit(app.exec_())
| 41.317992
| 180
| 0.650937
|
import sys
import os
import json
from PyQt5.QtWidgets import (QApplication, QMainWindow, QMessageBox, QFileDialog,
                             QInputDialog, QLineEdit)
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtGui import QIcon, QPixmap
from eslearn.stylesheets.PyQt5_stylesheets import PyQt5_stylesheets
from easylearn_main_gui import Ui_MainWindow
from easylearn_data_loading_run import EasylearnDataLoadingRun
class EasylearnMainGUI(QMainWindow, Ui_MainWindow):
def __init__(self):
QMainWindow.__init__(self)
Ui_MainWindow.__init__(self)
self.setupUi(self)
self.working_directory = ""
self.textBrowser.setText("Hi~, I'm easylearn. I hope I can help you finish this project successfully\n")
# Set appearance
self.set_logo()
self.set_skin()
# Connecting to functions
self.select_working_directory.triggered.connect(self.select_workingdir_fun)
self.create_configuration_file.triggered.connect(self.initialize_configuration_fun)
self.choose_configuration_file.triggered.connect(self.load_configuration_fun)
self.data_loading.clicked.connect(self.data_loading_fun)
self.feature_engineering.clicked.connect(self.feature_engineering_fun)
self.machine_learning.clicked.connect(self.machine_learning_fun)
self.model_evaluation.clicked.connect(self.model_evaluation_fun)
self.statistical_analysis.clicked.connect(self.statistical_analysis_fun)
self.run.clicked.connect(self.run_fun)
self.quit.clicked.connect(self.closeEvent_button)
# Skins
self.skins = {"Dark": "style_Dark", "Black": "style_black", "DarkOrange": "style_DarkOrange",
"Gray": "style_gray", "Blue": "style_blue", "Navy": "style_navy", "Classic": "style_Classic"}
self.actionDark.triggered.connect(self.set_skin)
self.actionBlack.triggered.connect(self.set_skin)
self.actionDarkOrange.triggered.connect(self.set_skin)
self.actionGray.triggered.connect(self.set_skin)
self.actionBlue.triggered.connect(self.set_skin)
self.actionNavy.triggered.connect(self.set_skin)
self.actionClassic.triggered.connect(self.set_skin)
def set_logo(self):
qss_logo = """#logo{background-color: black;
border: 2px solid white;
border-radius: 20px;
border-image: url('../logo/logo-lower.jpg');
}
#logo:hover {border-radius: 0px;}
"""
self.logo.setStyleSheet(qss_logo)
self.setWindowTitle('easylearn')
self.setWindowIcon(QIcon('../logo/logo-upper.jpg'))
# Run Icon
self.run.setIcon(QIcon("../logo/run.png"));
self.run.setIconSize(QPixmap("../logo/run.png").size());
self.run.resize(QPixmap("../logo/run.png").size());
# Close Icon
self.quit.setIcon(QIcon("../logo/close.png"));
self.quit.setIconSize(QPixmap("../logo/close.png").size());
self.quit.resize(QPixmap("../logo/close.png").size());
def set_skin(self):
sender = self.sender()
if sender:
if (sender.text() in list(self.skins.keys())):
self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style=self.skins[sender.text()]))
if sender.text() == "Classic":
self.setStyleSheet("")
else:
self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style="style_Dark"))
else:
self.setStyleSheet(PyQt5_stylesheets.load_stylesheet_pyqt5(style="style_Dark"))
def select_workingdir_fun(self):
# If a working directory was selected previously, use it as the initial directory.
if self.working_directory == "":
self.working_directory = QFileDialog.getExistingDirectory(self, "Select a working_directory", os.getcwd())
self.textBrowser.setText("Current working directory is " + self.working_directory + "\n")
else:
self.working_directory = QFileDialog.getExistingDirectory(self, "Select a working_directory", self.working_directory)
self.textBrowser.setText("Current working directory is " + self.working_directory + "\n")
        # If a working directory has been chosen, change the process directory to it
if self.working_directory != "":
os.chdir(self.working_directory)
def initialize_configuration_fun(self):
if self.working_directory != "":
configuration_file_name, ok = QInputDialog.getText(self, "Initialize configuration", "Please name the configuration file:", QLineEdit.Normal, "configuration_file.json")
self.configuration_file = os.path.join(self.working_directory, configuration_file_name)
with open(self.configuration_file, 'w') as configuration_file:
config = {"data_loading": {}, "feature_engineering": {}, "machine_learning": {}, "model_evaluation": {}, "statistical_analysis": {}}
config = json.dumps(config)
configuration_file.write(config)
config_message = "Configuration file is " + self.configuration_file
self.textBrowser.setText(config_message)
else:
            QMessageBox.warning(self, 'Warning', 'Please choose a working directory first! (press the button at the top left corner)')
def load_configuration_fun(self):
self.configuration_file, filetype = QFileDialog.getOpenFileName(self,
"Select configuration file",
os.getcwd(), "Text Files (*.json);;All Files (*);;")
# Read configuration_file if already selected
if self.configuration_file != "":
            # TODO: handle Chinese (non-ASCII) character encoding when reading the file
with open(self.configuration_file, 'r') as config:
self.configuration = config.read()
# Check the configuration is valid JSON, then transform the configuration to dict
            # If the configuration is not valid JSON, reset configuration_file to ""
try:
self.configuration = json.loads(self.configuration)
self.textBrowser.setText("Configuration file is " + self.configuration_file)
except json.decoder.JSONDecodeError:
                QMessageBox.warning(self, 'Warning', f'{self.configuration_file} is not valid JSON')
self.configuration_file = ""
else:
            QMessageBox.warning(self, 'Warning', 'Configuration file was not selected')
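    # A minimal sketch (not executed by the GUI) of reading the configuration with an
    # explicit encoding, one common way to address the TODO above about non-ASCII
    # (e.g. Chinese) characters; the file name here is hypothetical:
    #
    #     with open("configuration_file.json", "r", encoding="utf-8") as config:
    #         configuration = json.load(config)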
def data_loading_fun(self):
print('data_loading_fun')
        # Use a distinct attribute name so the data_loading button widget is not overwritten
        self.data_loading_window = EasylearnDataLoadingRun(self.working_directory)
        self.data_loading_window.show()
def feature_engineering_fun(self):
print('feature_engineering_fun')
def machine_learning_fun(self):
print('machine_learning_fun')
def model_evaluation_fun(self):
print('model_evaluation_fun')
def statistical_analysis_fun(self):
print('statistical_analysis_fun')
def save_workflow_fun(self):
print('save_workflow_fun')
def run_fun(self):
print('run_fun')
    def closeEvent(self, event):
        # Ask for confirmation before closing the window
        reply = QMessageBox.question(self, 'Quit', "Are you sure you want to quit?",
                                     QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()
    def closeEvent_button(self, event):
        # Ask for confirmation before quitting via the Quit button
        reply = QMessageBox.question(self, 'Quit', "Are you sure you want to quit?",
                                     QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            QCoreApplication.quit()
if __name__ == '__main__':
    app = QApplication(sys.argv)
    md = EasylearnMainGUI()
    md.show()
    sys.exit(app.exec_())
| true
| true
|
1c48dbbbe0ab9bd7f9a2531556bee427f7b0a2e4
| 40,781
|
py
|
Python
|
uamqp/message.py
|
123Jun321/azure-uamqp-python
|
b67e4fcaf2e8a337636947523570239c10a58ae2
|
[
"MIT"
] | 1
|
2021-07-07T06:30:36.000Z
|
2021-07-07T06:30:36.000Z
|
uamqp/message.py
|
123Jun321/azure-uamqp-python
|
b67e4fcaf2e8a337636947523570239c10a58ae2
|
[
"MIT"
] | null | null | null |
uamqp/message.py
|
123Jun321/azure-uamqp-python
|
b67e4fcaf2e8a337636947523570239c10a58ae2
|
[
"MIT"
] | null | null | null |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# pylint: disable=too-many-lines
import logging
import six
from uamqp import c_uamqp, constants, errors, utils
_logger = logging.getLogger(__name__)
class Message(object):
"""An AMQP message.
When sending, depending on the nature of the data,
different body encoding will be used. If the data is str or bytes,
a single part DataBody will be sent. If the data is a list of str/bytes,
a multipart DataBody will be sent. Any other type of list or any other
type of data will be sent as a ValueBody.
An empty payload will also be sent as a ValueBody.
:ivar on_send_complete: A custom callback to be run on completion of
the send operation of this message. The callback must take two parameters,
a result (of type `MessageSendResult`) and an error (of type
    Exception). The error parameter may be None if no error occurred or the error
information was undetermined.
:vartype on_send_complete: callable[~uamqp.constants.MessageSendResult, Exception]
:param body: The data to send in the message.
:type body: Any Python data type.
:param properties: Properties to add to the message.
:type properties: ~uamqp.message.MessageProperties
:param application_properties: Service specific application properties.
:type application_properties: dict
:param annotations: Service specific message annotations. Keys in the dictionary
must be `types.AMQPSymbol` or `types.AMQPuLong`.
:type annotations: dict
:param header: The message header.
:type header: ~uamqp.message.MessageHeader
:param msg_format: A custom message format. Default is 0.
:type msg_format: int
:param message: Internal only. This is used to wrap an existing message
that has been received from an AMQP service. If specified, all other
parameters will be ignored.
:type message: uamqp.c_uamqp.cMessage
:param settler: Internal only. This is used when wrapping an existing message
that has been received from an AMQP service. Should only be specified together
with `message` and is to settle the message.
:type settler: callable[~uamqp.errors.MessageResponse]
:param delivery_no: Internal only. This is used when wrapping an existing message
that has been received from an AMQP service. Should only be specified together
    with `message` and specifies the message's client delivery number.
:param encoding: The encoding to use for parameters supplied as strings.
Default is 'UTF-8'
:type encoding: str
"""
def __init__(self,
body=None,
properties=None,
application_properties=None,
annotations=None,
header=None,
msg_format=None,
message=None,
settler=None,
delivery_no=None,
encoding='UTF-8'):
self.state = constants.MessageState.WaitingToBeSent
self.idle_time = 0
self.retries = 0
self._response = None
self._settler = None
self._encoding = encoding
self.delivery_no = delivery_no
self.on_send_complete = None
self.properties = None
self.application_properties = None
self.annotations = None
self.header = None
self.footer = None
self.delivery_annotations = None
if message:
if settler:
self.state = constants.MessageState.ReceivedUnsettled
self._response = None
else:
self.state = constants.MessageState.ReceivedSettled
self._response = errors.MessageAlreadySettled()
self._settler = settler
self._parse_message(message)
else:
self._message = c_uamqp.create_message()
if isinstance(body, (six.text_type, six.binary_type)):
self._body = DataBody(self._message)
self._body.append(body)
elif isinstance(body, list) and all([isinstance(b, (six.text_type, six.binary_type)) for b in body]):
self._body = DataBody(self._message)
for value in body:
self._body.append(value)
else:
self._body = ValueBody(self._message)
self._body.set(body)
if msg_format:
self._message.message_format = msg_format
self.properties = properties
self.application_properties = application_properties
self.annotations = annotations
self.header = header
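    # Illustrative sketch (mirroring the branches above) of how the body type is
    # chosen from the supplied data; the payload values are hypothetical:
    #
    #     Message(body=b"chunk")          -> single-section DataBody
    #     Message(body=[b"a", b"b"])      -> multi-section DataBody
    #     Message(body={"key": "value"})  -> ValueBody (AMQP-encoded value)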
@classmethod
def decode_from_bytes(cls, data):
"""Decode an AMQP message from a bytearray.
The returned message will not have a delivery context and
therefore will be considered to be in an "already settled" state.
:param data: The AMQP wire-encoded bytes to decode.
:type data: bytes or bytearray
"""
decoded_message = c_uamqp.decode_message(len(data), data)
return cls(message=decoded_message)
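    # Hedged usage sketch: round-tripping a message through the AMQP wire
    # encoding; the payload is hypothetical.
    #
    #     wire_bytes = Message(body=b"payload").encode_message()
    #     received = Message.decode_from_bytes(wire_bytes)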
def __str__(self):
if not self._message:
return ""
return str(self._body)
def _parse_message(self, message):
"""Parse a message received from an AMQP service.
:param message: The received C message.
:type message: uamqp.c_uamqp.cMessage
"""
_logger.debug("Parsing received message %r.", self.delivery_no)
self._message = message
body_type = message.body_type
if body_type == c_uamqp.MessageBodyType.NoneType:
self._body = None
elif body_type == c_uamqp.MessageBodyType.DataType:
self._body = DataBody(self._message)
elif body_type == c_uamqp.MessageBodyType.SequenceType:
raise TypeError("Message body type Sequence not supported.")
else:
self._body = ValueBody(self._message)
_props = self._message.properties
if _props:
_logger.debug("Parsing received message properties %r.", self.delivery_no)
self.properties = MessageProperties(properties=_props, encoding=self._encoding)
_header = self._message.header
if _header:
_logger.debug("Parsing received message header %r.", self.delivery_no)
self.header = MessageHeader(header=_header)
_footer = self._message.footer
if _footer:
_logger.debug("Parsing received message footer %r.", self.delivery_no)
self.footer = _footer.map
_app_props = self._message.application_properties
if _app_props:
_logger.debug("Parsing received message application properties %r.", self.delivery_no)
self.application_properties = _app_props.map
_ann = self._message.message_annotations
if _ann:
_logger.debug("Parsing received message annotations %r.", self.delivery_no)
self.annotations = _ann.map
_delivery_ann = self._message.delivery_annotations
if _delivery_ann:
_logger.debug("Parsing received message delivery annotations %r.", self.delivery_no)
self.delivery_annotations = _delivery_ann.map
def _can_settle_message(self):
if self.state not in constants.RECEIVE_STATES:
raise TypeError("Only received messages can be settled.")
if self.settled:
return False
return True
def _populate_message_attributes(self, c_message):
if self.properties:
c_message.properties = self.properties.get_properties_obj()
if self.application_properties:
if not isinstance(self.application_properties, dict):
raise TypeError("Application properties must be a dictionary.")
amqp_props = utils.data_factory(self.application_properties, encoding=self._encoding)
c_message.application_properties = amqp_props
if self.annotations:
if not isinstance(self.annotations, dict):
raise TypeError("Message annotations must be a dictionary.")
ann_props = c_uamqp.create_message_annotations(
utils.data_factory(self.annotations, encoding=self._encoding))
c_message.message_annotations = ann_props
if self.header:
c_message.header = self.header.get_header_obj()
@property
def settled(self):
"""Whether the message transaction for this message has been completed.
If this message is to be sent, the message will be `settled=True` once a
disposition has been received from the service.
If this message has been received, the message will be `settled=True` once
a disposition has been sent to the service.
:rtype: bool
"""
if self._response:
return True
return False
def get_message_encoded_size(self):
"""Pre-emptively get the size of the message once it has been encoded
to go over the wire so we can raise an error if the message will be
        rejected for being too large.
This method is not available for messages that have been received.
:rtype: int
"""
if not self._message:
raise ValueError("No message data to encode.")
cloned_data = self._message.clone()
self._populate_message_attributes(cloned_data)
encoded_data = []
return c_uamqp.get_encoded_message_size(cloned_data, encoded_data)
def encode_message(self):
"""Encode message to AMQP wire-encoded bytearray.
:rtype: bytearray
"""
if not self._message:
raise ValueError("No message data to encode.")
cloned_data = self._message.clone()
self._populate_message_attributes(cloned_data)
encoded_data = []
c_uamqp.get_encoded_message_size(cloned_data, encoded_data)
return b"".join(encoded_data)
def get_data(self):
"""Get the body data of the message. The format may vary depending
on the body type.
:rtype: generator
"""
if not self._message or not self._body:
return None
return self._body.data
def gather(self):
"""Return all the messages represented by this object.
This will always be a list of a single message.
:rtype: list[~uamqp.message.Message]
"""
if self.state in constants.RECEIVE_STATES:
raise TypeError("Only new messages can be gathered.")
if not self._message:
raise ValueError("Message data already consumed.")
try:
raise self._response
except TypeError:
pass
return [self]
def get_message(self):
"""Get the underlying C message from this object.
:rtype: uamqp.c_uamqp.cMessage
"""
if not self._message:
return None
self._populate_message_attributes(self._message)
return self._message
def accept(self):
"""Send a response disposition to the service to indicate that
a received message has been accepted. If the client is running in PeekLock
mode, the service will wait on this disposition. Otherwise it will
        be ignored. Returns `True` if the message was accepted, or `False` if the message
was already settled.
:rtype: bool
:raises: TypeError if the message is being sent rather than received.
"""
if self._can_settle_message():
self._response = errors.MessageAccepted()
self._settler(self._response)
self.state = constants.MessageState.ReceivedSettled
return True
return False
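    # Hedged sketch of the settlement flow for a received message; the receiver
    # object and its receive call are hypothetical:
    #
    #     for message in receiver.receive_messages():
    #         if message.accept():
    #             ...  # disposition sent; message.state is now ReceivedSettled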
def reject(self, condition=None, description=None):
"""Send a response disposition to the service to indicate that
a received message has been rejected. If the client is running in PeekLock
mode, the service will wait on this disposition. Otherwise it will
        be ignored. A rejected message will increment the message's delivery count.
        Returns `True` if the message was rejected, or `False` if the message
was already settled.
:param condition: The AMQP rejection code. By default this is `amqp:internal-error`.
:type condition: bytes or str
:param description: A description/reason to accompany the rejection.
:type description: bytes or str
:rtype: bool
:raises: TypeError if the message is being sent rather than received.
"""
if self._can_settle_message():
self._response = errors.MessageRejected(
condition=condition,
description=description,
encoding=self._encoding)
self._settler(self._response)
self.state = constants.MessageState.ReceivedSettled
return True
return False
def release(self):
"""Send a response disposition to the service to indicate that
a received message has been released. If the client is running in PeekLock
mode, the service will wait on this disposition. Otherwise it will
        be ignored. A released message will not increment the message's
        delivery count. Returns `True` if the message was released, or `False` if the message
was already settled.
:rtype: bool
:raises: TypeError if the message is being sent rather than received.
"""
if self._can_settle_message():
self._response = errors.MessageReleased()
self._settler(self._response)
self.state = constants.MessageState.ReceivedSettled
return True
return False
def modify(self, failed, deliverable, annotations=None):
"""Send a response disposition to the service to indicate that
a received message has been modified. If the client is running in PeekLock
mode, the service will wait on this disposition. Otherwise it will
        be ignored. Returns `True` if the message was modified, or `False` if the message
was already settled.
:param failed: Whether this delivery of this message failed. This does not
            indicate whether subsequent deliveries of this message would also fail.
:type failed: bool
:param deliverable: Whether this message will be deliverable to this client
on subsequent deliveries - i.e. whether delivery is retryable.
:type deliverable: bool
:param annotations: Annotations to attach to response.
:type annotations: dict
:rtype: bool
:raises: TypeError if the message is being sent rather than received.
"""
if self._can_settle_message():
self._response = errors.MessageModified(
failed,
deliverable,
annotations=annotations,
encoding=self._encoding)
self._settler(self._response)
self.state = constants.MessageState.ReceivedSettled
return True
return False
class BatchMessage(Message):
"""A Batched AMQP message.
This batch message encodes multiple message bodies into a single message
    to increase throughput over the wire. It requires server-side support
to unpackage the batched messages and so will not be universally supported.
:ivar on_send_complete: A custom callback to be run on completion of
the send operation of this message. The callback must take two parameters,
a result (of type ~uamqp.constants.MessageSendResult) and an error (of type
    Exception). The error parameter may be None if no error occurred or the error
information was undetermined.
:vartype on_send_complete: callable[~uamqp.constants.MessageSendResult, Exception]
    :ivar batch_format: This is the specific message format that informs the service that
    the body should be interpreted as multiple messages. The value is 0x80013700.
:vartype batch_format: int
:ivar max_message_length: The maximum data size in bytes to allow in a single message.
By default this is 256kb. If sending a single batch message, an error will be raised
if the supplied data exceeds this maximum. If sending multiple batch messages, this
value will be used to divide the supplied data between messages.
:vartype max_message_length: int
:param data: An iterable source of data, where each value will be considered the
body of a single message in the batch.
:type data: iterable
:param properties: Properties to add to the message. If multiple messages are created
these properties will be applied to each message.
:type properties: ~uamqp.message.MessageProperties
:param application_properties: Service specific application properties. If multiple messages
are created these properties will be applied to each message.
:type application_properties: dict
:param annotations: Service specific message annotations. If multiple messages are created
these properties will be applied to each message. Keys in the dictionary
must be `types.AMQPSymbol` or `types.AMQPuLong`.
:type annotations: dict
:param header: The message header. This header will be applied to each message in the batch.
:type header: ~uamqp.message.MessageHeader
:param multi_messages: Whether to send the supplied data across multiple messages. If set to
`False`, all the data will be sent in a single message, and an error raised if the message
is too large. If set to `True`, the data will automatically be divided across multiple messages
of an appropriate size. The default is `False`.
:type multi_messages: bool
:param encoding: The encoding to use for parameters supplied as strings.
Default is 'UTF-8'
:type encoding: str
:raises: ValueError if data is sent in a single message and that message exceeds the max size.
"""
batch_format = 0x80013700
max_message_length = constants.MAX_MESSAGE_LENGTH_BYTES
size_offset = 0
def __init__(self,
data=None,
properties=None,
application_properties=None,
annotations=None,
header=None,
multi_messages=False,
encoding='UTF-8'):
# pylint: disable=super-init-not-called
self._multi_messages = multi_messages
self._body_gen = data
self._encoding = encoding
self.on_send_complete = None
self.properties = properties
self.application_properties = application_properties
self.annotations = annotations
self.header = header
def _create_batch_message(self):
"""Create a ~uamqp.message.Message for a value supplied by the data
generator. Applies all properties and annotations to the message.
:rtype: ~uamqp.message.Message
"""
return Message(body=[],
properties=self.properties,
annotations=self.annotations,
msg_format=self.batch_format,
header=self.header,
encoding=self._encoding)
def _multi_message_generator(self):
"""Generate multiple ~uamqp.message.Message objects from a single data
stream that in total may exceed the maximum individual message size.
Data will be continuously added to a single message until that message
reaches a max allowable size, at which point it will be yielded and
a new message will be started.
:rtype: generator[~uamqp.message.Message]
"""
unappended_message_bytes = None
while True:
new_message = self._create_batch_message()
message_size = new_message.get_message_encoded_size() + self.size_offset
body_size = 0
if unappended_message_bytes:
new_message._body.append(unappended_message_bytes) # pylint: disable=protected-access
body_size += len(unappended_message_bytes)
try:
for data in self._body_gen:
message_bytes = None
try:
if not data.application_properties: # Message-like object
data.application_properties = self.application_properties
message_bytes = data.encode_message()
except AttributeError: # raw data
wrap_message = Message(body=data, application_properties=self.application_properties)
message_bytes = wrap_message.encode_message()
body_size += len(message_bytes)
if (body_size + message_size) > self.max_message_length:
new_message.on_send_complete = self.on_send_complete
unappended_message_bytes = message_bytes
yield new_message
raise StopIteration()
new_message._body.append(message_bytes) # pylint: disable=protected-access
except StopIteration:
_logger.debug("Sent partial message.")
continue
else:
new_message.on_send_complete = self.on_send_complete
yield new_message
_logger.debug("Sent all batched data.")
break
def gather(self):
"""Return all the messages represented by this object. This will convert
the batch data into individual Message objects, which may be one
or more if multi_messages is set to `True`.
:rtype: list[~uamqp.message.Message]
"""
if self._multi_messages:
return self._multi_message_generator()
new_message = self._create_batch_message()
message_size = new_message.get_message_encoded_size() + self.size_offset
body_size = 0
for data in self._body_gen:
message_bytes = None
try:
if not data.application_properties: # Message-like object
data.application_properties = self.application_properties
message_bytes = data.encode_message()
except AttributeError: # raw data
wrap_message = Message(body=data, application_properties=self.application_properties)
message_bytes = wrap_message.encode_message()
body_size += len(message_bytes)
if (body_size + message_size) > self.max_message_length:
raise ValueError(
"Data set too large for a single message."
"Set multi_messages to True to split data across multiple messages.")
new_message._body.append(message_bytes) # pylint: disable=protected-access
new_message.on_send_complete = self.on_send_complete
return [new_message]
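# A minimal batching sketch, assuming an iterable of raw payloads; with
# multi_messages=True the data is split across messages no larger than
# max_message_length, while a single oversized batch raises ValueError:
#
#     batch = BatchMessage(data=[b"event-1", b"event-2"], multi_messages=True)
#     for message in batch.gather():
#         ...  # hand each Message to a sender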
class MessageProperties(object):
"""Message properties.
The properties that are actually used will depend on the service implementation.
Not all received messages will have all properties, and not all properties
will be utilized on a sent message.
:ivar message_id: Message-id, if set, uniquely identifies a message within the message system.
The message producer is usually responsible for setting the message-id in such a way that it
is assured to be globally unique. A broker MAY discard a message as a duplicate if the value
of the message-id matches that of a previously received message sent to the same node.
:vartype message_id: str or bytes or uuid.UUID or ~uamqp.types.AMQPType
:ivar user_id: The identity of the user responsible for producing the message. The client sets
this value, and it MAY be authenticated by intermediaries.
:vartype user_id: str or bytes
:ivar to: The to field identifies the node that is the intended destination of the message.
On any given transfer this might not be the node at the receiving end of the link.
:vartype to: str or bytes
:ivar subject:
:vartype subject:
:ivar reply_to:
:vartype reply_to:
:ivar correlation_id:
:vartype correlation_id:
:ivar content_type:
:vartype content_type:
:ivar content_encoding:
:vartype content_encoding:
:ivar absolute_expiry_time:
:vartype absolute_expiry_time:
:ivar creation_time:
:vartype creation_time:
:ivar group_id:
:vartype group_id:
:ivar group_sequence:
:vartype group_sequence:
:ivar reply_to_group_id:
:vartype reply_to_group_id:
"""
def __init__(self,
message_id=None,
user_id=None,
to=None,
subject=None,
reply_to=None,
correlation_id=None,
content_type=None,
content_encoding=None,
absolute_expiry_time=None,
creation_time=None,
group_id=None,
group_sequence=None,
reply_to_group_id=None,
properties=None,
encoding='UTF-8'):
self._encoding = encoding
if properties:
self._message_id = properties.message_id
self._user_id = properties.user_id
self._to = properties.to
self._subject = properties.subject
self._reply_to = properties.reply_to
self._correlation_id = properties.correlation_id
self._content_type = properties.content_type
self._content_encoding = properties.content_encoding
self._absolute_expiry_time = properties.absolute_expiry_time
self._creation_time = properties.creation_time
self._group_id = properties.group_id
self._group_sequence = properties.group_sequence
self._reply_to_group_id = properties.reply_to_group_id
else:
self.message_id = message_id
self.user_id = user_id
self.to = to
self.subject = subject
self.reply_to = reply_to
self.correlation_id = correlation_id
self.content_type = content_type
self.content_encoding = content_encoding
self.absolute_expiry_time = absolute_expiry_time
self.creation_time = creation_time
self.group_id = group_id
self.group_sequence = group_sequence
self.reply_to_group_id = reply_to_group_id
def __str__(self):
return str({
'message_id': self.message_id,
'user_id': self.user_id,
'to': self.to,
'subject': self.subject,
'reply_to': self.reply_to,
'correlation_id': self.correlation_id,
'content_type': self.content_type,
'content_encoding': self.content_encoding,
'absolute_expiry_time': self.absolute_expiry_time,
'creation_time': self.creation_time,
'group_id': self.group_id,
'group_sequence': self.group_sequence,
'reply_to_group_id': self.reply_to_group_id
})
@property
def message_id(self):
if self._message_id:
return self._message_id.value
return None
@message_id.setter
def message_id(self, value):
if value is None:
self._message_id = None
else:
self._message_id = utils.data_factory(value, encoding=self._encoding)
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
if isinstance(value, six.text_type):
value = value.encode(self._encoding)
elif value is not None and not isinstance(value, six.binary_type):
raise TypeError("user_id must be bytes or str.")
self._user_id = value
@property
def to(self):
if self._to:
return self._to.value
return None
@to.setter
def to(self, value):
if value is None:
self._to = None
else:
self._to = utils.data_factory(value, encoding=self._encoding)
@property
def subject(self):
return self._subject
@subject.setter
def subject(self, value):
if isinstance(value, six.text_type):
value = value.encode(self._encoding)
elif value is not None and not isinstance(value, six.binary_type):
raise TypeError("subject must be bytes or str.")
self._subject = value
@property
def reply_to(self):
if self._reply_to is not None:
return self._reply_to.value
return None
@reply_to.setter
def reply_to(self, value):
if value is None:
self._reply_to = None
else:
self._reply_to = utils.data_factory(value, encoding=self._encoding)
@property
def correlation_id(self):
if self._correlation_id is not None:
return self._correlation_id.value
return None
@correlation_id.setter
def correlation_id(self, value):
if value is None:
self._correlation_id = None
else:
self._correlation_id = utils.data_factory(value, encoding=self._encoding)
@property
def content_type(self):
return self._content_type
@content_type.setter
def content_type(self, value):
if isinstance(value, six.text_type):
value = value.encode(self._encoding)
elif value is not None and not isinstance(value, six.binary_type):
raise TypeError("content_type must be bytes or str.")
self._content_type = value
@property
def content_encoding(self):
return self._content_encoding
@content_encoding.setter
def content_encoding(self, value):
if isinstance(value, six.text_type):
value = value.encode(self._encoding)
elif value is not None and not isinstance(value, six.binary_type):
raise TypeError("content_encoding must be bytes or str.")
self._content_encoding = value
@property
def absolute_expiry_time(self):
return self._absolute_expiry_time
@absolute_expiry_time.setter
def absolute_expiry_time(self, value):
if value is not None and not isinstance(value, int):
raise TypeError("absolute_expiry_time must be an integer.")
self._absolute_expiry_time = value
@property
def creation_time(self):
return self._creation_time
@creation_time.setter
def creation_time(self, value):
if value is not None and not isinstance(value, int):
raise TypeError("creation_time must be an integer.")
self._creation_time = value
@property
def group_id(self):
return self._group_id
@group_id.setter
def group_id(self, value):
if isinstance(value, six.text_type):
value = value.encode(self._encoding)
elif value is not None and not isinstance(value, six.binary_type):
raise TypeError("group_id must be bytes or str.")
self._group_id = value
@property
def group_sequence(self):
return self._group_sequence
@group_sequence.setter
def group_sequence(self, value):
if value is not None and not isinstance(value, int):
raise TypeError("group_sequence must be an integer.")
self._group_sequence = value
@property
def reply_to_group_id(self):
return self._reply_to_group_id
@reply_to_group_id.setter
def reply_to_group_id(self, value):
if isinstance(value, six.text_type):
value = value.encode(self._encoding)
elif value is not None and not isinstance(value, six.binary_type):
raise TypeError("reply_to_group_id must be bytes or str.")
self._reply_to_group_id = value
def _set_attr(self, attr, properties):
attr_value = getattr(self, "_" + attr)
if attr_value is not None:
setattr(properties, attr, attr_value)
def get_properties_obj(self):
"""Get the underlying C reference from this object.
:rtype: uamqp.c_uamqp.cProperties
"""
properties = c_uamqp.cProperties()
self._set_attr('message_id', properties)
self._set_attr('user_id', properties)
self._set_attr('to', properties)
self._set_attr('subject', properties)
self._set_attr('reply_to', properties)
self._set_attr('correlation_id', properties)
self._set_attr('content_type', properties)
self._set_attr('content_encoding', properties)
self._set_attr('absolute_expiry_time', properties)
self._set_attr('creation_time', properties)
self._set_attr('group_id', properties)
self._set_attr('group_sequence', properties)
self._set_attr('reply_to_group_id', properties)
return properties
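# Hedged sketch: attaching properties to an outgoing message; the values are
# hypothetical, and string values are encoded using the supplied encoding.
#
#     props = MessageProperties(message_id="42", content_type="application/json")
#     msg = Message(body=b"{}", properties=props)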
class MessageBody(object):
"""Base class for an AMQP message body. This should
not be used directly.
"""
def __init__(self, c_message, encoding='UTF-8'):
self._message = c_message
self._encoding = encoding
@property
def type(self):
return self._message.body_type
@property
def data(self):
raise NotImplementedError("Only MessageBody subclasses have data.")
class DataBody(MessageBody):
"""An AMQP message body of type Data. This represents
a list of bytes sections.
:ivar type: The body type. This should always be `DataType`.
:vartype type: uamqp.c_uamqp.MessageBodyType
:ivar data: The data contained in the message body. This returns
a generator to iterate over each section in the body, where
each section will be a byte string.
:vartype data: Generator[bytes]
"""
def __str__(self):
if six.PY3:
return "".join(d.decode(self._encoding) for d in self.data)
return "".join(self.data)
def __unicode__(self):
return u"".join(d.decode(self._encoding) for d in self.data)
def __bytes__(self):
return b"".join(self.data)
def __len__(self):
return self._message.count_body_data()
def __getitem__(self, index):
if index >= len(self):
raise IndexError("Index is out of range.")
data = self._message.get_body_data(index)
return data.value
def append(self, data):
"""Append a section to the body.
:param data: The data to append.
:type data: str or bytes
"""
if isinstance(data, six.text_type):
self._message.add_body_data(data.encode(self._encoding))
elif isinstance(data, six.binary_type):
self._message.add_body_data(data)
@property
def data(self):
for i in range(len(self)):
yield self._message.get_body_data(i)
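# Hedged sketch: iterating the sections of a multi-part DataBody; the payload
# values are hypothetical.
#
#     msg = Message(body=[b"part-1", b"part-2"])
#     sections = list(msg.get_data())  # generator over the body sections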
class ValueBody(MessageBody):
"""An AMQP message body of type Value. This represents
a single encoded object.
    :ivar type: The body type. This should always be `ValueType`.
:vartype type: uamqp.c_uamqp.MessageBodyType
:ivar data: The data contained in the message body. The value
     of the encoded object.
:vartype data: object
"""
def __str__(self):
data = self.data
if not data:
return ""
if six.PY3 and isinstance(data, six.binary_type):
return data.decode(self._encoding)
return str(data)
def __unicode__(self):
data = self.data
if not data:
return u""
if isinstance(data, six.binary_type):
return data.decode(self._encoding)
return unicode(data) # pylint: disable=undefined-variable
def __bytes__(self):
data = self.data
if not data:
return b""
return bytes(data)
def set(self, value):
"""Set a value as the message body. This can be any
Python data type and it will be automatically encoded
into an AMQP type. If a specific AMQP type is required, a
`types.AMQPType` can be used.
        :param value: The data to send in the body.
        :type value: ~uamqp.types.AMQPType
"""
value = utils.data_factory(value)
self._message.set_body_value(value)
@property
def data(self):
_value = self._message.get_body_value()
if _value:
return _value.value
return None
class MessageHeader(object):
"""The Message header. This is only used on received message, and not
set on messages being sent. The properties set on any given message
will depend on the Service and not all messages will have all properties.
:ivar delivery_count: The number of unsuccessful previous attempts to deliver
this message. If this value is non-zero it can be taken as an indication that the
delivery might be a duplicate. On first delivery, the value is zero. It is
incremented upon an outcome being settled at the sender, according to rules
defined for each outcome.
:vartype delivery_count: int
:ivar time_to_live: Duration in milliseconds for which the message is to be considered "live".
If this is set then a message expiration time will be computed based on the time of arrival
at an intermediary. Messages that live longer than their expiration time will be discarded
(or dead lettered). When a message is transmitted by an intermediary that was received
with a ttl, the transmitted message's header SHOULD contain a ttl that is computed as the
difference between the current time and the formerly computed message expiration time,
i.e., the reduced ttl, so that messages will eventually die if they end up in a delivery loop.
:vartype time_to_live: int
:ivar durable: Durable messages MUST NOT be lost even if an intermediary is unexpectedly terminated
and restarted. A target which is not capable of fulfilling this guarantee MUST NOT accept messages
where the durable header is set to `True`: if the source allows the rejected outcome then the
message SHOULD be rejected with the precondition-failed error, otherwise the link MUST be detached
by the receiver with the same error.
:vartype durable: bool
:ivar first_acquirer: If this value is `True`, then this message has not been acquired
by any other link. If this value is `False`, then this message MAY have previously
been acquired by another link or links.
:vartype first_acquirer: bool
:ivar priority: This field contains the relative message priority. Higher numbers indicate higher
priority messages. Messages with higher priorities MAY be delivered before those with lower priorities.
:vartype priority: int
:param header: Internal only. This is used to wrap an existing message header
that has been received from an AMQP service.
:type header: uamqp.c_uamqp.cHeader
"""
def __init__(self, header=None):
self.delivery_count = None
self.time_to_live = None
self.first_acquirer = None
self.durable = None
self.priority = None
if header:
self.delivery_count = header.delivery_count
self.time_to_live = header.time_to_live
self.first_acquirer = header.first_acquirer
self.durable = header.durable
self.priority = header.priority
def __str__(self):
return str({
'delivery_count': self.delivery_count,
'time_to_live': self.time_to_live,
'first_acquirer': self.first_acquirer,
'durable': self.durable,
'priority': self.priority
})
def get_header_obj(self):
"""Get the underlying C reference from this object.
:rtype: uamqp.c_uamqp.cHeader
"""
header = c_uamqp.create_header()
if self.delivery_count is not None:
header.delivery_count = self.delivery_count
if self.time_to_live is not None:
header.time_to_live = self.time_to_live
if self.first_acquirer is not None:
header.first_acquirer = self.first_acquirer
if self.durable is not None:
header.durable = self.durable
if self.priority is not None:
header.priority = self.priority
return header
| 40.457341
| 113
| 0.650205
|
import logging
import six
from uamqp import c_uamqp, constants, errors, utils
_logger = logging.getLogger(__name__)
class Message(object):
def __init__(self,
body=None,
properties=None,
application_properties=None,
annotations=None,
header=None,
msg_format=None,
message=None,
settler=None,
delivery_no=None,
encoding='UTF-8'):
self.state = constants.MessageState.WaitingToBeSent
self.idle_time = 0
self.retries = 0
self._response = None
self._settler = None
self._encoding = encoding
self.delivery_no = delivery_no
self.on_send_complete = None
self.properties = None
self.application_properties = None
self.annotations = None
self.header = None
self.footer = None
self.delivery_annotations = None
if message:
if settler:
self.state = constants.MessageState.ReceivedUnsettled
self._response = None
else:
self.state = constants.MessageState.ReceivedSettled
self._response = errors.MessageAlreadySettled()
self._settler = settler
self._parse_message(message)
else:
self._message = c_uamqp.create_message()
if isinstance(body, (six.text_type, six.binary_type)):
self._body = DataBody(self._message)
self._body.append(body)
elif isinstance(body, list) and all([isinstance(b, (six.text_type, six.binary_type)) for b in body]):
self._body = DataBody(self._message)
for value in body:
self._body.append(value)
else:
self._body = ValueBody(self._message)
self._body.set(body)
if msg_format:
self._message.message_format = msg_format
self.properties = properties
self.application_properties = application_properties
self.annotations = annotations
self.header = header
@classmethod
def decode_from_bytes(cls, data):
decoded_message = c_uamqp.decode_message(len(data), data)
return cls(message=decoded_message)
def __str__(self):
if not self._message:
return ""
return str(self._body)
def _parse_message(self, message):
_logger.debug("Parsing received message %r.", self.delivery_no)
self._message = message
body_type = message.body_type
if body_type == c_uamqp.MessageBodyType.NoneType:
self._body = None
elif body_type == c_uamqp.MessageBodyType.DataType:
self._body = DataBody(self._message)
elif body_type == c_uamqp.MessageBodyType.SequenceType:
raise TypeError("Message body type Sequence not supported.")
else:
self._body = ValueBody(self._message)
_props = self._message.properties
if _props:
_logger.debug("Parsing received message properties %r.", self.delivery_no)
self.properties = MessageProperties(properties=_props, encoding=self._encoding)
_header = self._message.header
if _header:
_logger.debug("Parsing received message header %r.", self.delivery_no)
self.header = MessageHeader(header=_header)
_footer = self._message.footer
if _footer:
_logger.debug("Parsing received message footer %r.", self.delivery_no)
self.footer = _footer.map
_app_props = self._message.application_properties
if _app_props:
_logger.debug("Parsing received message application properties %r.", self.delivery_no)
self.application_properties = _app_props.map
_ann = self._message.message_annotations
if _ann:
_logger.debug("Parsing received message annotations %r.", self.delivery_no)
self.annotations = _ann.map
_delivery_ann = self._message.delivery_annotations
if _delivery_ann:
_logger.debug("Parsing received message delivery annotations %r.", self.delivery_no)
self.delivery_annotations = _delivery_ann.map
def _can_settle_message(self):
if self.state not in constants.RECEIVE_STATES:
raise TypeError("Only received messages can be settled.")
if self.settled:
return False
return True
def _populate_message_attributes(self, c_message):
if self.properties:
c_message.properties = self.properties.get_properties_obj()
if self.application_properties:
if not isinstance(self.application_properties, dict):
raise TypeError("Application properties must be a dictionary.")
amqp_props = utils.data_factory(self.application_properties, encoding=self._encoding)
c_message.application_properties = amqp_props
if self.annotations:
if not isinstance(self.annotations, dict):
raise TypeError("Message annotations must be a dictionary.")
ann_props = c_uamqp.create_message_annotations(
utils.data_factory(self.annotations, encoding=self._encoding))
c_message.message_annotations = ann_props
if self.header:
c_message.header = self.header.get_header_obj()
@property
def settled(self):
if self._response:
return True
return False
def get_message_encoded_size(self):
if not self._message:
raise ValueError("No message data to encode.")
cloned_data = self._message.clone()
self._populate_message_attributes(cloned_data)
encoded_data = []
return c_uamqp.get_encoded_message_size(cloned_data, encoded_data)
def encode_message(self):
if not self._message:
raise ValueError("No message data to encode.")
cloned_data = self._message.clone()
self._populate_message_attributes(cloned_data)
encoded_data = []
c_uamqp.get_encoded_message_size(cloned_data, encoded_data)
return b"".join(encoded_data)
def get_data(self):
if not self._message or not self._body:
return None
return self._body.data
def gather(self):
if self.state in constants.RECEIVE_STATES:
raise TypeError("Only new messages can be gathered.")
if not self._message:
raise ValueError("Message data already consumed.")
try:
raise self._response
except TypeError:
pass
return [self]
def get_message(self):
if not self._message:
return None
self._populate_message_attributes(self._message)
return self._message
def accept(self):
if self._can_settle_message():
self._response = errors.MessageAccepted()
self._settler(self._response)
self.state = constants.MessageState.ReceivedSettled
return True
return False
def reject(self, condition=None, description=None):
if self._can_settle_message():
self._response = errors.MessageRejected(
condition=condition,
description=description,
encoding=self._encoding)
self._settler(self._response)
self.state = constants.MessageState.ReceivedSettled
return True
return False
def release(self):
if self._can_settle_message():
self._response = errors.MessageReleased()
self._settler(self._response)
self.state = constants.MessageState.ReceivedSettled
return True
return False
def modify(self, failed, deliverable, annotations=None):
if self._can_settle_message():
self._response = errors.MessageModified(
failed,
deliverable,
annotations=annotations,
encoding=self._encoding)
self._settler(self._response)
self.state = constants.MessageState.ReceivedSettled
return True
return False
class BatchMessage(Message):
batch_format = 0x80013700
max_message_length = constants.MAX_MESSAGE_LENGTH_BYTES
size_offset = 0
def __init__(self,
data=None,
properties=None,
application_properties=None,
annotations=None,
header=None,
multi_messages=False,
encoding='UTF-8'):
self._multi_messages = multi_messages
self._body_gen = data
self._encoding = encoding
self.on_send_complete = None
self.properties = properties
self.application_properties = application_properties
self.annotations = annotations
self.header = header
def _create_batch_message(self):
return Message(body=[],
properties=self.properties,
annotations=self.annotations,
msg_format=self.batch_format,
header=self.header,
encoding=self._encoding)
def _multi_message_generator(self):
unappended_message_bytes = None
while True:
new_message = self._create_batch_message()
message_size = new_message.get_message_encoded_size() + self.size_offset
body_size = 0
if unappended_message_bytes:
                new_message._body.append(unappended_message_bytes)
                body_size += len(unappended_message_bytes)
try:
for data in self._body_gen:
message_bytes = None
try:
                        if not data.application_properties:
                            data.application_properties = self.application_properties
message_bytes = data.encode_message()
                    except AttributeError:
                        wrap_message = Message(body=data, application_properties=self.application_properties)
                        message_bytes = wrap_message.encode_message()
body_size += len(message_bytes)
if (body_size + message_size) > self.max_message_length:
new_message.on_send_complete = self.on_send_complete
unappended_message_bytes = message_bytes
yield new_message
raise StopIteration()
                    new_message._body.append(message_bytes)
            except StopIteration:
_logger.debug("Sent partial message.")
continue
else:
new_message.on_send_complete = self.on_send_complete
yield new_message
_logger.debug("Sent all batched data.")
break
def gather(self):
if self._multi_messages:
return self._multi_message_generator()
new_message = self._create_batch_message()
message_size = new_message.get_message_encoded_size() + self.size_offset
body_size = 0
for data in self._body_gen:
message_bytes = None
try:
                if not data.application_properties:
                    data.application_properties = self.application_properties
message_bytes = data.encode_message()
            except AttributeError:
                wrap_message = Message(body=data, application_properties=self.application_properties)
                message_bytes = wrap_message.encode_message()
body_size += len(message_bytes)
if (body_size + message_size) > self.max_message_length:
raise ValueError(
"Data set too large for a single message."
"Set multi_messages to True to split data across multiple messages.")
            new_message._body.append(message_bytes)
        new_message.on_send_complete = self.on_send_complete
return [new_message]
class MessageProperties(object):
def __init__(self,
message_id=None,
user_id=None,
to=None,
subject=None,
reply_to=None,
correlation_id=None,
content_type=None,
content_encoding=None,
absolute_expiry_time=None,
creation_time=None,
group_id=None,
group_sequence=None,
reply_to_group_id=None,
properties=None,
encoding='UTF-8'):
self._encoding = encoding
if properties:
self._message_id = properties.message_id
self._user_id = properties.user_id
self._to = properties.to
self._subject = properties.subject
self._reply_to = properties.reply_to
self._correlation_id = properties.correlation_id
self._content_type = properties.content_type
self._content_encoding = properties.content_encoding
self._absolute_expiry_time = properties.absolute_expiry_time
self._creation_time = properties.creation_time
self._group_id = properties.group_id
self._group_sequence = properties.group_sequence
self._reply_to_group_id = properties.reply_to_group_id
else:
self.message_id = message_id
self.user_id = user_id
self.to = to
self.subject = subject
self.reply_to = reply_to
self.correlation_id = correlation_id
self.content_type = content_type
self.content_encoding = content_encoding
self.absolute_expiry_time = absolute_expiry_time
self.creation_time = creation_time
self.group_id = group_id
self.group_sequence = group_sequence
self.reply_to_group_id = reply_to_group_id
def __str__(self):
return str({
'message_id': self.message_id,
'user_id': self.user_id,
'to': self.to,
'subject': self.subject,
'reply_to': self.reply_to,
'correlation_id': self.correlation_id,
'content_type': self.content_type,
'content_encoding': self.content_encoding,
'absolute_expiry_time': self.absolute_expiry_time,
'creation_time': self.creation_time,
'group_id': self.group_id,
'group_sequence': self.group_sequence,
'reply_to_group_id': self.reply_to_group_id
})
@property
def message_id(self):
if self._message_id:
return self._message_id.value
return None
@message_id.setter
def message_id(self, value):
if value is None:
self._message_id = None
else:
self._message_id = utils.data_factory(value, encoding=self._encoding)
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
if isinstance(value, six.text_type):
value = value.encode(self._encoding)
elif value is not None and not isinstance(value, six.binary_type):
raise TypeError("user_id must be bytes or str.")
self._user_id = value
@property
def to(self):
if self._to:
return self._to.value
return None
@to.setter
def to(self, value):
if value is None:
self._to = None
else:
self._to = utils.data_factory(value, encoding=self._encoding)
@property
def subject(self):
return self._subject
@subject.setter
def subject(self, value):
if isinstance(value, six.text_type):
value = value.encode(self._encoding)
elif value is not None and not isinstance(value, six.binary_type):
raise TypeError("subject must be bytes or str.")
self._subject = value
@property
def reply_to(self):
if self._reply_to is not None:
return self._reply_to.value
return None
@reply_to.setter
def reply_to(self, value):
if value is None:
self._reply_to = None
else:
self._reply_to = utils.data_factory(value, encoding=self._encoding)
@property
def correlation_id(self):
if self._correlation_id is not None:
return self._correlation_id.value
return None
@correlation_id.setter
def correlation_id(self, value):
if value is None:
self._correlation_id = None
else:
self._correlation_id = utils.data_factory(value, encoding=self._encoding)
@property
def content_type(self):
return self._content_type
@content_type.setter
def content_type(self, value):
if isinstance(value, six.text_type):
value = value.encode(self._encoding)
elif value is not None and not isinstance(value, six.binary_type):
raise TypeError("content_type must be bytes or str.")
self._content_type = value
@property
def content_encoding(self):
return self._content_encoding
@content_encoding.setter
def content_encoding(self, value):
if isinstance(value, six.text_type):
value = value.encode(self._encoding)
elif value is not None and not isinstance(value, six.binary_type):
raise TypeError("content_encoding must be bytes or str.")
self._content_encoding = value
@property
def absolute_expiry_time(self):
return self._absolute_expiry_time
@absolute_expiry_time.setter
def absolute_expiry_time(self, value):
if value is not None and not isinstance(value, int):
raise TypeError("absolute_expiry_time must be an integer.")
self._absolute_expiry_time = value
@property
def creation_time(self):
return self._creation_time
@creation_time.setter
def creation_time(self, value):
if value is not None and not isinstance(value, int):
raise TypeError("creation_time must be an integer.")
self._creation_time = value
@property
def group_id(self):
return self._group_id
@group_id.setter
def group_id(self, value):
if isinstance(value, six.text_type):
value = value.encode(self._encoding)
elif value is not None and not isinstance(value, six.binary_type):
raise TypeError("group_id must be bytes or str.")
self._group_id = value
@property
def group_sequence(self):
return self._group_sequence
@group_sequence.setter
def group_sequence(self, value):
if value is not None and not isinstance(value, int):
raise TypeError("group_sequence must be an integer.")
self._group_sequence = value
@property
def reply_to_group_id(self):
return self._reply_to_group_id
@reply_to_group_id.setter
def reply_to_group_id(self, value):
if isinstance(value, six.text_type):
value = value.encode(self._encoding)
elif value is not None and not isinstance(value, six.binary_type):
raise TypeError("reply_to_group_id must be bytes or str.")
self._reply_to_group_id = value
def _set_attr(self, attr, properties):
attr_value = getattr(self, "_" + attr)
if attr_value is not None:
setattr(properties, attr, attr_value)
def get_properties_obj(self):
properties = c_uamqp.cProperties()
self._set_attr('message_id', properties)
self._set_attr('user_id', properties)
self._set_attr('to', properties)
self._set_attr('subject', properties)
self._set_attr('reply_to', properties)
self._set_attr('correlation_id', properties)
self._set_attr('content_type', properties)
self._set_attr('content_encoding', properties)
self._set_attr('absolute_expiry_time', properties)
self._set_attr('creation_time', properties)
self._set_attr('group_id', properties)
self._set_attr('group_sequence', properties)
self._set_attr('reply_to_group_id', properties)
return properties
class MessageBody(object):
def __init__(self, c_message, encoding='UTF-8'):
self._message = c_message
self._encoding = encoding
@property
def type(self):
return self._message.body_type
@property
def data(self):
raise NotImplementedError("Only MessageBody subclasses have data.")
class DataBody(MessageBody):
def __str__(self):
if six.PY3:
return "".join(d.decode(self._encoding) for d in self.data)
return "".join(self.data)
def __unicode__(self):
return u"".join(d.decode(self._encoding) for d in self.data)
def __bytes__(self):
return b"".join(self.data)
def __len__(self):
return self._message.count_body_data()
def __getitem__(self, index):
if index >= len(self):
raise IndexError("Index is out of range.")
data = self._message.get_body_data(index)
return data.value
def append(self, data):
if isinstance(data, six.text_type):
self._message.add_body_data(data.encode(self._encoding))
elif isinstance(data, six.binary_type):
self._message.add_body_data(data)
@property
def data(self):
for i in range(len(self)):
yield self._message.get_body_data(i)
class ValueBody(MessageBody):
def __str__(self):
data = self.data
if not data:
return ""
if six.PY3 and isinstance(data, six.binary_type):
return data.decode(self._encoding)
return str(data)
def __unicode__(self):
data = self.data
if not data:
return u""
if isinstance(data, six.binary_type):
return data.decode(self._encoding)
return unicode(data)
def __bytes__(self):
data = self.data
if not data:
return b""
return bytes(data)
def set(self, value):
value = utils.data_factory(value)
self._message.set_body_value(value)
@property
def data(self):
_value = self._message.get_body_value()
if _value:
return _value.value
return None
class MessageHeader(object):
def __init__(self, header=None):
self.delivery_count = None
self.time_to_live = None
self.first_acquirer = None
self.durable = None
self.priority = None
if header:
self.delivery_count = header.delivery_count
self.time_to_live = header.time_to_live
self.first_acquirer = header.first_acquirer
self.durable = header.durable
self.priority = header.priority
def __str__(self):
return str({
'delivery_count': self.delivery_count,
'time_to_live': self.time_to_live,
'first_acquirer': self.first_acquirer,
'durable': self.durable,
'priority': self.priority
})
def get_header_obj(self):
header = c_uamqp.create_header()
if self.delivery_count is not None:
header.delivery_count = self.delivery_count
if self.time_to_live is not None:
header.time_to_live = self.time_to_live
if self.first_acquirer is not None:
header.first_acquirer = self.first_acquirer
if self.durable is not None:
header.durable = self.durable
if self.priority is not None:
header.priority = self.priority
return header
| true
| true
|
1c48dc019533b7b44efebbf56b81cc34c04251cd
| 3,950
|
py
|
Python
|
shortcuts/actions/scripting.py
|
alexander-akhmetov/python-shortcuts
|
6d7b45fcf4e34d92e84370e147397422f096ba64
|
[
"MIT"
] | 588
|
2018-09-23T20:39:15.000Z
|
2022-03-27T13:02:48.000Z
|
shortcuts/actions/scripting.py
|
alexander-akhmetov/python-shortcuts
|
6d7b45fcf4e34d92e84370e147397422f096ba64
|
[
"MIT"
] | 63
|
2018-09-27T20:13:56.000Z
|
2022-03-29T03:22:32.000Z
|
shortcuts/actions/scripting.py
|
alexander-akhmetov/python-shortcuts
|
6d7b45fcf4e34d92e84370e147397422f096ba64
|
[
"MIT"
] | 35
|
2018-09-24T03:37:49.000Z
|
2021-07-05T07:32:04.000Z
|
from shortcuts.actions.base import (
BaseAction,
BooleanField,
ChoiceField,
Field,
FloatField,
GroupIDField,
IntegerField,
)
class NothingAction(BaseAction):
'''Nothing'''
itype = 'is.workflow.actions.nothing'
keyword = 'nothing'
class SetItemNameAction(BaseAction):
'''Set item name'''
# todo: advanced
# <dict>
# <key>WFWorkflowActionIdentifier</key>
# <string>is.workflow.actions.setitemname</string>
# <key>WFWorkflowActionParameters</key>
# <dict>
# <key>Advanced</key>
# <true/>
# <key>WFDontIncludeFileExtension</key>
# <true/>
# </dict>
# </dict>
itype = 'is.workflow.actions.setitemname'
keyword = 'set_item_name'
class ViewContentGraphAction(BaseAction):
'''View content graph'''
itype = 'is.workflow.actions.viewresult'
keyword = 'view_content_graph'
class ContinueInShortcutAppAction(BaseAction):
'''Continue in shortcut app'''
itype = 'is.workflow.actions.handoff'
keyword = 'continue_in_shortcut_app'
class ChooseFromListAction(BaseAction):
'''Choose from list'''
itype = 'is.workflow.actions.choosefromlist'
keyword = 'choose_from_list'
prompt = Field('WFChooseFromListActionPrompt', required=False)
select_multiple = BooleanField(
'WFChooseFromListActionSelectMultiple', required=False
)
select_all_initially = BooleanField(
'WFChooseFromListActionSelectAll', required=False
)
class DelayAction(BaseAction):
'''Delay'''
itype = 'is.workflow.actions.delay'
keyword = 'delay'
time = FloatField('WFDelayTime')
class WaitToReturnAction(BaseAction):
'''Wait to return'''
itype = 'is.workflow.actions.waittoreturn'
keyword = 'wait_to_return'
class RepeatStartAction(BaseAction):
'''Repeat'''
itype = 'is.workflow.actions.repeat.count'
keyword = 'repeat_start'
_additional_identifier_field = 'WFControlFlowMode'
group_id = GroupIDField('GroupingIdentifier')
count = IntegerField('WFRepeatCount')
default_fields = {
'WFControlFlowMode': 0,
}
class RepeatEndAction(BaseAction):
'''Repeat'''
itype = 'is.workflow.actions.repeat.count'
keyword = 'repeat_end'
_additional_identifier_field = 'WFControlFlowMode'
group_id = GroupIDField('GroupingIdentifier')
default_fields = {
'WFControlFlowMode': 2,
}
class RepeatEachStartAction(BaseAction):
'''Repeat with each start'''
itype = 'is.workflow.actions.repeat.each'
keyword = 'repeat_with_each_start'
_additional_identifier_field = 'WFControlFlowMode'
group_id = GroupIDField('GroupingIdentifier')
default_fields = {
'WFControlFlowMode': 0,
}
class RepeatEachEndAction(BaseAction):
'''Repeat with each end'''
itype = 'is.workflow.actions.repeat.each'
keyword = 'repeat_with_each_end'
_additional_identifier_field = 'WFControlFlowMode'
group_id = GroupIDField('GroupingIdentifier')
default_fields = {
'WFControlFlowMode': 2,
}
HASH_CHOICES = (
'MD5',
'SHA1',
'SHA256',
'SHA512',
)
class HashAction(BaseAction):
'''Hash action'''
itype = 'is.workflow.actions.hash'
keyword = 'hash'
hash_type = ChoiceField('WFHashType', choices=HASH_CHOICES, default=HASH_CHOICES[0])
class GetMyShortcutsAction(BaseAction):
'''Get my shortcuts'''
itype = 'is.workflow.actions.getmyworkflows'
keyword = 'get_my_shortcuts'
class RunShortcutAction(BaseAction):
'''Run shortcut'''
itype = 'is.workflow.actions.runworkflow'
keyword = 'run_shortcut'
show = BooleanField('WFShowWorkflow', default=False)
shortcut_name = Field('WFWorkflowName')
class OpenAppAction(BaseAction):
'''Opens the specified app.'''
itype = 'is.workflow.actions.openapp'
keyword = 'open_app'
app = Field('WFAppIdentifier')
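# Editorial sketch (hedged, not part of the original module): defining a new
# action follows the same declarative pattern as the classes above -- set
# itype to the Shortcuts action identifier and keyword to the config name.
# The identifier below is illustrative only and must be checked against
# Apple's actual action types before use.
class ExitShortcutAction(BaseAction):
    '''Exit the running shortcut (illustrative only)'''
    itype = 'is.workflow.actions.exit'
    keyword = 'exit'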
| 21.351351
| 88
| 0.672911
|
from shortcuts.actions.base import (
BaseAction,
BooleanField,
ChoiceField,
Field,
FloatField,
GroupIDField,
IntegerField,
)
class NothingAction(BaseAction):
itype = 'is.workflow.actions.nothing'
keyword = 'nothing'
class SetItemNameAction(BaseAction):
itype = 'is.workflow.actions.setitemname'
keyword = 'set_item_name'
class ViewContentGraphAction(BaseAction):
itype = 'is.workflow.actions.viewresult'
keyword = 'view_content_graph'
class ContinueInShortcutAppAction(BaseAction):
itype = 'is.workflow.actions.handoff'
keyword = 'continue_in_shortcut_app'
class ChooseFromListAction(BaseAction):
itype = 'is.workflow.actions.choosefromlist'
keyword = 'choose_from_list'
prompt = Field('WFChooseFromListActionPrompt', required=False)
select_multiple = BooleanField(
'WFChooseFromListActionSelectMultiple', required=False
)
select_all_initially = BooleanField(
'WFChooseFromListActionSelectAll', required=False
)
class DelayAction(BaseAction):
itype = 'is.workflow.actions.delay'
keyword = 'delay'
time = FloatField('WFDelayTime')
class WaitToReturnAction(BaseAction):
itype = 'is.workflow.actions.waittoreturn'
keyword = 'wait_to_return'
class RepeatStartAction(BaseAction):
itype = 'is.workflow.actions.repeat.count'
keyword = 'repeat_start'
_additional_identifier_field = 'WFControlFlowMode'
group_id = GroupIDField('GroupingIdentifier')
count = IntegerField('WFRepeatCount')
default_fields = {
'WFControlFlowMode': 0,
}
class RepeatEndAction(BaseAction):
itype = 'is.workflow.actions.repeat.count'
keyword = 'repeat_end'
_additional_identifier_field = 'WFControlFlowMode'
group_id = GroupIDField('GroupingIdentifier')
default_fields = {
'WFControlFlowMode': 2,
}
class RepeatEachStartAction(BaseAction):
itype = 'is.workflow.actions.repeat.each'
keyword = 'repeat_with_each_start'
_additional_identifier_field = 'WFControlFlowMode'
group_id = GroupIDField('GroupingIdentifier')
default_fields = {
'WFControlFlowMode': 0,
}
class RepeatEachEndAction(BaseAction):
itype = 'is.workflow.actions.repeat.each'
keyword = 'repeat_with_each_end'
_additional_identifier_field = 'WFControlFlowMode'
group_id = GroupIDField('GroupingIdentifier')
default_fields = {
'WFControlFlowMode': 2,
}
HASH_CHOICES = (
'MD5',
'SHA1',
'SHA256',
'SHA512',
)
class HashAction(BaseAction):
itype = 'is.workflow.actions.hash'
keyword = 'hash'
hash_type = ChoiceField('WFHashType', choices=HASH_CHOICES, default=HASH_CHOICES[0])
class GetMyShortcutsAction(BaseAction):
itype = 'is.workflow.actions.getmyworkflows'
keyword = 'get_my_shortcuts'
class RunShortcutAction(BaseAction):
itype = 'is.workflow.actions.runworkflow'
keyword = 'run_shortcut'
show = BooleanField('WFShowWorkflow', default=False)
shortcut_name = Field('WFWorkflowName')
class OpenAppAction(BaseAction):
itype = 'is.workflow.actions.openapp'
keyword = 'open_app'
app = Field('WFAppIdentifier')
| true
| true
|
1c48dc75724e3f6c2006bc3255a52eac0f0e12c2
| 825
|
py
|
Python
|
instaloader_test.py
|
sam5epi0l/bottuber
|
098d3c74bd610f39c6e53c663bcd8e395cb3ecb4
|
[
"MIT"
] | 91
|
2022-01-14T12:18:08.000Z
|
2022-03-16T11:56:13.000Z
|
instaloader_test.py
|
pradeepjhuriya/bottuber
|
098d3c74bd610f39c6e53c663bcd8e395cb3ecb4
|
[
"MIT"
] | 6
|
2022-01-21T09:05:57.000Z
|
2022-03-17T08:31:44.000Z
|
instaloader_test.py
|
pradeepjhuriya/bottuber
|
098d3c74bd610f39c6e53c663bcd8e395cb3ecb4
|
[
"MIT"
] | 15
|
2022-01-17T15:27:00.000Z
|
2022-03-28T16:43:05.000Z
|
from datetime import datetime
import instaloader
# Do not change
# instaloader downloads some posts under the hashtag urbanphotography
L = instaloader.Instaloader()
posts = instaloader.Hashtag.from_name(L.context, "urbanphotography").get_posts()
SINCE = datetime(2020, 5, 10) # further from today; exclusive (posts at or before this are skipped)
UNTIL = datetime(2020, 5, 11) # closer to today; inclusive (posts after this are skipped)
k = 0 # initiate k
#k_list = [] # uncomment this to tune k
for post in posts:
postdate = post.date
if postdate > UNTIL:
continue
elif postdate <= SINCE:
k += 1
if k == 50:
break
else:
continue
else:
L.download_post(post, "#urbanphotography")
# if you want to tune k, uncomment below to get your k max
#k_list.append(k)
k = 0 # set k to 0
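# Editorial sketch (hedged, not part of the original script): the date-window
# test from the loop above, factored into a pure function so the SINCE/UNTIL
# boundaries can be unit-tested. It mirrors the code exactly: posts newer than
# UNTIL and posts at or before SINCE are skipped, keeping SINCE < date <= UNTIL.
def in_window(postdate, since=SINCE, until=UNTIL):
    return since < postdate <= until

assert in_window(datetime(2020, 5, 10, 12, 0))   # midday inside the window
assert not in_window(datetime(2020, 5, 10))      # exactly SINCE is skipped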
| 25
| 80
| 0.637576
|
from datetime import datetime
import instaloader
L = instaloader.Instaloader()
posts = instaloader.Hashtag.from_name(L.context, "urbanphotography").get_posts()
SINCE = datetime(2020, 5, 10)
UNTIL = datetime(2020, 5, 11)
k = 0
for post in posts:
postdate = post.date
if postdate > UNTIL:
continue
elif postdate <= SINCE:
k += 1
if k == 50:
break
else:
continue
else:
L.download_post(post, "#urbanphotography")
k = 0
| true
| true
|
1c48dcb2a20ec13231ae451adf850eda0f856561
| 2,084
|
py
|
Python
|
PyBank/main.py
|
KristianSHamilton/python-challenge
|
5fc9c62028fa5c792a48c3f7e758fac713b5bf4f
|
[
"MIT"
] | null | null | null |
PyBank/main.py
|
KristianSHamilton/python-challenge
|
5fc9c62028fa5c792a48c3f7e758fac713b5bf4f
|
[
"MIT"
] | null | null | null |
PyBank/main.py
|
KristianSHamilton/python-challenge
|
5fc9c62028fa5c792a48c3f7e758fac713b5bf4f
|
[
"MIT"
] | null | null | null |
import os
import csv
monthsTotal = 0
profitTotal = 0
profitDelta = 0
profitPrior = 0
profitDeltaTotal = 0
profitDeltaGreatest = 0
profitDeltaLeast = 0
csvPath = os.path.join( ".","Resources","budget_data.csv")
Financial_Analysis_Export = os.path.join(".", "Analysis","Financial_Analysis.txt")
#Read CSV from path
with open(csvPath) as csvFile:
csvReader = csv.reader(csvFile, delimiter = ",")
csvHeader = next(csvReader)
#skip headers in first row set data accordingly
firstRow = next(csvReader)
profitPrior = int(firstRow[1])
monthsTotal = 1
profitTotal = int(firstRow[1])
for row in csvReader:
#increments month variable for every row
monthsTotal = monthsTotal + 1
#adds current row's profit to the total profit variable
profitTotal = profitTotal + int(row[1])
#calcs change in price by subtracting the last row's profit from the current
profitDelta = int(row[1]) - profitPrior
#keeps a running total of the price changes by adding current row delta to total variable
profitDeltaTotal = profitDeltaTotal + profitDelta
#now that profitPrior has been used in current loop, sets variable for next loop
profitPrior = int(row[1])
#finds greatest Delta value
if profitDelta > profitDeltaGreatest:
profitDeltaGreatest = profitDelta
#finds smallest Delta value
if profitDelta < profitDeltaLeast:
profitDeltaLeast = profitDelta
# calc average change by dividing profit Delta by monthsTotal - 1 to account for nonexistent change on first month
avgChange = profitDeltaTotal/(monthsTotal - 1)
output = (
"Financial Analysis\n"
"-----------------------------\n"
f"Total Months: {monthsTotal}\n"
f"Total Profit: ${profitTotal}\n"
f"Average Change: ${avgChange}\n"
f"Greatest Increase in Profits: ${profitDeltaGreatest}\n"
f"Greatest Decrease in Profits: ${profitDeltaLeast}"
)
print(output)
#writes output to file
with open(Financial_Analysis_Export, "w") as txt_file:
txt_file.write(output)
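# Editorial sketch (hedged, not part of main.py): the average-change formula
# above on a toy series. With profits [100, 120, 90] the month-over-month
# deltas are [20, -30]; dividing by (months - 1) accounts for the first month
# having no prior month to compare against.
profits = [100, 120, 90]
deltas = [b - a for a, b in zip(profits, profits[1:])]   # [20, -30]
avgChangeExample = sum(deltas) / (len(profits) - 1)      # -5.0
assert avgChangeExample == -5.0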
| 33.079365
| 114
| 0.693378
|
import os
import csv
monthsTotal = 0
profitTotal = 0
profitDelta = 0
profitPrior = 0
profitDeltaTotal = 0
profitDeltaGreatest = 0
profitDeltaLeast = 0
csvPath = os.path.join( ".","Resources","budget_data.csv")
Financial_Analysis_Export = os.path.join(".", "Analysis","Financial_Analysis.txt")
with open(csvPath) as csvFile:
csvReader = csv.reader(csvFile, delimiter = ",")
csvHeader = next(csvReader)
firstRow = next(csvReader)
profitPrior = int(firstRow[1])
monthsTotal = 1
profitTotal = int(firstRow[1])
for row in csvReader:
monthsTotal = monthsTotal + 1
profitTotal = profitTotal + int(row[1])
#calcs change in price by subtracting the last row's profit from the current
profitDelta = int(row[1]) - profitPrior
profitDeltaTotal = profitDeltaTotal + profitDelta
profitPrior = int(row[1])
if profitDelta > profitDeltaGreatest:
profitDeltaGreatest = profitDelta
if profitDelta < profitDeltaLeast:
profitDeltaLeast = profitDelta
avgChange = profitDeltaTotal/(monthsTotal - 1)
output = (
"Financial Analysis\n"
"-----------------------------\n"
f"Total Months: {monthsTotal}\n"
f"Total Profit: ${profitTotal}\n"
f"Average Change: ${avgChange}\n"
f"Greatest Increase in Profits: ${profitDeltaGreatest}\n"
f"Greatest Decrease in Profits: ${profitDeltaLeast}"
)
print(output)
with open(Financial_Analysis_Export, "w") as txt_file:
txt_file.write(output)
| true
| true
|
1c48de3928d99316fdc7080094a41cbcec3a248f
| 5,721
|
py
|
Python
|
ceilometer/tests/functional/api/v2/test_list_samples_scenarios.py
|
maestro-hybrid-cloud/ceilometer
|
939cb080a193e14af8ceb44df3b631f5c2f6bf6d
|
[
"Apache-2.0"
] | null | null | null |
ceilometer/tests/functional/api/v2/test_list_samples_scenarios.py
|
maestro-hybrid-cloud/ceilometer
|
939cb080a193e14af8ceb44df3b631f5c2f6bf6d
|
[
"Apache-2.0"
] | null | null | null |
ceilometer/tests/functional/api/v2/test_list_samples_scenarios.py
|
maestro-hybrid-cloud/ceilometer
|
939cb080a193e14af8ceb44df3b631f5c2f6bf6d
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test listing raw samples.
"""
import datetime
import mock
from oslo_utils import timeutils
import six
from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer.tests import db as tests_db
from ceilometer.tests.functional.api import v2
class TestListSamples(v2.FunctionalTest,
tests_db.MixinTestsWithBackendScenarios):
def setUp(self):
super(TestListSamples, self).setUp()
patcher = mock.patch.object(timeutils, 'utcnow')
self.addCleanup(patcher.stop)
self.mock_utcnow = patcher.start()
self.mock_utcnow.return_value = datetime.datetime(2014, 2, 11, 16, 42)
self.sample1 = sample.Sample(
'instance',
'cumulative',
'',
1,
'user-id',
'project1',
'resource-id',
timestamp=datetime.datetime(2012, 7, 2, 10, 40),
resource_metadata={'display_name': 'test-server',
'tag': 'self.sample',
'dict_properties': {'key': 'value'},
'not_ignored_list': ['returned'],
},
source='test_source',
)
msg = utils.meter_message_from_counter(
self.sample1, self.CONF.publisher.telemetry_secret,
)
self.conn.record_metering_data(msg)
self.sample2 = sample.Sample(
'instance',
'cumulative',
'',
1,
'user-id2',
'project2',
'resource-id-alternate',
timestamp=datetime.datetime(2012, 7, 2, 10, 41),
resource_metadata={'display_name': 'test-server',
'tag': 'self.sample2',
},
source='source2',
)
msg2 = utils.meter_message_from_counter(
self.sample2, self.CONF.publisher.telemetry_secret,
)
self.conn.record_metering_data(msg2)
def test_all(self):
data = self.get_json('/meters/instance')
self.assertEqual(2, len(data))
for s in data:
self.assertEqual(timeutils.utcnow().isoformat(), s['recorded_at'])
def test_all_trailing_slash(self):
data = self.get_json('/meters/instance/')
self.assertEqual(2, len(data))
def test_empty_project(self):
data = self.get_json('/meters/instance',
q=[{'field': 'project_id',
'value': 'no-such-project',
}])
self.assertEqual([], data)
def test_by_project(self):
data = self.get_json('/meters/instance',
q=[{'field': 'project_id',
'value': 'project1',
}])
self.assertEqual(1, len(data))
def test_empty_resource(self):
data = self.get_json('/meters/instance',
q=[{'field': 'resource_id',
'value': 'no-such-resource',
}])
self.assertEqual([], data)
def test_by_resource(self):
data = self.get_json('/meters/instance',
q=[{'field': 'resource_id',
'value': 'resource-id',
}])
self.assertEqual(1, len(data))
def test_empty_source(self):
data = self.get_json('/meters/instance',
q=[{'field': 'source',
'value': 'no-such-source',
}])
self.assertEqual(0, len(data))
def test_by_source(self):
data = self.get_json('/meters/instance',
q=[{'field': 'source',
'value': 'test_source',
}])
self.assertEqual(1, len(data))
def test_empty_user(self):
data = self.get_json('/meters/instance',
q=[{'field': 'user_id',
'value': 'no-such-user',
}])
self.assertEqual([], data)
def test_by_user(self):
data = self.get_json('/meters/instance',
q=[{'field': 'user_id',
'value': 'user-id',
}])
self.assertEqual(1, len(data))
def test_metadata(self):
data = self.get_json('/meters/instance',
q=[{'field': 'resource_id',
'value': 'resource-id',
}])
sample = data[0]
self.assertIn('resource_metadata', sample)
self.assertEqual(
[('dict_properties.key', 'value'),
('display_name', 'test-server'),
('not_ignored_list', "['returned']"),
('tag', 'self.sample'),
],
list(sorted(six.iteritems(sample['resource_metadata']))))
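# Editorial note (hedged, not part of the test module): the q= filters above
# use the Ceilometer v2 simple-query shape -- a list of field/value dicts.
# The 'op' key is assumed to default to equality when omitted, which is why
# none of the tests set it explicitly.
example_query = [{'field': 'user_id', 'value': 'user-id'}]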
| 35.981132
| 78
| 0.500612
|
import datetime
import mock
from oslo_utils import timeutils
import six
from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer.tests import db as tests_db
from ceilometer.tests.functional.api import v2
class TestListSamples(v2.FunctionalTest,
tests_db.MixinTestsWithBackendScenarios):
def setUp(self):
super(TestListSamples, self).setUp()
patcher = mock.patch.object(timeutils, 'utcnow')
self.addCleanup(patcher.stop)
self.mock_utcnow = patcher.start()
self.mock_utcnow.return_value = datetime.datetime(2014, 2, 11, 16, 42)
self.sample1 = sample.Sample(
'instance',
'cumulative',
'',
1,
'user-id',
'project1',
'resource-id',
timestamp=datetime.datetime(2012, 7, 2, 10, 40),
resource_metadata={'display_name': 'test-server',
'tag': 'self.sample',
'dict_properties': {'key': 'value'},
'not_ignored_list': ['returned'],
},
source='test_source',
)
msg = utils.meter_message_from_counter(
self.sample1, self.CONF.publisher.telemetry_secret,
)
self.conn.record_metering_data(msg)
self.sample2 = sample.Sample(
'instance',
'cumulative',
'',
1,
'user-id2',
'project2',
'resource-id-alternate',
timestamp=datetime.datetime(2012, 7, 2, 10, 41),
resource_metadata={'display_name': 'test-server',
'tag': 'self.sample2',
},
source='source2',
)
msg2 = utils.meter_message_from_counter(
self.sample2, self.CONF.publisher.telemetry_secret,
)
self.conn.record_metering_data(msg2)
def test_all(self):
data = self.get_json('/meters/instance')
self.assertEqual(2, len(data))
for s in data:
self.assertEqual(timeutils.utcnow().isoformat(), s['recorded_at'])
def test_all_trailing_slash(self):
data = self.get_json('/meters/instance/')
self.assertEqual(2, len(data))
def test_empty_project(self):
data = self.get_json('/meters/instance',
q=[{'field': 'project_id',
'value': 'no-such-project',
}])
self.assertEqual([], data)
def test_by_project(self):
data = self.get_json('/meters/instance',
q=[{'field': 'project_id',
'value': 'project1',
}])
self.assertEqual(1, len(data))
def test_empty_resource(self):
data = self.get_json('/meters/instance',
q=[{'field': 'resource_id',
'value': 'no-such-resource',
}])
self.assertEqual([], data)
def test_by_resource(self):
data = self.get_json('/meters/instance',
q=[{'field': 'resource_id',
'value': 'resource-id',
}])
self.assertEqual(1, len(data))
def test_empty_source(self):
data = self.get_json('/meters/instance',
q=[{'field': 'source',
'value': 'no-such-source',
}])
self.assertEqual(0, len(data))
def test_by_source(self):
data = self.get_json('/meters/instance',
q=[{'field': 'source',
'value': 'test_source',
}])
self.assertEqual(1, len(data))
def test_empty_user(self):
data = self.get_json('/meters/instance',
q=[{'field': 'user_id',
'value': 'no-such-user',
}])
self.assertEqual([], data)
def test_by_user(self):
data = self.get_json('/meters/instance',
q=[{'field': 'user_id',
'value': 'user-id',
}])
self.assertEqual(1, len(data))
def test_metadata(self):
data = self.get_json('/meters/instance',
q=[{'field': 'resource_id',
'value': 'resource-id',
}])
sample = data[0]
self.assertIn('resource_metadata', sample)
self.assertEqual(
[('dict_properties.key', 'value'),
('display_name', 'test-server'),
('not_ignored_list', "['returned']"),
('tag', 'self.sample'),
],
list(sorted(six.iteritems(sample['resource_metadata']))))
| true
| true
|
1c48df74469efb5d2a9361dfd6676c5ce25809d5
| 1,722
|
py
|
Python
|
scripts/kbcontrol.py
|
zkytony/thortils
|
07ddfa6f6d09662094ba39343f89ba124c250e03
|
[
"MIT"
] | null | null | null |
scripts/kbcontrol.py
|
zkytony/thortils
|
07ddfa6f6d09662094ba39343f89ba124c250e03
|
[
"MIT"
] | null | null | null |
scripts/kbcontrol.py
|
zkytony/thortils
|
07ddfa6f6d09662094ba39343f89ba124c250e03
|
[
"MIT"
] | null | null | null |
# Keyboard control of Ai2Thor
import thortils
import thortils.constants as constants
from thortils.utils import getch
import argparse
import time
def print_controls(controls):
reverse = {controls[k]:k for k in controls}
    ss = f"""
{reverse['MoveAhead']}
(MoveAhead)
{reverse['RotateLeft']} {reverse['RotateRight']}
(RotateLeft) (RotateRight)
{reverse['LookUp']}
(LookUp)
{reverse['LookDown']}
(LookDown)
q
(quit)
"""
print(ss)
def main(init_func=None, step_func=None):
parser = argparse.ArgumentParser(
description="Keyboard control of agent in ai2thor")
parser.add_argument("-s", "--scene",
type=str, help="scene. E.g. FloorPlan1",
default="FloorPlan1")
args = parser.parse_args()
controls = {
"w": "MoveAhead",
"a": "RotateLeft",
"d": "RotateRight",
"e": "LookUp",
"c": "LookDown"
}
print_controls(controls)
controller = thortils.launch_controller({**constants.CONFIG, **{"scene": args.scene}})
    config = None
    if init_func is not None:
        config = init_func(controller)
while True:
k = getch()
if k == "q":
print("bye.")
break
if k in controls:
action = controls[k]
params = constants.MOVEMENT_PARAMS[action]
event = controller.step(action=action, **params)
event = controller.step(action="Pass")
if step_func is not None:
step_func(event, config)
print("{} | Agent pose: {}".format(k, thortils.thor_agent_pose(controller, as_tuple=True)))
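# Editorial sketch (hedged, not part of the original script): illustrative
# init_func/step_func hooks. init_func receives the controller once and
# returns an arbitrary config object; step_func is then called with each
# controller event and that config. The hook names below are invented for
# illustration.
def my_init(controller):
    return {"steps": 0}

def my_step(event, config):
    config["steps"] += 1
    print("steps so far:", config["steps"])

# main(init_func=my_init, step_func=my_step)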
if __name__ == "__main__":
main()
| 24.956522
| 103
| 0.573751
|
import thortils
import thortils.constants as constants
from thortils.utils import getch
import argparse
import time
def print_controls(controls):
reverse = {controls[k]:k for k in controls}
    ss = f"""
{reverse['MoveAhead']}
(MoveAhead)
{reverse['RotateLeft']} {reverse['RotateRight']}
(RotateLeft) (RotateRight)
{reverse['LookUp']}
(LookUp)
{reverse['LookDown']}
(LookDown)
q
(quit)
"""
print(ss)
def main(init_func=None, step_func=None):
parser = argparse.ArgumentParser(
description="Keyboard control of agent in ai2thor")
parser.add_argument("-s", "--scene",
type=str, help="scene. E.g. FloorPlan1",
default="FloorPlan1")
args = parser.parse_args()
controls = {
"w": "MoveAhead",
"a": "RotateLeft",
"d": "RotateRight",
"e": "LookUp",
"c": "LookDown"
}
print_controls(controls)
controller = thortils.launch_controller({**constants.CONFIG, **{"scene": args.scene}})
    config = None
    if init_func is not None:
        config = init_func(controller)
while True:
k = getch()
if k == "q":
print("bye.")
break
if k in controls:
action = controls[k]
params = constants.MOVEMENT_PARAMS[action]
event = controller.step(action=action, **params)
event = controller.step(action="Pass")
if step_func is not None:
step_func(event, config)
print("{} | Agent pose: {}".format(k, thortils.thor_agent_pose(controller, as_tuple=True)))
if __name__ == "__main__":
main()
| true
| true
|
1c48dfb24d4b32da11dc3b82cac98790cf672df3
| 10,254
|
py
|
Python
|
pyxform/tests_v1/test_randomize_itemsets.py
|
PMA-2020/pmaxform3
|
9d36f97f25cb09f0fb8aafb69370454731ecbbd5
|
[
"BSD-2-Clause"
] | 1
|
2020-10-19T15:37:36.000Z
|
2020-10-19T15:37:36.000Z
|
pyxform/tests_v1/test_randomize_itemsets.py
|
PMA-2020/pmaxform3
|
9d36f97f25cb09f0fb8aafb69370454731ecbbd5
|
[
"BSD-2-Clause"
] | 1
|
2022-03-16T13:48:25.000Z
|
2022-03-17T07:33:15.000Z
|
pyxform/tests_v1/test_randomize_itemsets.py
|
PMA-2020/pmaxform3
|
9d36f97f25cb09f0fb8aafb69370454731ecbbd5
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Test randomize itemsets.
"""
from pyxform.tests_v1.pyxform_test_case import PyxformTestCase
class RandomizeItemsetsTest(PyxformTestCase):
def test_randomized_select_one(self):
self.assertPyxformXform(
name="data",
md="""
| survey | | | | |
| | type | name | label | parameters |
| | select_one choices | select | Select| randomize=true |
| choices| | | | |
| | list_name | name | label | |
| | choices | a | opt_a | |
| | choices | b | opt_b | |
""",
xml__contains=[
"<itemset nodeset=\"randomize(instance('choices')/root/item)\">"
],
)
def test_randomized_seeded_select_one(self):
self.assertPyxformXform(
name="data",
md="""
| survey | | | | |
| | type | name | label | parameters |
| | select_one choices | select | Select| randomize=true, seed=42 |
| choices| | | | |
| | list_name | name | label | |
| | choices | a | opt_a | |
| | choices | b | opt_b | |
""",
xml__contains=[
"<itemset nodeset=\"randomize(instance('choices')/root/item, 42)\">"
],
)
def test_randomized_seeded_select_one_nameset_seed(self):
self.assertPyxformXform(
name="data",
md="""
| survey | | | | | |
| | type | name | label | parameters | calculation |
| | calculate | seed | | | once(decimal-date-time(now())) |
| | select_one choices | select | Select| randomize=true,seed=${seed} | |
| choices| | | | | |
| | list_name | name | label | | |
| | choices | a | opt_a | | |
| | choices | b | opt_b | | |
""",
xml__contains=[
"<itemset nodeset=\"randomize(instance('choices')/root/item, /data/seed)\">"
],
)
def test_randomized_seeded_filtered_select_one(self):
self.assertPyxformXform(
name="data",
md="""
| survey | | | | | |
| | type | name | label | parameters | choice_filter |
| | select_one choices | select | Select| randomize=true, seed=42 | name='a' |
| choices| | | | | |
| | list_name | name | label | | |
| | choices | a | opt_a | | |
| | choices | b | opt_b | | |
""",
xml__contains=[
"<itemset nodeset=\"randomize(instance('choices')/root/item[name='a'], 42)\">"
],
)
def test_randomized_select_multiple(self):
self.assertPyxformXform(
name="data",
md="""
| survey | | | | |
| | type | name | label | parameters |
| | select_multiple choices | select | Select| randomize=true |
| choices| | | | |
| | list_name | name | label | |
| | choices | a | opt_a | |
| | choices | b | opt_b | |
""",
xml__contains=[
"<itemset nodeset=\"randomize(instance('choices')/root/item)\">"
],
)
def test_randomized_seeded_select_multiple(self):
self.assertPyxformXform(
name="data",
md="""
| survey | | | | |
| | type | name | label | parameters |
| | select_multiple choices | select | Select| randomize=true, seed=42 |
| choices| | | | |
| | list_name | name | label | |
| | choices | a | opt_a | |
| | choices | b | opt_b | |
""",
xml__contains=[
"<itemset nodeset=\"randomize(instance('choices')/root/item, 42)\">"
],
)
def test_randomized_external_xml_instance(self):
self.assertPyxformXform(
name="ecsv",
md="""
| survey | | | | |
| | type | name | label | parameters |
| | select_one_from_file cities.xml | city | City | randomize=true |
""",
xml__contains=[
"<itemset nodeset=\"randomize(instance('cities')/root/item)\">"
],
)
def test_randomized_select_one_bad_param(self):
self.assertPyxformXform(
name="data",
errored="true",
md="""
| survey | | | | |
| | type | name | label | parameters |
| | select_one choices | select | Select| step=10 |
| choices| | | | |
| | list_name | name | label | |
| | choices | a | opt_a | |
| | choices | b | opt_b | |
""",
error__contains=[
"Accepted parameters are 'randomize, seed': 'step' is an invalid parameter."
],
)
def test_randomized_select_one_bad_randomize(self):
self.assertPyxformXform(
name="data",
errored="true",
md="""
| survey | | | | |
| | type | name | label | parameters |
| | select_one choices | select | Select| randomize=ukanga |
| choices| | | | |
| | list_name | name | label | |
| | choices | a | opt_a | |
| | choices | b | opt_b | |
""",
error__contains=[
"randomize must be set to true or false: 'ukanga' is an invalid value"
],
)
def test_randomized_select_one_bad_seed(self):
self.assertPyxformXform(
name="data",
errored="true",
md="""
| survey | | | | |
| | type | name | label | parameters |
| | select_one choices | select | Select| randomize=true, seed=ukanga |
| choices| | | | |
| | list_name | name | label | |
| | choices | a | opt_a | |
| | choices | b | opt_b | |
""",
error__contains=[
"seed value must be a number or a reference to another field."
],
)
def test_randomized_select_one_seed_without_randomize(self):
self.assertPyxformXform(
name="data",
errored="true",
md="""
| survey | | | | |
| | type | name | label | parameters |
| | select_one choices | select | Select| seed=42 |
| choices| | | | |
| | list_name | name | label | |
| | choices | a | opt_a | |
| | choices | b | opt_b | |
""",
error__contains=["Parameters must include randomize=true to use a seed."],
)
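# Editorial reference (hedged, not part of the test module): the parameter ->
# itemset nodeset mapping asserted by the tests above, collected in one place.
RANDOMIZE_EXAMPLES = {
    "randomize=true": "randomize(instance('choices')/root/item)",
    "randomize=true, seed=42": "randomize(instance('choices')/root/item, 42)",
    "randomize=true, seed=${seed}":
        "randomize(instance('choices')/root/item, /data/seed)",
}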
| 50.019512
| 125
| 0.302906
|
from pyxform.tests_v1.pyxform_test_case import PyxformTestCase
class RandomizeItemsetsTest(PyxformTestCase):
def test_randomized_select_one(self):
self.assertPyxformXform(
name="data",
md="""
| survey | | | | |
| | type | name | label | parameters |
| | select_one choices | select | Select| randomize=true |
| choices| | | | |
| | list_name | name | label | |
| | choices | a | opt_a | |
| | choices | b | opt_b | |
""",
xml__contains=[
"<itemset nodeset=\"randomize(instance('choices')/root/item)\">"
],
)
def test_randomized_seeded_select_one(self):
self.assertPyxformXform(
name="data",
md="""
| survey | | | | |
| | type | name | label | parameters |
| | select_one choices | select | Select| randomize=true, seed=42 |
| choices| | | | |
| | list_name | name | label | |
| | choices | a | opt_a | |
| | choices | b | opt_b | |
""",
xml__contains=[
"<itemset nodeset=\"randomize(instance('choices')/root/item, 42)\">"
],
)
def test_randomized_seeded_select_one_nameset_seed(self):
self.assertPyxformXform(
name="data",
md="""
| survey | | | | | |
| | type | name | label | parameters | calculation |
| | calculate | seed | | | once(decimal-date-time(now())) |
| | select_one choices | select | Select| randomize=true,seed=${seed} | |
| choices| | | | | |
| | list_name | name | label | | |
| | choices | a | opt_a | | |
| | choices | b | opt_b | | |
""",
xml__contains=[
"<itemset nodeset=\"randomize(instance('choices')/root/item, /data/seed)\">"
],
)
def test_randomized_seeded_filtered_select_one(self):
self.assertPyxformXform(
name="data",
md="""
| survey | | | | | |
| | type | name | label | parameters | choice_filter |
| | select_one choices | select | Select| randomize=true, seed=42 | name='a' |
| choices| | | | | |
| | list_name | name | label | | |
| | choices | a | opt_a | | |
| | choices | b | opt_b | | |
""",
xml__contains=[
"<itemset nodeset=\"randomize(instance('choices')/root/item[name='a'], 42)\">"
],
)
def test_randomized_select_multiple(self):
self.assertPyxformXform(
name="data",
md="""
| survey | | | | |
| | type | name | label | parameters |
| | select_multiple choices | select | Select| randomize=true |
| choices| | | | |
| | list_name | name | label | |
| | choices | a | opt_a | |
| | choices | b | opt_b | |
""",
xml__contains=[
"<itemset nodeset=\"randomize(instance('choices')/root/item)\">"
],
)
def test_randomized_seeded_select_multiple(self):
self.assertPyxformXform(
name="data",
md="""
| survey | | | | |
| | type | name | label | parameters |
| | select_multiple choices | select | Select| randomize=true, seed=42 |
| choices| | | | |
| | list_name | name | label | |
| | choices | a | opt_a | |
| | choices | b | opt_b | |
""",
xml__contains=[
"<itemset nodeset=\"randomize(instance('choices')/root/item, 42)\">"
],
)
def test_randomized_external_xml_instance(self):
self.assertPyxformXform(
name="ecsv",
md="""
| survey | | | | |
| | type | name | label | parameters |
| | select_one_from_file cities.xml | city | City | randomize=true |
""",
xml__contains=[
"<itemset nodeset=\"randomize(instance('cities')/root/item)\">"
],
)
def test_randomized_select_one_bad_param(self):
self.assertPyxformXform(
name="data",
errored="true",
md="""
| survey | | | | |
| | type | name | label | parameters |
| | select_one choices | select | Select| step=10 |
| choices| | | | |
| | list_name | name | label | |
| | choices | a | opt_a | |
| | choices | b | opt_b | |
""",
error__contains=[
"Accepted parameters are 'randomize, seed': 'step' is an invalid parameter."
],
)
def test_randomized_select_one_bad_randomize(self):
self.assertPyxformXform(
name="data",
errored="true",
md="""
| survey | | | | |
| | type | name | label | parameters |
| | select_one choices | select | Select| randomize=ukanga |
| choices| | | | |
| | list_name | name | label | |
| | choices | a | opt_a | |
| | choices | b | opt_b | |
""",
error__contains=[
"randomize must be set to true or false: 'ukanga' is an invalid value"
],
)
def test_randomized_select_one_bad_seed(self):
self.assertPyxformXform(
name="data",
errored="true",
md="""
| survey | | | | |
| | type | name | label | parameters |
| | select_one choices | select | Select| randomize=true, seed=ukanga |
| choices| | | | |
| | list_name | name | label | |
| | choices | a | opt_a | |
| | choices | b | opt_b | |
""",
error__contains=[
"seed value must be a number or a reference to another field."
],
)
def test_randomized_select_one_seed_without_randomize(self):
self.assertPyxformXform(
name="data",
errored="true",
md="""
| survey | | | | |
| | type | name | label | parameters |
| | select_one choices | select | Select| seed=42 |
| choices| | | | |
| | list_name | name | label | |
| | choices | a | opt_a | |
| | choices | b | opt_b | |
""",
error__contains=["Parameters must include randomize=true to use a seed."],
)
| true
| true
|
1c48e0dea15ab95cf58d0a4ccc4269251a9520c7
| 12,741
|
py
|
Python
|
dogepartylib/lib/messages/issuance.py
|
coinwarp/dogeparty-lib
|
1823db21b25de723448fb50957fbfe9ff8d092c9
|
[
"MIT"
] | 2
|
2016-01-31T18:13:11.000Z
|
2020-05-08T23:54:55.000Z
|
dogepartylib/lib/messages/issuance.py
|
coinwarp/dogeparty-lib
|
1823db21b25de723448fb50957fbfe9ff8d092c9
|
[
"MIT"
] | 1
|
2015-11-07T10:17:05.000Z
|
2015-11-07T10:17:05.000Z
|
dogepartylib/lib/messages/issuance.py
|
coinwarp/dogeparty-lib
|
1823db21b25de723448fb50957fbfe9ff8d092c9
|
[
"MIT"
] | 2
|
2015-11-03T19:12:02.000Z
|
2021-12-18T04:48:52.000Z
|
#! /usr/bin/python3
"""
Allow simultaneous lock and transfer.
"""
import struct
import decimal
D = decimal.Decimal
from dogepartylib.lib import (config, util, exceptions)
FORMAT_1 = '>QQ?'
LENGTH_1 = 8 + 8 + 1
FORMAT_2 = '>QQ??If'
LENGTH_2 = 8 + 8 + 1 + 1 + 4 + 4
ID = 20
# NOTE: Pascal strings are used for storing descriptions for backwards‐compatibility.
def initialise(db):
cursor = db.cursor()
cursor.execute('''CREATE TABLE IF NOT EXISTS issuances(
tx_index INTEGER PRIMARY KEY,
tx_hash TEXT UNIQUE,
block_index INTEGER,
asset TEXT,
quantity INTEGER,
divisible BOOL,
source TEXT,
issuer TEXT,
transfer BOOL,
callable BOOL,
call_date INTEGER,
call_price REAL,
description TEXT,
fee_paid INTEGER,
locked BOOL,
status TEXT,
FOREIGN KEY (tx_index, tx_hash, block_index) REFERENCES transactions(tx_index, tx_hash, block_index))
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
block_index_idx ON issuances (block_index)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
valid_asset_idx ON issuances (asset, status)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
status_idx ON issuances (status)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
source_idx ON issuances (source)
''')
def validate (db, source, destination, asset, quantity, divisible, callable_, call_date, call_price, description, block_index):
problems = []
fee = 0
if asset in (config.BTC, config.XCP):
problems.append('cannot issue {} or {}'.format(config.BTC, config.XCP))
if call_date is None: call_date = 0
if call_price is None: call_price = 0.0
if description is None: description = ""
if divisible is None: divisible = True
if isinstance(call_price, int): call_price = float(call_price)
#^ helps especially with calls from JS‐based clients, where parseFloat(15) returns 15 (not 15.0), which json takes as an int
if not isinstance(quantity, int):
problems.append('quantity must be in satoshis')
return call_date, call_price, problems, fee, description, divisible, None
if call_date and not isinstance(call_date, int):
problems.append('call_date must be epoch integer')
return call_date, call_price, problems, fee, description, divisible, None
if call_price and not isinstance(call_price, float):
problems.append('call_price must be a float')
return call_date, call_price, problems, fee, description, divisible, None
if quantity < 0: problems.append('negative quantity')
if call_price < 0: problems.append('negative call price')
if call_date < 0: problems.append('negative call date')
# Callable, or not.
if not callable_:
if block_index >= 312500 or config.TESTNET: # Protocol change.
call_date = 0
call_price = 0.0
elif block_index >= 310000: # Protocol change.
if call_date:
problems.append('call date for non‐callable asset')
if call_price:
problems.append('call price for non‐callable asset')
# Valid re-issuance?
cursor = db.cursor()
cursor.execute('''SELECT * FROM issuances \
WHERE (status = ? AND asset = ?)
ORDER BY tx_index ASC''', ('valid', asset))
issuances = cursor.fetchall()
cursor.close()
if issuances:
reissuance = True
last_issuance = issuances[-1]
if last_issuance['issuer'] != source:
problems.append('issued by another address')
if bool(last_issuance['divisible']) != bool(divisible):
problems.append('cannot change divisibility')
if bool(last_issuance['callable']) != bool(callable_):
problems.append('cannot change callability')
if last_issuance['call_date'] > call_date and (call_date != 0 or (block_index < 312500 and not config.TESTNET)):
problems.append('cannot advance call date')
if last_issuance['call_price'] > call_price:
problems.append('cannot reduce call price')
if last_issuance['locked'] and quantity:
problems.append('locked asset and non‐zero quantity')
else:
reissuance = False
if description.lower() == 'lock':
problems.append('cannot lock a non‐existent asset')
if destination:
problems.append('cannot transfer a non‐existent asset')
# Check for existence of fee funds.
if quantity or (block_index >= 315000 or config.TESTNET): # Protocol change.
if not reissuance or (block_index < 310000 and not config.TESTNET): # Pay fee only upon first issuance. (Protocol change.)
cursor = db.cursor()
cursor.execute('''SELECT * FROM balances \
WHERE (address = ? AND asset = ?)''', (source, config.XCP))
balances = cursor.fetchall()
cursor.close()
if util.enabled('numeric_asset_names'): # Protocol change.
if len(asset) > config.NAMED_ASSET_MAXLEN:
fee = 0
else:
fee = int(0.5 * config.UNIT)
elif block_index >= 291700 or config.TESTNET: # Protocol change.
fee = int(0.5 * config.UNIT)
elif block_index >= 286000 or config.TESTNET: # Protocol change.
fee = 5 * config.UNIT
elif block_index > 281236 or config.TESTNET: # Protocol change.
fee = 5
if fee and (not balances or balances[0]['quantity'] < fee):
problems.append('insufficient funds')
if not (block_index >= 317500 or config.TESTNET): # Protocol change.
if len(description) > 42:
problems.append('description too long')
# For SQLite3
call_date = min(call_date, config.MAX_INT)
total = sum([issuance['quantity'] for issuance in issuances])
assert isinstance(quantity, int)
if total + quantity > config.MAX_INT:
problems.append('total quantity overflow')
if destination and quantity:
problems.append('cannot issue and transfer simultaneously')
return call_date, call_price, problems, fee, description, divisible, reissuance
def compose (db, source, transfer_destination, asset, quantity, divisible, description):
# Callability is deprecated, so for re‐issuances set relevant parameters
# to old values; for first issuances, make uncallable.
cursor = db.cursor()
cursor.execute('''SELECT * FROM issuances \
WHERE (status = ? AND asset = ?)
ORDER BY tx_index ASC''', ('valid', asset))
issuances = cursor.fetchall()
if issuances:
last_issuance = issuances[-1]
callable_ = last_issuance['callable']
call_date = last_issuance['call_date']
call_price = last_issuance['call_price']
else:
callable_ = False
call_date = 0
call_price = 0.0
cursor.close()
call_date, call_price, problems, fee, description, divisible, reissuance = validate(db, source, transfer_destination, asset, quantity, divisible, callable_, call_date, call_price, description, util.CURRENT_BLOCK_INDEX)
if problems: raise exceptions.ComposeError(problems)
asset_id = util.generate_asset_id(asset, util.CURRENT_BLOCK_INDEX)
data = struct.pack(config.TXTYPE_FORMAT, ID)
if len(description) <= 42:
curr_format = FORMAT_2 + '{}p'.format(len(description) + 1)
else:
curr_format = FORMAT_2 + '{}s'.format(len(description))
data += struct.pack(curr_format, asset_id, quantity, 1 if divisible else 0, 1 if callable_ else 0,
call_date or 0, call_price or 0.0, description.encode('utf-8'))
if transfer_destination:
destination_outputs = [(transfer_destination, None)]
else:
destination_outputs = []
return (source, destination_outputs, data)
def parse (db, tx, message):
issuance_parse_cursor = db.cursor()
# Unpack message.
try:
if (tx['block_index'] > 283271 or config.TESTNET) and len(message) >= LENGTH_2: # Protocol change.
if len(message) - LENGTH_2 <= 42:
curr_format = FORMAT_2 + '{}p'.format(len(message) - LENGTH_2)
else:
curr_format = FORMAT_2 + '{}s'.format(len(message) - LENGTH_2)
asset_id, quantity, divisible, callable_, call_date, call_price, description = struct.unpack(curr_format, message)
call_price = round(call_price, 6) # TODO: arbitrary
try:
description = description.decode('utf-8')
except UnicodeDecodeError:
description = ''
else:
if len(message) != LENGTH_1:
raise exceptions.UnpackError
asset_id, quantity, divisible = struct.unpack(FORMAT_1, message)
callable_, call_date, call_price, description = False, 0, 0.0, ''
        try:
            asset = util.generate_asset_name(asset_id, tx['block_index'])
            status = 'valid'
        except exceptions.AssetNameError:
            asset = None
            status = 'invalid: bad asset name'
except exceptions.UnpackError as e:
asset, quantity, divisible, callable_, call_date, call_price, description = None, None, None, None, None, None, None
status = 'invalid: could not unpack'
fee = 0
if status == 'valid':
call_date, call_price, problems, fee, description, divisible, reissuance = validate(db, tx['source'], tx['destination'], asset, quantity, divisible, callable_, call_date, call_price, description, block_index=tx['block_index'])
if problems: status = 'invalid: ' + '; '.join(problems)
if 'total quantity overflow' in problems:
quantity = 0
if tx['destination']:
issuer = tx['destination']
transfer = True
quantity = 0
else:
issuer = tx['source']
transfer = False
# Debit fee.
if status == 'valid':
util.debit(db, tx['source'], config.XCP, fee, action="issuance fee", event=tx['tx_hash'])
# Lock?
lock = False
if status == 'valid':
if description and description.lower() == 'lock':
lock = True
cursor = db.cursor()
issuances = list(cursor.execute('''SELECT * FROM issuances \
WHERE (status = ? AND asset = ?)
ORDER BY tx_index ASC''', ('valid', asset)))
cursor.close()
description = issuances[-1]['description'] # Use last description. (Assume previous issuance exists because tx is valid.)
timestamp, value_int, fee_fraction_int = None, None, None
if not reissuance:
# Add to table of assets.
bindings= {
'asset_id': str(asset_id),
'asset_name': str(asset),
'block_index': tx['block_index'],
}
sql='insert into assets values(:asset_id, :asset_name, :block_index)'
issuance_parse_cursor.execute(sql, bindings)
# Add parsed transaction to message-type–specific table.
bindings= {
'tx_index': tx['tx_index'],
'tx_hash': tx['tx_hash'],
'block_index': tx['block_index'],
'asset': asset,
'quantity': quantity,
'divisible': divisible,
'source': tx['source'],
'issuer': issuer,
'transfer': transfer,
'callable': callable_,
'call_date': call_date,
'call_price': call_price,
'description': description,
'fee_paid': fee,
'locked': lock,
'status': status,
}
sql='insert into issuances values(:tx_index, :tx_hash, :block_index, :asset, :quantity, :divisible, :source, :issuer, :transfer, :callable, :call_date, :call_price, :description, :fee_paid, :locked, :status)'
issuance_parse_cursor.execute(sql, bindings)
# Credit.
if status == 'valid' and quantity:
util.credit(db, tx['source'], asset, quantity, action="issuance", event=tx['tx_hash'])
issuance_parse_cursor.close()
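# Editorial sketch (hedged, not part of the module): how the FORMAT_2
# Pascal-string packing used by compose()/parse() round-trips. The 'p' code
# packs a length byte followed by the string bytes, which is why compose()
# sizes the format as len(description) + 1.
example_desc = b'TEST'
example_fmt = FORMAT_2 + '{}p'.format(len(example_desc) + 1)   # '>QQ??If5p'
example_packed = struct.pack(example_fmt, 1, 1000, True, False, 0, 0.0, example_desc)
assert struct.unpack(example_fmt, example_packed)[-1] == example_desc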
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| 42.188742
| 234
| 0.599874
|
import struct
import decimal
D = decimal.Decimal
from dogepartylib.lib import (config, util, exceptions)
FORMAT_1 = '>QQ?'
LENGTH_1 = 8 + 8 + 1
FORMAT_2 = '>QQ??If'
LENGTH_2 = 8 + 8 + 1 + 1 + 4 + 4
ID = 20
def initialise(db):
cursor = db.cursor()
cursor.execute('''CREATE TABLE IF NOT EXISTS issuances(
tx_index INTEGER PRIMARY KEY,
tx_hash TEXT UNIQUE,
block_index INTEGER,
asset TEXT,
quantity INTEGER,
divisible BOOL,
source TEXT,
issuer TEXT,
transfer BOOL,
callable BOOL,
call_date INTEGER,
call_price REAL,
description TEXT,
fee_paid INTEGER,
locked BOOL,
status TEXT,
FOREIGN KEY (tx_index, tx_hash, block_index) REFERENCES transactions(tx_index, tx_hash, block_index))
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
block_index_idx ON issuances (block_index)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
valid_asset_idx ON issuances (asset, status)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
status_idx ON issuances (status)
''')
cursor.execute('''CREATE INDEX IF NOT EXISTS
source_idx ON issuances (source)
''')
def validate (db, source, destination, asset, quantity, divisible, callable_, call_date, call_price, description, block_index):
problems = []
fee = 0
if asset in (config.BTC, config.XCP):
problems.append('cannot issue {} or {}'.format(config.BTC, config.XCP))
if call_date is None: call_date = 0
if call_price is None: call_price = 0.0
if description is None: description = ""
if divisible is None: divisible = True
if isinstance(call_price, int): call_price = float(call_price)
if not isinstance(quantity, int):
problems.append('quantity must be in satoshis')
return call_date, call_price, problems, fee, description, divisible, None
if call_date and not isinstance(call_date, int):
problems.append('call_date must be epoch integer')
return call_date, call_price, problems, fee, description, divisible, None
if call_price and not isinstance(call_price, float):
problems.append('call_price must be a float')
return call_date, call_price, problems, fee, description, divisible, None
if quantity < 0: problems.append('negative quantity')
if call_price < 0: problems.append('negative call price')
if call_date < 0: problems.append('negative call date')
if not callable_:
        if block_index >= 312500 or config.TESTNET:
            call_date = 0
            call_price = 0.0
        elif block_index >= 310000:
            if call_date:
problems.append('call date for non‐callable asset')
if call_price:
problems.append('call price for non‐callable asset')
cursor = db.cursor()
cursor.execute('''SELECT * FROM issuances \
WHERE (status = ? AND asset = ?)
ORDER BY tx_index ASC''', ('valid', asset))
issuances = cursor.fetchall()
cursor.close()
if issuances:
reissuance = True
last_issuance = issuances[-1]
if last_issuance['issuer'] != source:
problems.append('issued by another address')
if bool(last_issuance['divisible']) != bool(divisible):
problems.append('cannot change divisibility')
if bool(last_issuance['callable']) != bool(callable_):
problems.append('cannot change callability')
if last_issuance['call_date'] > call_date and (call_date != 0 or (block_index < 312500 and not config.TESTNET)):
problems.append('cannot advance call date')
if last_issuance['call_price'] > call_price:
problems.append('cannot reduce call price')
if last_issuance['locked'] and quantity:
problems.append('locked asset and non‐zero quantity')
else:
reissuance = False
if description.lower() == 'lock':
problems.append('cannot lock a non‐existent asset')
if destination:
problems.append('cannot transfer a non‐existent asset')
    if quantity or (block_index >= 315000 or config.TESTNET):
        if not reissuance or (block_index < 310000 and not config.TESTNET):
            cursor = db.cursor()
cursor.execute('''SELECT * FROM balances \
WHERE (address = ? AND asset = ?)''', (source, config.XCP))
balances = cursor.fetchall()
cursor.close()
            if util.enabled('numeric_asset_names'):
                if len(asset) > config.NAMED_ASSET_MAXLEN:
fee = 0
else:
fee = int(0.5 * config.UNIT)
            elif block_index >= 291700 or config.TESTNET:
                fee = int(0.5 * config.UNIT)
            elif block_index >= 286000 or config.TESTNET:
                fee = 5 * config.UNIT
            elif block_index > 281236 or config.TESTNET:
                fee = 5
if fee and (not balances or balances[0]['quantity'] < fee):
problems.append('insufficient funds')
    if not (block_index >= 317500 or config.TESTNET):
        if len(description) > 42:
problems.append('description too long')
call_date = min(call_date, config.MAX_INT)
total = sum([issuance['quantity'] for issuance in issuances])
assert isinstance(quantity, int)
if total + quantity > config.MAX_INT:
problems.append('total quantity overflow')
if destination and quantity:
problems.append('cannot issue and transfer simultaneously')
return call_date, call_price, problems, fee, description, divisible, reissuance
def compose (db, source, transfer_destination, asset, quantity, divisible, description):
cursor = db.cursor()
cursor.execute('''SELECT * FROM issuances \
WHERE (status = ? AND asset = ?)
ORDER BY tx_index ASC''', ('valid', asset))
issuances = cursor.fetchall()
if issuances:
last_issuance = issuances[-1]
callable_ = last_issuance['callable']
call_date = last_issuance['call_date']
call_price = last_issuance['call_price']
else:
callable_ = False
call_date = 0
call_price = 0.0
cursor.close()
call_date, call_price, problems, fee, description, divisible, reissuance = validate(db, source, transfer_destination, asset, quantity, divisible, callable_, call_date, call_price, description, util.CURRENT_BLOCK_INDEX)
if problems: raise exceptions.ComposeError(problems)
asset_id = util.generate_asset_id(asset, util.CURRENT_BLOCK_INDEX)
data = struct.pack(config.TXTYPE_FORMAT, ID)
if len(description) <= 42:
curr_format = FORMAT_2 + '{}p'.format(len(description) + 1)
else:
curr_format = FORMAT_2 + '{}s'.format(len(description))
data += struct.pack(curr_format, asset_id, quantity, 1 if divisible else 0, 1 if callable_ else 0,
call_date or 0, call_price or 0.0, description.encode('utf-8'))
if transfer_destination:
destination_outputs = [(transfer_destination, None)]
else:
destination_outputs = []
return (source, destination_outputs, data)
def parse (db, tx, message):
issuance_parse_cursor = db.cursor()
try:
        if (tx['block_index'] > 283271 or config.TESTNET) and len(message) >= LENGTH_2:
            if len(message) - LENGTH_2 <= 42:
curr_format = FORMAT_2 + '{}p'.format(len(message) - LENGTH_2)
else:
curr_format = FORMAT_2 + '{}s'.format(len(message) - LENGTH_2)
asset_id, quantity, divisible, callable_, call_date, call_price, description = struct.unpack(curr_format, message)
            call_price = round(call_price, 6)
            try:
description = description.decode('utf-8')
except UnicodeDecodeError:
description = ''
else:
if len(message) != LENGTH_1:
raise exceptions.UnpackError
asset_id, quantity, divisible = struct.unpack(FORMAT_1, message)
callable_, call_date, call_price, description = False, 0, 0.0, ''
        try:
            asset = util.generate_asset_name(asset_id, tx['block_index'])
            status = 'valid'
        except exceptions.AssetNameError:
            asset = None
            status = 'invalid: bad asset name'
except exceptions.UnpackError as e:
asset, quantity, divisible, callable_, call_date, call_price, description = None, None, None, None, None, None, None
status = 'invalid: could not unpack'
fee = 0
if status == 'valid':
call_date, call_price, problems, fee, description, divisible, reissuance = validate(db, tx['source'], tx['destination'], asset, quantity, divisible, callable_, call_date, call_price, description, block_index=tx['block_index'])
if problems: status = 'invalid: ' + '; '.join(problems)
if 'total quantity overflow' in problems:
quantity = 0
if tx['destination']:
issuer = tx['destination']
transfer = True
quantity = 0
else:
issuer = tx['source']
transfer = False
if status == 'valid':
util.debit(db, tx['source'], config.XCP, fee, action="issuance fee", event=tx['tx_hash'])
lock = False
if status == 'valid':
if description and description.lower() == 'lock':
lock = True
cursor = db.cursor()
issuances = list(cursor.execute('''SELECT * FROM issuances \
WHERE (status = ? AND asset = ?)
ORDER BY tx_index ASC''', ('valid', asset)))
cursor.close()
            description = issuances[-1]['description']
    timestamp, value_int, fee_fraction_int = None, None, None
if not reissuance:
bindings= {
'asset_id': str(asset_id),
'asset_name': str(asset),
'block_index': tx['block_index'],
}
sql='insert into assets values(:asset_id, :asset_name, :block_index)'
issuance_parse_cursor.execute(sql, bindings)
bindings= {
'tx_index': tx['tx_index'],
'tx_hash': tx['tx_hash'],
'block_index': tx['block_index'],
'asset': asset,
'quantity': quantity,
'divisible': divisible,
'source': tx['source'],
'issuer': issuer,
'transfer': transfer,
'callable': callable_,
'call_date': call_date,
'call_price': call_price,
'description': description,
'fee_paid': fee,
'locked': lock,
'status': status,
}
sql='insert into issuances values(:tx_index, :tx_hash, :block_index, :asset, :quantity, :divisible, :source, :issuer, :transfer, :callable, :call_date, :call_price, :description, :fee_paid, :locked, :status)'
issuance_parse_cursor.execute(sql, bindings)
if status == 'valid' and quantity:
util.credit(db, tx['source'], asset, quantity, action="issuance", event=tx['tx_hash'])
issuance_parse_cursor.close()
| true
| true
|
1c48e156aedf36e3dbe9148bea6fd63d46a9b547
| 160
|
py
|
Python
|
example/__init__.py
|
roberto-prevato-test-org/GitHubActionsLab
|
d74029bb6c3b09735f6ef55784cc7d3c5b94e58e
|
[
"MIT"
] | 1
|
2020-01-31T05:04:45.000Z
|
2020-01-31T05:04:45.000Z
|
example/__init__.py
|
roberto-prevato-test-org/GitHubActionsLab
|
d74029bb6c3b09735f6ef55784cc7d3c5b94e58e
|
[
"MIT"
] | 6
|
2020-02-05T07:10:44.000Z
|
2020-06-06T20:00:09.000Z
|
example/__init__.py
|
RobertoPrevato/GitHubActionsLab
|
d74029bb6c3b09735f6ef55784cc7d3c5b94e58e
|
[
"MIT"
] | null | null | null |
class Foo:
def __init__(self):
...
def not_tested(self) -> Ellipsis:
return ...
def __str__(self) -> str:
return 'foo'
| 12.307692
| 37
| 0.5
|
class Foo:
def __init__(self):
...
def not_tested(self) -> Ellipsis:
return ...
def __str__(self) -> str:
return 'foo'
| true
| true
|
1c48e2dfb424d480b636e141144ad4ac767afbd8
| 52,008
|
py
|
Python
|
rllib/agents/trainer.py
|
AnesBenmerzoug/ray
|
5921e87ecd4e359fad60dab55f45855456d591e5
|
[
"Apache-2.0"
] | null | null | null |
rllib/agents/trainer.py
|
AnesBenmerzoug/ray
|
5921e87ecd4e359fad60dab55f45855456d591e5
|
[
"Apache-2.0"
] | null | null | null |
rllib/agents/trainer.py
|
AnesBenmerzoug/ray
|
5921e87ecd4e359fad60dab55f45855456d591e5
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
import numpy as np
import copy
import logging
import math
import os
import pickle
import time
import tempfile
from typing import Callable, Dict, List, Optional, Type, Union
import ray
from ray.exceptions import RayError
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.env.normalize_actions import NormalizeActionWrapper
from ray.rllib.env.env_context import EnvContext
from ray.rllib.models import MODEL_DEFAULTS
from ray.rllib.policy import Policy
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.evaluation.metrics import collect_metrics
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.utils import FilterManager, deep_update, merge_dicts
from ray.rllib.utils.spaces import space_utils
from ray.rllib.utils.framework import try_import_tf, TensorStructType
from ray.rllib.utils.annotations import override, PublicAPI, DeveloperAPI
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.typing import TrainerConfigDict, \
PartialTrainerConfigDict, EnvInfoDict, ResultDict, EnvType, PolicyID
from ray.tune.registry import ENV_CREATOR, register_env, _global_registry
from ray.tune.trainable import Trainable
from ray.tune.trial import ExportFormat
from ray.tune.resources import Resources
from ray.tune.logger import Logger, UnifiedLogger
from ray.tune.result import DEFAULT_RESULTS_DIR
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
# Max number of times to retry a worker failure. We shouldn't try too many
# times in a row since that would indicate a persistent cluster issue.
MAX_WORKER_FAILURE_RETRIES = 3
# yapf: disable
# __sphinx_doc_begin__
COMMON_CONFIG: TrainerConfigDict = {
# === Settings for Rollout Worker processes ===
# Number of rollout worker actors to create for parallel sampling. Setting
# this to 0 will force rollouts to be done in the trainer actor.
"num_workers": 2,
# Number of environments to evaluate vectorwise per worker. This enables
# model inference batching, which can improve performance for inference
# bottlenecked workloads.
"num_envs_per_worker": 1,
# Divide episodes into fragments of this many steps each during rollouts.
# Sample batches of this size are collected from rollout workers and
# combined into a larger batch of `train_batch_size` for learning.
#
# For example, given rollout_fragment_length=100 and train_batch_size=1000:
# 1. RLlib collects 10 fragments of 100 steps each from rollout workers.
# 2. These fragments are concatenated and we perform an epoch of SGD.
#
# When using multiple envs per worker, the fragment size is multiplied by
# `num_envs_per_worker`. This is since we are collecting steps from
# multiple envs in parallel. For example, if num_envs_per_worker=5, then
# rollout workers will return experiences in chunks of 5*100 = 500 steps.
#
# The dataflow here can vary per algorithm. For example, PPO further
# divides the train batch into minibatches for multi-epoch SGD.
"rollout_fragment_length": 200,
# Whether to rollout "complete_episodes" or "truncate_episodes" to
# `rollout_fragment_length` length unrolls. Episode truncation guarantees
# evenly sized batches, but increases variance as the reward-to-go will
# need to be estimated at truncation boundaries.
"batch_mode": "truncate_episodes",
# === Settings for the Trainer process ===
# Number of GPUs to allocate to the trainer process. Note that not all
# algorithms can take advantage of trainer GPUs. This can be fractional
# (e.g., 0.3 GPUs).
"num_gpus": 0,
# Training batch size, if applicable. Should be >= rollout_fragment_length.
# Samples batches will be concatenated together to a batch of this size,
# which is then passed to SGD.
"train_batch_size": 200,
# Arguments to pass to the policy model. See models/catalog.py for a full
# list of the available model options.
"model": MODEL_DEFAULTS,
# Arguments to pass to the policy optimizer. These vary by optimizer.
"optimizer": {},
# === Environment Settings ===
# Discount factor of the MDP.
"gamma": 0.99,
# Number of steps after which the episode is forced to terminate. Defaults
# to `env.spec.max_episode_steps` (if present) for Gym envs.
"horizon": None,
# Calculate rewards but don't reset the environment when the horizon is
# hit. This allows value estimation and RNN state to span across logical
# episodes denoted by horizon. This only has an effect if horizon != inf.
"soft_horizon": False,
# Don't set 'done' at the end of the episode. Note that you still need to
# set this if soft_horizon=True, unless your env is actually running
# forever without returning done=True.
"no_done_at_end": False,
# Arguments to pass to the env creator.
"env_config": {},
# Environment name can also be passed via config.
"env": None,
    # Unsquash actions to the upper and lower bounds of the env's action space.
"normalize_actions": False,
# Whether to clip rewards during Policy's postprocessing.
# None (default): Clip for Atari only (r=sign(r)).
# True: r=sign(r): Fixed rewards -1.0, 1.0, or 0.0.
# False: Never clip.
# [float value]: Clip at -value and + value.
# Tuple[value1, value2]: Clip at value1 and value2.
"clip_rewards": None,
# Whether to clip actions to the action space's low/high range spec.
"clip_actions": True,
# Whether to use "rllib" or "deepmind" preprocessors by default
"preprocessor_pref": "deepmind",
# The default learning rate.
"lr": 0.0001,
# === Debug Settings ===
# Whether to write episode stats and videos to the agent log dir. This is
# typically located in ~/ray_results.
"monitor": False,
# Set the ray.rllib.* log level for the agent process and its workers.
# Should be one of DEBUG, INFO, WARN, or ERROR. The DEBUG level will also
# periodically print out summaries of relevant internal dataflow (this is
# also printed out once at startup at the INFO level). When using the
# `rllib train` command, you can also use the `-v` and `-vv` flags as
# shorthand for INFO and DEBUG.
"log_level": "WARN",
# Callbacks that will be run during various phases of training. See the
# `DefaultCallbacks` class and `examples/custom_metrics_and_callbacks.py`
# for more usage information.
"callbacks": DefaultCallbacks,
# Whether to attempt to continue training if a worker crashes. The number
# of currently healthy workers is reported as the "num_healthy_workers"
# metric.
"ignore_worker_failures": False,
# Log system resource metrics to results. This requires `psutil` to be
# installed for sys stats, and `gputil` for GPU metrics.
"log_sys_usage": True,
# Use fake (infinite speed) sampler. For testing only.
"fake_sampler": False,
# === Deep Learning Framework Settings ===
# tf: TensorFlow
# tfe: TensorFlow eager
# torch: PyTorch
"framework": "tf",
# Enable tracing in eager mode. This greatly improves performance, but
# makes it slightly harder to debug since Python code won't be evaluated
# after the initial eager pass. Only possible if framework=tfe.
"eager_tracing": False,
# === Exploration Settings ===
# Default exploration behavior, iff `explore`=None is passed into
# compute_action(s).
# Set to False for no exploration behavior (e.g., for evaluation).
"explore": True,
# Provide a dict specifying the Exploration object's config.
"exploration_config": {
# The Exploration class to use. In the simplest case, this is the name
# (str) of any class present in the `rllib.utils.exploration` package.
# You can also provide the python class directly or the full location
# of your class (e.g. "ray.rllib.utils.exploration.epsilon_greedy.
# EpsilonGreedy").
"type": "StochasticSampling",
# Add constructor kwargs here (if any).
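        # Example (a hypothetical sketch, assuming the EpsilonGreedy class
        # in `rllib.utils.exploration`): an epsilon-greedy schedule instead
        # of the default stochastic sampling:
        #   "type": "EpsilonGreedy",
        #   "initial_epsilon": 1.0,
        #   "final_epsilon": 0.02,
        #   "epsilon_timesteps": 10000,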
},
# === Evaluation Settings ===
# Evaluate with every `evaluation_interval` training iterations.
# The evaluation stats will be reported under the "evaluation" metric key.
    # Evaluation runs in the trainer process unless `evaluation_num_workers`
    # is set to > 0. Note that for Ape-X, metrics are already only reported
    # for the lowest epsilon workers.
"evaluation_interval": None,
# Number of episodes to run per evaluation period. If using multiple
# evaluation workers, we will run at least this many episodes total.
"evaluation_num_episodes": 10,
# Internal flag that is set to True for evaluation workers.
"in_evaluation": False,
# Typical usage is to pass extra args to evaluation env creator
# and to disable exploration by computing deterministic actions.
# IMPORTANT NOTE: Policy gradient algorithms are able to find the optimal
# policy, even if this is a stochastic one. Setting "explore=False" here
# will result in the evaluation workers not using this optimal policy!
"evaluation_config": {
# Example: overriding env_config, exploration, etc:
# "env_config": {...},
# "explore": False
},
# Number of parallel workers to use for evaluation. Note that this is set
# to zero by default, which means evaluation will be run in the trainer
# process. If you increase this, it will increase the Ray resource usage
# of the trainer since evaluation workers are created separately from
# rollout workers.
"evaluation_num_workers": 0,
# Customize the evaluation method. This must be a function of signature
# (trainer: Trainer, eval_workers: WorkerSet) -> metrics: dict. See the
# Trainer._evaluate() method to see the default implementation. The
# trainer guarantees all eval workers have the latest policy state before
# this function is called.
"custom_eval_function": None,
# === Advanced Rollout Settings ===
# Use a background thread for sampling (slightly off-policy, usually not
# advisable to turn on unless your env specifically requires it).
"sample_async": False,
# Experimental flag to speed up sampling and use "trajectory views" as
# generic ModelV2 `input_dicts` that can be requested by the model to
# contain different information on the ongoing episode.
# NOTE: Only supported for PyTorch so far.
"_use_trajectory_view_api": False,
# Element-wise observation filter, either "NoFilter" or "MeanStdFilter".
"observation_filter": "NoFilter",
# Whether to synchronize the statistics of remote filters.
"synchronize_filters": True,
# Configures TF for single-process operation by default.
"tf_session_args": {
        # note: overridden by `local_tf_session_args`
"intra_op_parallelism_threads": 2,
"inter_op_parallelism_threads": 2,
"gpu_options": {
"allow_growth": True,
},
"log_device_placement": False,
"device_count": {
"CPU": 1
},
"allow_soft_placement": True, # required by PPO multi-gpu
},
# Override the following tf session args on the local worker
"local_tf_session_args": {
# Allow a higher level of parallelism by default, but not unlimited
# since that can cause crashes with many concurrent drivers.
"intra_op_parallelism_threads": 8,
"inter_op_parallelism_threads": 8,
},
# Whether to LZ4 compress individual observations
"compress_observations": False,
# Wait for metric batches for at most this many seconds. Those that
# have not returned in time will be collected in the next train iteration.
"collect_metrics_timeout": 180,
# Smooth metrics over this many episodes.
"metrics_smoothing_episodes": 100,
# If using num_envs_per_worker > 1, whether to create those new envs in
# remote processes instead of in the same worker. This adds overheads, but
    # can make sense if your envs take a long time to step / reset
# (e.g., for StarCraft). Use this cautiously; overheads are significant.
"remote_worker_envs": False,
# Timeout that remote workers are waiting when polling environments.
# 0 (continue when at least one env is ready) is a reasonable default,
    # but the optimal value could be obtained by measuring your environment
# step / reset and model inference perf.
"remote_env_batch_wait_ms": 0,
# Minimum time per train iteration (frequency of metrics reporting).
"min_iter_time_s": 0,
# Minimum env steps to optimize for per train call. This value does
# not affect learning, only the length of train iterations.
"timesteps_per_iteration": 0,
# This argument, in conjunction with worker_index, sets the random seed of
# each worker, so that identically configured trials will have identical
# results. This makes experiments reproducible.
"seed": None,
# Any extra python env vars to set in the trainer process, e.g.,
# {"OMP_NUM_THREADS": "16"}
"extra_python_environs_for_driver": {},
    # Any extra python env vars to set for worker processes.
"extra_python_environs_for_worker": {},
# === Advanced Resource Settings ===
# Number of CPUs to allocate per worker.
"num_cpus_per_worker": 1,
# Number of GPUs to allocate per worker. This can be fractional. This is
# usually needed only if your env itself requires a GPU (i.e., it is a
# GPU-intensive video game), or model inference is unusually expensive.
"num_gpus_per_worker": 0,
# Any custom Ray resources to allocate per worker.
"custom_resources_per_worker": {},
# Number of CPUs to allocate for the trainer. Note: this only takes effect
# when running in Tune. Otherwise, the trainer runs in the main program.
"num_cpus_for_driver": 1,
# You can set these memory quotas to tell Ray to reserve memory for your
# training run. This guarantees predictable execution, but the tradeoff is
    # if your workload exceeds the memory quota it will fail.
    # Heap memory to reserve for the trainer process (0 for unlimited). This
    # can be large if you are using large train batches, replay buffers, etc.
"memory": 0,
# Object store memory to reserve for the trainer process. Being large
# enough to fit a few copies of the model weights should be sufficient.
# This is enabled by default since models are typically quite small.
"object_store_memory": 0,
# Heap memory to reserve for each worker. Should generally be small unless
# your environment is very heavyweight.
"memory_per_worker": 0,
# Object store memory to reserve for each worker. This only needs to be
# large enough to fit a few sample batches at a time. This is enabled
# by default since it almost never needs to be larger than ~200MB.
"object_store_memory_per_worker": 0,
# === Offline Datasets ===
# Specify how to generate experiences:
# - "sampler": generate experiences via online simulation (default)
# - a local directory or file glob expression (e.g., "/tmp/*.json")
# - a list of individual file paths/URIs (e.g., ["/tmp/1.json",
# "s3://bucket/2.json"])
# - a dict with string keys and sampling probabilities as values (e.g.,
# {"sampler": 0.4, "/tmp/*.json": 0.4, "s3://bucket/expert.json": 0.2}).
# - a function that returns a rllib.offline.InputReader
"input": "sampler",
# Specify how to evaluate the current policy. This only has an effect when
# reading offline experiences. Available options:
# - "wis": the weighted step-wise importance sampling estimator.
# - "is": the step-wise importance sampling estimator.
# - "simulation": run the environment in the background, but use
# this data for evaluation only and not for learning.
"input_evaluation": ["is", "wis"],
# Whether to run postprocess_trajectory() on the trajectory fragments from
# offline inputs. Note that postprocessing will be done using the *current*
# policy, not the *behavior* policy, which is typically undesirable for
# on-policy algorithms.
"postprocess_inputs": False,
# If positive, input batches will be shuffled via a sliding window buffer
# of this number of batches. Use this if the input data is not in random
# enough order. Input is delayed until the shuffle buffer is filled.
"shuffle_buffer_size": 0,
# Specify where experiences should be saved:
# - None: don't save any experiences
# - "logdir" to save to the agent log dir
# - a path/URI to save to a custom output directory (e.g., "s3://bucket/")
# - a function that returns a rllib.offline.OutputWriter
"output": None,
# What sample batch columns to LZ4 compress in the output data.
"output_compress_columns": ["obs", "new_obs"],
# Max output file size before rolling over to a new file.
"output_max_file_size": 64 * 1024 * 1024,
# === Settings for Multi-Agent Environments ===
"multiagent": {
# Map of type MultiAgentPolicyConfigDict from policy ids to tuples
# of (policy_cls, obs_space, act_space, config). This defines the
# observation and action spaces of the policies and any extra config.
"policies": {},
# Function mapping agent ids to policy ids.
"policy_mapping_fn": None,
# Optional list of policies to train, or None for all policies.
"policies_to_train": None,
# Optional function that can be used to enhance the local agent
# observations to include more state.
# See rllib/evaluation/observation_function.py for more info.
"observation_fn": None,
# When replay_mode=lockstep, RLlib will replay all the agent
# transitions at a particular timestep together in a batch. This allows
# the policy to implement differentiable shared computations between
# agents it controls at that timestep. When replay_mode=independent,
# transitions are replayed independently per policy.
"replay_mode": "independent",
},
# === Logger ===
# Define logger-specific configuration to be used inside Logger
# Default value None allows overwriting with nested dicts
"logger_config": None,
# === Replay Settings ===
# The number of contiguous environment steps to replay at once. This may
# be set to greater than 1 to support recurrent models.
"replay_sequence_length": 1,
}
# __sphinx_doc_end__
# yapf: enable
@DeveloperAPI
def with_common_config(
extra_config: PartialTrainerConfigDict) -> TrainerConfigDict:
"""Returns the given config dict merged with common agent confs.
Args:
extra_config (PartialTrainerConfigDict): A user defined partial config
which will get merged with COMMON_CONFIG and returned.
Returns:
TrainerConfigDict: The merged config dict resulting of COMMON_CONFIG
plus `extra_config`.
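
    Example:
        >>> # A hypothetical sketch: override a few defaults; all other
        >>> # keys keep their COMMON_CONFIG values.
        >>> conf = with_common_config({"num_workers": 4, "lr": 5e-4})
        >>> conf["num_workers"]
        4
        >>> conf["gamma"]  # unchanged default
        0.99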
"""
return Trainer.merge_trainer_configs(
COMMON_CONFIG, extra_config, _allow_unknown_configs=True)
@PublicAPI
class Trainer(Trainable):
"""A trainer coordinates the optimization of one or more RL policies.
All RLlib trainers extend this base class, e.g., the A3CTrainer implements
the A3C algorithm for single and multi-agent training.
Trainer objects retain internal model state between calls to train(), so
you should create a new trainer instance for each training session.
Attributes:
env_creator (func): Function that creates a new training env.
config (obj): Algorithm-specific configuration data.
logdir (str): Directory in which training outputs should be placed.
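
    Example:
        >>> # A hypothetical usage sketch; `MyTrainer` stands in for any
        >>> # concrete Trainer subclass.
        >>> trainer = MyTrainer(env="CartPole-v0")
        >>> for _ in range(3):
        ...     result = trainer.train()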
"""
# Whether to allow unknown top-level config keys.
_allow_unknown_configs = False
# List of top-level keys with value=dict, for which new sub-keys are
# allowed to be added to the value dict.
_allow_unknown_subkeys = [
"tf_session_args", "local_tf_session_args", "env_config", "model",
"optimizer", "multiagent", "custom_resources_per_worker",
"evaluation_config", "exploration_config",
"extra_python_environs_for_driver", "extra_python_environs_for_worker"
]
# List of top level keys with value=dict, for which we always override the
# entire value (dict), iff the "type" key in that value dict changes.
_override_all_subkeys_if_type_changes = ["exploration_config"]
@PublicAPI
def __init__(self,
config: TrainerConfigDict = None,
env: str = None,
logger_creator: Callable[[], Logger] = None):
"""Initialize an RLLib trainer.
Args:
config (dict): Algorithm-specific configuration data.
env (str): Name of the environment to use. Note that this can also
be specified as the `env` key in config.
logger_creator (func): Function that creates a ray.tune.Logger
object. If unspecified, a default logger is created.
"""
        # User-provided config (without the default Trainer `COMMON_CONFIG`;
        # see above). This will get merged with COMMON_CONFIG in
        # self.setup().
config = config or {}
# Vars to synchronize to workers on each train call
self.global_vars = {"timestep": 0}
# Trainers allow env ids to be passed directly to the constructor.
self._env_id = self._register_if_needed(env or config.get("env"))
# Create a default logger creator if no logger_creator is specified
if logger_creator is None:
timestr = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
logdir_prefix = "{}_{}_{}".format(self._name, self._env_id,
timestr)
def default_logger_creator(config):
"""Creates a Unified logger with a default logdir prefix
containing the agent name and the env id
"""
if not os.path.exists(DEFAULT_RESULTS_DIR):
os.makedirs(DEFAULT_RESULTS_DIR)
logdir = tempfile.mkdtemp(
prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR)
return UnifiedLogger(config, logdir, loggers=None)
logger_creator = default_logger_creator
super().__init__(config, logger_creator)
@classmethod
@override(Trainable)
def default_resource_request(
cls, config: PartialTrainerConfigDict) -> Resources:
cf = dict(cls._default_config, **config)
Trainer._validate_config(cf)
num_workers = cf["num_workers"] + cf["evaluation_num_workers"]
# TODO(ekl): add custom resources here once tune supports them
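        # Example (hypothetical numbers): with the defaults num_workers=2,
        # evaluation_num_workers=0, num_cpus_for_driver=1 and
        # num_cpus_per_worker=1, this requests cpu=1 for the trainer process
        # plus extra_cpu=1 * 2 = 2 for the rollout workers.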
return Resources(
cpu=cf["num_cpus_for_driver"],
gpu=cf["num_gpus"],
memory=cf["memory"],
object_store_memory=cf["object_store_memory"],
extra_cpu=cf["num_cpus_per_worker"] * num_workers,
extra_gpu=cf["num_gpus_per_worker"] * num_workers,
extra_memory=cf["memory_per_worker"] * num_workers,
extra_object_store_memory=cf["object_store_memory_per_worker"] *
num_workers)
@override(Trainable)
@PublicAPI
def train(self) -> ResultDict:
"""Overrides super.train to synchronize global vars."""
result = None
for _ in range(1 + MAX_WORKER_FAILURE_RETRIES):
try:
result = Trainable.train(self)
except RayError as e:
if self.config["ignore_worker_failures"]:
logger.exception(
"Error in train call, attempting to recover")
self._try_recover()
else:
logger.info(
"Worker crashed during call to train(). To attempt to "
"continue training without the failed worker, set "
"`'ignore_worker_failures': True`.")
raise e
except Exception as e:
time.sleep(0.5) # allow logs messages to propagate
raise e
else:
break
if result is None:
raise RuntimeError("Failed to recover from worker crash")
if hasattr(self, "workers") and isinstance(self.workers, WorkerSet):
self._sync_filters_if_needed(self.workers)
if self.config["evaluation_interval"] == 1 or (
self._iteration > 0 and self.config["evaluation_interval"]
and self._iteration % self.config["evaluation_interval"] == 0):
evaluation_metrics = self._evaluate()
assert isinstance(evaluation_metrics, dict), \
"_evaluate() needs to return a dict."
result.update(evaluation_metrics)
return result
def _sync_filters_if_needed(self, workers: WorkerSet):
if self.config.get("observation_filter", "NoFilter") != "NoFilter":
FilterManager.synchronize(
workers.local_worker().filters,
workers.remote_workers(),
update_remote=self.config["synchronize_filters"])
logger.debug("synchronized filters: {}".format(
workers.local_worker().filters))
@override(Trainable)
def log_result(self, result: ResultDict):
self.callbacks.on_train_result(trainer=self, result=result)
# log after the callback is invoked, so that the user has a chance
# to mutate the result
Trainable.log_result(self, result)
@override(Trainable)
def setup(self, config: PartialTrainerConfigDict):
env = self._env_id
if env:
config["env"] = env
# An already registered env.
if _global_registry.contains(ENV_CREATOR, env):
self.env_creator = _global_registry.get(ENV_CREATOR, env)
# A class specifier.
elif "." in env:
self.env_creator = \
lambda env_config: from_config(env, env_config)
# Try gym.
else:
import gym # soft dependency
self.env_creator = \
lambda env_config: gym.make(env, **env_config)
else:
self.env_creator = lambda env_config: None
# Merge the supplied config with the class default, but store the
# user-provided one.
self.raw_user_config = config
self.config = Trainer.merge_trainer_configs(self._default_config,
config)
# Check and resolve DL framework settings.
# Enable eager/tracing support.
if tf1 and self.config["framework"] in ["tf2", "tfe"]:
if self.config["framework"] == "tf2" and tfv < 2:
raise ValueError("`framework`=tf2, but tf-version is < 2.0!")
if not tf1.executing_eagerly():
tf1.enable_eager_execution()
logger.info("Executing eagerly, with eager_tracing={}".format(
self.config["eager_tracing"]))
if tf1 and not tf1.executing_eagerly() and \
self.config["framework"] != "torch":
logger.info("Tip: set framework=tfe or the --eager flag to enable "
"TensorFlow eager execution")
if self.config["normalize_actions"]:
inner = self.env_creator
def normalize(env):
import gym # soft dependency
if not isinstance(env, gym.Env):
                    raise ValueError(
                        "Cannot apply NormalizeActionWrapper to env of "
                        "type {}, which does not subclass gym.Env.".format(
                            type(env)))
return NormalizeActionWrapper(env)
self.env_creator = lambda env_config: normalize(inner(env_config))
Trainer._validate_config(self.config)
if not callable(self.config["callbacks"]):
raise ValueError(
"`callbacks` must be a callable method that "
"returns a subclass of DefaultCallbacks, got {}".format(
self.config["callbacks"]))
self.callbacks = self.config["callbacks"]()
log_level = self.config.get("log_level")
if log_level in ["WARN", "ERROR"]:
logger.info("Current log_level is {}. For more information, "
"set 'log_level': 'INFO' / 'DEBUG' or use the -v and "
"-vv flags.".format(log_level))
if self.config.get("log_level"):
logging.getLogger("ray.rllib").setLevel(self.config["log_level"])
def get_scope():
if tf1 and not tf1.executing_eagerly():
return tf1.Graph().as_default()
else:
return open(os.devnull) # fake a no-op scope
with get_scope():
self._init(self.config, self.env_creator)
# Evaluation setup.
if self.config.get("evaluation_interval"):
# Update env_config with evaluation settings:
extra_config = copy.deepcopy(self.config["evaluation_config"])
# Assert that user has not unset "in_evaluation".
assert "in_evaluation" not in extra_config or \
extra_config["in_evaluation"] is True
extra_config.update({
"batch_mode": "complete_episodes",
"rollout_fragment_length": 1,
"in_evaluation": True,
})
logger.debug(
"using evaluation_config: {}".format(extra_config))
self.evaluation_workers = self._make_workers(
self.env_creator,
self._policy_class,
merge_dicts(self.config, extra_config),
num_workers=self.config["evaluation_num_workers"])
self.evaluation_metrics = {}
@override(Trainable)
def cleanup(self):
if hasattr(self, "workers"):
self.workers.stop()
if hasattr(self, "optimizer") and self.optimizer:
self.optimizer.stop()
@override(Trainable)
def save_checkpoint(self, checkpoint_dir: str) -> str:
checkpoint_path = os.path.join(checkpoint_dir,
"checkpoint-{}".format(self.iteration))
        with open(checkpoint_path, "wb") as f:
            pickle.dump(self.__getstate__(), f)
return checkpoint_path
@override(Trainable)
def load_checkpoint(self, checkpoint_path: str):
        with open(checkpoint_path, "rb") as f:
            extra_data = pickle.load(f)
self.__setstate__(extra_data)
@DeveloperAPI
def _make_workers(self, env_creator: Callable[[EnvContext], EnvType],
policy_class: Type[Policy], config: TrainerConfigDict,
num_workers: int) -> WorkerSet:
"""Default factory method for a WorkerSet running under this Trainer.
Override this method by passing a custom `make_workers` into
`build_trainer`.
Args:
            env_creator (callable): A function that returns an Env given an
                env config.
            policy_class (Type[Policy]): The Policy class to use for creating
                the policies of the workers.
config (TrainerConfigDict): The Trainer's config.
num_workers (int): Number of remote rollout workers to create.
0 for local only.
Returns:
WorkerSet: The created WorkerSet.
"""
return WorkerSet(
env_creator=env_creator,
policy_class=policy_class,
trainer_config=config,
num_workers=num_workers,
logdir=self.logdir)
@DeveloperAPI
def _init(self, config: TrainerConfigDict,
env_creator: Callable[[EnvContext], EnvType]):
"""Subclasses should override this for custom initialization."""
raise NotImplementedError
@DeveloperAPI
def _evaluate(self) -> dict:
"""Evaluates current policy under `evaluation_config` settings.
Note that this default implementation does not do anything beyond
merging evaluation_config with the normal trainer config.
"""
self._before_evaluate()
# Broadcast the new policy weights to all evaluation workers.
logger.info("Synchronizing weights to evaluation workers.")
weights = ray.put(self.workers.local_worker().save())
self.evaluation_workers.foreach_worker(
lambda w: w.restore(ray.get(weights)))
self._sync_filters_if_needed(self.evaluation_workers)
if self.config["custom_eval_function"]:
logger.info("Running custom eval function {}".format(
self.config["custom_eval_function"]))
metrics = self.config["custom_eval_function"](
self, self.evaluation_workers)
if not metrics or not isinstance(metrics, dict):
raise ValueError("Custom eval function must return "
"dict of metrics, got {}.".format(metrics))
else:
logger.info("Evaluating current policy for {} episodes.".format(
self.config["evaluation_num_episodes"]))
if self.config["evaluation_num_workers"] == 0:
for _ in range(self.config["evaluation_num_episodes"]):
self.evaluation_workers.local_worker().sample()
else:
num_rounds = int(
math.ceil(self.config["evaluation_num_episodes"] /
self.config["evaluation_num_workers"]))
num_workers = len(self.evaluation_workers.remote_workers())
num_episodes = num_rounds * num_workers
for i in range(num_rounds):
logger.info("Running round {} of parallel evaluation "
"({}/{} episodes)".format(
i, (i + 1) * num_workers, num_episodes))
ray.get([
w.sample.remote()
for w in self.evaluation_workers.remote_workers()
])
metrics = collect_metrics(self.evaluation_workers.local_worker(),
self.evaluation_workers.remote_workers())
return {"evaluation": metrics}
@DeveloperAPI
def _before_evaluate(self):
"""Pre-evaluation callback."""
pass
@PublicAPI
def compute_action(self,
observation: TensorStructType,
state: List[TensorStructType] = None,
prev_action: TensorStructType = None,
prev_reward: float = None,
info: EnvInfoDict = None,
policy_id: PolicyID = DEFAULT_POLICY_ID,
full_fetch: bool = False,
explore: bool = None) -> TensorStructType:
"""Computes an action for the specified policy on the local Worker.
Note that you can also access the policy object through
self.get_policy(policy_id) and call compute_actions() on it directly.
Args:
observation (TensorStructType): observation from the environment.
state (List[TensorStructType]): RNN hidden state, if any. If state
is not None, then all of compute_single_action(...) is returned
(computed action, rnn state(s), logits dictionary).
Otherwise compute_single_action(...)[0] is returned
(computed action).
prev_action (TensorStructType): Previous action value, if any.
prev_reward (float): Previous reward, if any.
info (EnvInfoDict): info object, if any
policy_id (PolicyID): Policy to query (only applies to
multi-agent).
full_fetch (bool): Whether to return extra action fetch results.
This is always set to True if RNN state is specified.
explore (bool): Whether to pick an exploitation or exploration
action (default: None -> use self.config["explore"]).
Returns:
any: The computed action if full_fetch=False, or
tuple: The full output of policy.compute_actions() if
full_fetch=True or we have an RNN-based Policy.
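
        Example:
            >>> # A hypothetical sketch: compute a greedy (non-exploratory)
            >>> # action for an observation `obs` on an existing trainer.
            >>> action = trainer.compute_action(obs, explore=False)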
"""
if state is None:
state = []
preprocessed = self.workers.local_worker().preprocessors[
policy_id].transform(observation)
filtered_obs = self.workers.local_worker().filters[policy_id](
preprocessed, update=False)
# Figure out the current (sample) time step and pass it into Policy.
self.global_vars["timestep"] += 1
result = self.get_policy(policy_id).compute_single_action(
filtered_obs,
state,
prev_action,
prev_reward,
info,
clip_actions=self.config["clip_actions"],
explore=explore,
timestep=self.global_vars["timestep"])
if state or full_fetch:
return result
else:
return result[0] # backwards compatibility
def compute_actions(self,
observations,
state=None,
prev_action=None,
prev_reward=None,
info=None,
policy_id=DEFAULT_POLICY_ID,
full_fetch=False,
explore=None):
"""Computes an action for the specified policy on the local Worker.
Note that you can also access the policy object through
self.get_policy(policy_id) and call compute_actions() on it directly.
Args:
            observations (dict): Map of agent ids to observations from the
                environment.
            state (dict): RNN hidden state, if any. If state is not None,
                then all of compute_single_action(...) is returned
                (computed action, rnn state(s), logits dictionary).
                Otherwise compute_single_action(...)[0] is returned
                (computed action).
            prev_action (obj): previous action value, if any
            prev_reward (float): previous reward, if any
info (dict): info object, if any
policy_id (str): Policy to query (only applies to multi-agent).
full_fetch (bool): Whether to return extra action fetch results.
This is always set to True if RNN state is specified.
explore (bool): Whether to pick an exploitation or exploration
action (default: None -> use self.config["explore"]).
Returns:
any: The computed action if full_fetch=False, or
tuple: The full output of policy.compute_actions() if
full_fetch=True or we have an RNN-based Policy.
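
        Example:
            >>> # A hypothetical sketch: batched actions for a multi-agent
            >>> # env; `obs0` and `obs1` are per-agent observations.
            >>> actions = trainer.compute_actions(
            ...     {"agent_0": obs0, "agent_1": obs1})
            >>> actions["agent_0"]  # action computed for agent_0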
"""
# Preprocess obs and states
        state_defined = state is not None
policy = self.get_policy(policy_id)
filtered_obs, filtered_state = [], []
for agent_id, ob in observations.items():
worker = self.workers.local_worker()
preprocessed = worker.preprocessors[policy_id].transform(ob)
filtered = worker.filters[policy_id](preprocessed, update=False)
filtered_obs.append(filtered)
if state is None:
continue
elif agent_id in state:
filtered_state.append(state[agent_id])
else:
filtered_state.append(policy.get_initial_state())
# Batch obs and states
obs_batch = np.stack(filtered_obs)
if state is None:
state = []
else:
state = list(zip(*filtered_state))
state = [np.stack(s) for s in state]
# Figure out the current (sample) time step and pass it into Policy.
self.global_vars["timestep"] += 1
# Batch compute actions
actions, states, infos = policy.compute_actions(
obs_batch,
state,
prev_action,
prev_reward,
info,
clip_actions=self.config["clip_actions"],
explore=explore,
timestep=self.global_vars["timestep"])
# Unbatch actions for the environment
atns, actions = space_utils.unbatch(actions), {}
for key, atn in zip(observations, atns):
actions[key] = atn
# Unbatch states into a dict
unbatched_states = {}
for idx, agent_id in enumerate(observations):
unbatched_states[agent_id] = [s[idx] for s in states]
# Return only actions or full tuple
        if state_defined or full_fetch:
return actions, unbatched_states, infos
else:
return actions
@property
def _name(self) -> str:
"""Subclasses should override this to declare their name."""
raise NotImplementedError
@property
def _default_config(self) -> TrainerConfigDict:
"""Subclasses should override this to declare their default config."""
raise NotImplementedError
@PublicAPI
def get_policy(self, policy_id: PolicyID = DEFAULT_POLICY_ID) -> Policy:
"""Return policy for the specified id, or None.
Args:
policy_id (str): id of policy to return.
"""
return self.workers.local_worker().get_policy(policy_id)
@PublicAPI
def get_weights(self, policies: List[PolicyID] = None) -> dict:
"""Return a dictionary of policy ids to weights.
Args:
policies (list): Optional list of policies to return weights for,
or None for all policies.
"""
return self.workers.local_worker().get_weights(policies)
@PublicAPI
def set_weights(self, weights: Dict[PolicyID, dict]):
"""Set policy weights by policy id.
Args:
weights (dict): Map of policy ids to weights to set.
"""
self.workers.local_worker().set_weights(weights)
@DeveloperAPI
def export_policy_model(self,
export_dir: str,
policy_id: PolicyID = DEFAULT_POLICY_ID):
"""Export policy model with given policy_id to local directory.
Args:
export_dir (string): Writable local directory.
policy_id (string): Optional policy id to export.
Example:
>>> trainer = MyTrainer()
>>> for _ in range(10):
>>> trainer.train()
>>> trainer.export_policy_model("/tmp/export_dir")
"""
self.workers.local_worker().export_policy_model(export_dir, policy_id)
@DeveloperAPI
def export_policy_checkpoint(self,
export_dir: str,
filename_prefix: str = "model",
policy_id: PolicyID = DEFAULT_POLICY_ID):
"""Export tensorflow policy model checkpoint to local directory.
Args:
export_dir (string): Writable local directory.
filename_prefix (string): file name prefix of checkpoint files.
policy_id (string): Optional policy id to export.
Example:
>>> trainer = MyTrainer()
>>> for _ in range(10):
>>> trainer.train()
>>> trainer.export_policy_checkpoint("/tmp/export_dir")
"""
self.workers.local_worker().export_policy_checkpoint(
export_dir, filename_prefix, policy_id)
@DeveloperAPI
def import_policy_model_from_h5(self,
import_file: str,
policy_id: PolicyID = DEFAULT_POLICY_ID):
"""Imports a policy's model with given policy_id from a local h5 file.
Args:
import_file (str): The h5 file to import from.
policy_id (string): Optional policy id to import into.
Example:
>>> trainer = MyTrainer()
>>> trainer.import_policy_model_from_h5("/tmp/weights.h5")
>>> for _ in range(10):
>>> trainer.train()
"""
self.workers.local_worker().import_policy_model_from_h5(
import_file, policy_id)
@DeveloperAPI
def collect_metrics(self,
selected_workers: List["ActorHandle"] = None) -> dict:
"""Collects metrics from the remote workers of this agent.
This is the same data as returned by a call to train().
"""
return self.optimizer.collect_metrics(
self.config["collect_metrics_timeout"],
min_history=self.config["metrics_smoothing_episodes"],
selected_workers=selected_workers)
@classmethod
def resource_help(cls, config: TrainerConfigDict) -> str:
return ("\n\nYou can adjust the resource requests of RLlib agents by "
"setting `num_workers`, `num_gpus`, and other configs. See "
"the DEFAULT_CONFIG defined by each agent for more info.\n\n"
"The config of this agent is: {}".format(config))
@classmethod
def merge_trainer_configs(cls,
config1: TrainerConfigDict,
config2: PartialTrainerConfigDict,
_allow_unknown_configs: Optional[bool] = None
) -> TrainerConfigDict:
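        """Merges `config2` into a deep copy of `config1` and returns it.

        A minimal usage sketch (hypothetical values):

            >>> merged = Trainer.merge_trainer_configs(
            ...     COMMON_CONFIG, {"train_batch_size": 4000})
            >>> merged["train_batch_size"]
            4000
        """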
config1 = copy.deepcopy(config1)
if "callbacks" in config2 and type(config2["callbacks"]) is dict:
legacy_callbacks_dict = config2["callbacks"]
def make_callbacks():
# Deprecation warning will be logged by DefaultCallbacks.
return DefaultCallbacks(
legacy_callbacks_dict=legacy_callbacks_dict)
config2["callbacks"] = make_callbacks
if _allow_unknown_configs is None:
_allow_unknown_configs = cls._allow_unknown_configs
return deep_update(config1, config2, _allow_unknown_configs,
cls._allow_unknown_subkeys,
cls._override_all_subkeys_if_type_changes)
@staticmethod
def _validate_config(config: PartialTrainerConfigDict):
if config.get("_use_trajectory_view_api") and \
config.get("framework") != "torch":
raise ValueError(
"`_use_trajectory_view_api` only supported for PyTorch so "
"far!")
elif not config.get("_use_trajectory_view_api") and \
config.get("model", {}).get("_time_major"):
raise ValueError("`model._time_major` only supported "
"iff `_use_trajectory_view_api` is True!")
        if not isinstance(config["input_evaluation"], list):
raise ValueError(
"`input_evaluation` must be a list of strings, got {}".format(
config["input_evaluation"]))
def _try_recover(self):
"""Try to identify and remove any unhealthy workers.
This method is called after an unexpected remote error is encountered
from a worker. It issues check requests to all current workers and
removes any that respond with error. If no healthy workers remain,
an error is raised.
"""
assert hasattr(self, "execution_plan")
workers = self.workers
logger.info("Health checking all workers...")
checks = []
for ev in workers.remote_workers():
_, obj_ref = ev.sample_with_count.remote()
checks.append(obj_ref)
healthy_workers = []
for i, obj_ref in enumerate(checks):
w = workers.remote_workers()[i]
try:
ray.get(obj_ref)
healthy_workers.append(w)
logger.info("Worker {} looks healthy".format(i + 1))
except RayError:
logger.exception("Removing unhealthy worker {}".format(i + 1))
try:
w.__ray_terminate__.remote()
except Exception:
logger.exception("Error terminating unhealthy worker")
if len(healthy_workers) < 1:
raise RuntimeError(
"Not enough healthy workers remain to continue.")
logger.warning("Recreating execution plan after failure")
workers.reset(healthy_workers)
self.train_exec_impl = self.execution_plan(workers, self.config)
@override(Trainable)
def _export_model(self, export_formats: List[str],
export_dir: str) -> Dict[str, str]:
ExportFormat.validate(export_formats)
exported = {}
if ExportFormat.CHECKPOINT in export_formats:
path = os.path.join(export_dir, ExportFormat.CHECKPOINT)
self.export_policy_checkpoint(path)
exported[ExportFormat.CHECKPOINT] = path
if ExportFormat.MODEL in export_formats:
path = os.path.join(export_dir, ExportFormat.MODEL)
self.export_policy_model(path)
exported[ExportFormat.MODEL] = path
return exported
def import_model(self, import_file: str):
"""Imports a model from import_file.
Note: Currently, only h5 files are supported.
Args:
import_file (str): The file to import the model from.
"""
# Check for existence.
if not os.path.exists(import_file):
raise FileNotFoundError(
"`import_file` '{}' does not exist! Can't import Model.".
format(import_file))
# Get the format of the given file.
import_format = "h5" # TODO(sven): Support checkpoint loading.
ExportFormat.validate([import_format])
if import_format != ExportFormat.H5:
raise NotImplementedError
else:
return self.import_policy_model_from_h5(import_file)
def __getstate__(self) -> dict:
state = {}
if hasattr(self, "workers"):
state["worker"] = self.workers.local_worker().save()
if hasattr(self, "optimizer") and hasattr(self.optimizer, "save"):
state["optimizer"] = self.optimizer.save()
return state
def __setstate__(self, state: dict):
if "worker" in state:
self.workers.local_worker().restore(state["worker"])
remote_state = ray.put(state["worker"])
for r in self.workers.remote_workers():
r.restore.remote(remote_state)
if "optimizer" in state:
self.optimizer.restore(state["optimizer"])
@staticmethod
def with_updates(**overrides) -> Type["Trainer"]:
raise NotImplementedError(
"`with_updates` may only be called on Trainer sub-classes "
"that were generated via the `ray.rllib.agents.trainer_template."
"build_trainer()` function!")
def _register_if_needed(self, env_object: Union[str, EnvType]):
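        """Returns an env id string, registering `env_object` if needed.

        A hypothetical sketch of the two accepted forms:

            >>> trainer._register_if_needed("CartPole-v0")  # id passthrough
            'CartPole-v0'
            >>> trainer._register_if_needed(MyEnvCls)  # registers the class
            'MyEnvCls'
        """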
if isinstance(env_object, str):
return env_object
elif isinstance(env_object, type):
name = env_object.__name__
register_env(name, lambda config: env_object(config))
return name
raise ValueError(
"{} is an invalid env specification. ".format(env_object) +
"You can specify a custom env as either a class "
"(e.g., YourEnvCls) or a registered env id (e.g., \"your_env\").")
| 44.149406
| 79
| 0.633037
|
from datetime import datetime
import numpy as np
import copy
import logging
import math
import os
import pickle
import time
import tempfile
from typing import Callable, Dict, List, Optional, Type, Union
import ray
from ray.exceptions import RayError
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.env.normalize_actions import NormalizeActionWrapper
from ray.rllib.env.env_context import EnvContext
from ray.rllib.models import MODEL_DEFAULTS
from ray.rllib.policy import Policy
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.evaluation.metrics import collect_metrics
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.utils import FilterManager, deep_update, merge_dicts
from ray.rllib.utils.spaces import space_utils
from ray.rllib.utils.framework import try_import_tf, TensorStructType
from ray.rllib.utils.annotations import override, PublicAPI, DeveloperAPI
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.typing import TrainerConfigDict, \
PartialTrainerConfigDict, EnvInfoDict, ResultDict, EnvType, PolicyID
from ray.tune.registry import ENV_CREATOR, register_env, _global_registry
from ray.tune.trainable import Trainable
from ray.tune.trial import ExportFormat
from ray.tune.resources import Resources
from ray.tune.logger import Logger, UnifiedLogger
from ray.tune.result import DEFAULT_RESULTS_DIR
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
# times in a row since that would indicate a persistent cluster issue.
MAX_WORKER_FAILURE_RETRIES = 3
# yapf: disable
# __sphinx_doc_begin__
COMMON_CONFIG: TrainerConfigDict = {
# === Settings for Rollout Worker processes ===
# Number of rollout worker actors to create for parallel sampling. Setting
# this to 0 will force rollouts to be done in the trainer actor.
"num_workers": 2,
# Number of environments to evaluate vectorwise per worker. This enables
# model inference batching, which can improve performance for inference
# bottlenecked workloads.
"num_envs_per_worker": 1,
# Divide episodes into fragments of this many steps each during rollouts.
# Sample batches of this size are collected from rollout workers and
# combined into a larger batch of `train_batch_size` for learning.
#
# For example, given rollout_fragment_length=100 and train_batch_size=1000:
# 1. RLlib collects 10 fragments of 100 steps each from rollout workers.
# 2. These fragments are concatenated and we perform an epoch of SGD.
#
# When using multiple envs per worker, the fragment size is multiplied by
# `num_envs_per_worker`. This is since we are collecting steps from
# multiple envs in parallel. For example, if num_envs_per_worker=5, then
# rollout workers will return experiences in chunks of 5*100 = 500 steps.
#
# The dataflow here can vary per algorithm. For example, PPO further
# divides the train batch into minibatches for multi-epoch SGD.
"rollout_fragment_length": 200,
# Whether to rollout "complete_episodes" or "truncate_episodes" to
# `rollout_fragment_length` length unrolls. Episode truncation guarantees
# evenly sized batches, but increases variance as the reward-to-go will
# need to be estimated at truncation boundaries.
"batch_mode": "truncate_episodes",
# === Settings for the Trainer process ===
# Number of GPUs to allocate to the trainer process. Note that not all
# algorithms can take advantage of trainer GPUs. This can be fractional
# (e.g., 0.3 GPUs).
"num_gpus": 0,
# Training batch size, if applicable. Should be >= rollout_fragment_length.
# Samples batches will be concatenated together to a batch of this size,
# which is then passed to SGD.
"train_batch_size": 200,
# Arguments to pass to the policy model. See models/catalog.py for a full
# list of the available model options.
"model": MODEL_DEFAULTS,
# Arguments to pass to the policy optimizer. These vary by optimizer.
"optimizer": {},
# === Environment Settings ===
# Discount factor of the MDP.
"gamma": 0.99,
# Number of steps after which the episode is forced to terminate. Defaults
# to `env.spec.max_episode_steps` (if present) for Gym envs.
"horizon": None,
# Calculate rewards but don't reset the environment when the horizon is
"soft_horizon": False,
# set this if soft_horizon=True, unless your env is actually running
# forever without returning done=True.
"no_done_at_end": False,
# Arguments to pass to the env creator.
"env_config": {},
# Environment name can also be passed via config.
"env": None,
# Unsquash actions to the upper and lower bounds of env's action space
"normalize_actions": False,
# None (default): Clip for Atari only (r=sign(r)).
# True: r=sign(r): Fixed rewards -1.0, 1.0, or 0.0.
# False: Never clip.
# [float value]: Clip at -value and + value.
# Tuple[value1, value2]: Clip at value1 and value2.
"clip_rewards": None,
# Whether to clip actions to the action space's low/high range spec.
"clip_actions": True,
"preprocessor_pref": "deepmind",
"lr": 0.0001,
"monitor": False,
"log_level": "WARN",
"callbacks": DefaultCallbacks,
"ignore_worker_failures": False,
"log_sys_usage": True,
"fake_sampler": False,
"framework": "tf",
# after the initial eager pass. Only possible if framework=tfe.
"eager_tracing": False,
# === Exploration Settings ===
# Default exploration behavior, iff `explore`=None is passed into
# compute_action(s).
# Set to False for no exploration behavior (e.g., for evaluation).
"explore": True,
# Provide a dict specifying the Exploration object's config.
"exploration_config": {
# EpsilonGreedy").
"type": "StochasticSampling",
},
"evaluation_interval": None,
"evaluation_num_episodes": 10,
"in_evaluation": False,
"evaluation_config": {
},
"evaluation_num_workers": 0,
"custom_eval_function": None,
"sample_async": False,
"_use_trajectory_view_api": False,
"observation_filter": "NoFilter",
"synchronize_filters": True,
"tf_session_args": {
"intra_op_parallelism_threads": 2,
"inter_op_parallelism_threads": 2,
"gpu_options": {
"allow_growth": True,
},
"log_device_placement": False,
"device_count": {
"CPU": 1
},
"allow_soft_placement": True, },
"local_tf_session_args": {
"intra_op_parallelism_threads": 8,
"inter_op_parallelism_threads": 8,
},
"compress_observations": False,
"collect_metrics_timeout": 180,
"metrics_smoothing_episodes": 100,
"remote_worker_envs": False,
"remote_env_batch_wait_ms": 0,
"min_iter_time_s": 0,
"timesteps_per_iteration": 0,
"seed": None,
"extra_python_environs_for_driver": {},
"extra_python_environs_for_worker": {},
"num_cpus_per_worker": 1,
"num_gpus_per_worker": 0,
"custom_resources_per_worker": {},
"num_cpus_for_driver": 1,
"memory": 0,
"object_store_memory": 0,
"memory_per_worker": 0,
"object_store_memory_per_worker": 0,
"input": "sampler",
"input_evaluation": ["is", "wis"],
"postprocess_inputs": False,
"shuffle_buffer_size": 0,
# - "logdir" to save to the agent log dir
# - a path/URI to save to a custom output directory (e.g., "s3://bucket/")
# - a function that returns a rllib.offline.OutputWriter
"output": None,
# What sample batch columns to LZ4 compress in the output data.
"output_compress_columns": ["obs", "new_obs"],
# Max output file size before rolling over to a new file.
"output_max_file_size": 64 * 1024 * 1024,
# === Settings for Multi-Agent Environments ===
"multiagent": {
# Map of type MultiAgentPolicyConfigDict from policy ids to tuples
# of (policy_cls, obs_space, act_space, config). This defines the
# observation and action spaces of the policies and any extra config.
"policies": {},
# Function mapping agent ids to policy ids.
"policy_mapping_fn": None,
# Optional list of policies to train, or None for all policies.
"policies_to_train": None,
# Optional function that can be used to enhance the local agent
# observations to include more state.
# See rllib/evaluation/observation_function.py for more info.
"observation_fn": None,
# When replay_mode=lockstep, RLlib will replay all the agent
# transitions at a particular timestep together in a batch. This allows
# the policy to implement differentiable shared computations between
# agents it controls at that timestep. When replay_mode=independent,
# transitions are replayed independently per policy.
"replay_mode": "independent",
},
# === Logger ===
# Define logger-specific configuration to be used inside Logger
# Default value None allows overwriting with nested dicts
"logger_config": None,
# === Replay Settings ===
# The number of contiguous environment steps to replay at once. This may
# be set to greater than 1 to support recurrent models.
"replay_sequence_length": 1,
}
# __sphinx_doc_end__
# yapf: enable
@DeveloperAPI
def with_common_config(
extra_config: PartialTrainerConfigDict) -> TrainerConfigDict:
return Trainer.merge_trainer_configs(
COMMON_CONFIG, extra_config, _allow_unknown_configs=True)
@PublicAPI
class Trainer(Trainable):
# Whether to allow unknown top-level config keys.
_allow_unknown_configs = False
# List of top-level keys with value=dict, for which new sub-keys are
# allowed to be added to the value dict.
_allow_unknown_subkeys = [
"tf_session_args", "local_tf_session_args", "env_config", "model",
"optimizer", "multiagent", "custom_resources_per_worker",
"evaluation_config", "exploration_config",
"extra_python_environs_for_driver", "extra_python_environs_for_worker"
]
# List of top level keys with value=dict, for which we always override the
# entire value (dict), iff the "type" key in that value dict changes.
_override_all_subkeys_if_type_changes = ["exploration_config"]
@PublicAPI
def __init__(self,
config: TrainerConfigDict = None,
env: str = None,
logger_creator: Callable[[], Logger] = None):
# User provided config (this is w/o the default Trainer's
config = config or {}
self.global_vars = {"timestep": 0}
self._env_id = self._register_if_needed(env or config.get("env"))
if logger_creator is None:
timestr = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
logdir_prefix = "{}_{}_{}".format(self._name, self._env_id,
timestr)
def default_logger_creator(config):
if not os.path.exists(DEFAULT_RESULTS_DIR):
os.makedirs(DEFAULT_RESULTS_DIR)
logdir = tempfile.mkdtemp(
prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR)
return UnifiedLogger(config, logdir, loggers=None)
logger_creator = default_logger_creator
super().__init__(config, logger_creator)
@classmethod
@override(Trainable)
def default_resource_request(
cls, config: PartialTrainerConfigDict) -> Resources:
cf = dict(cls._default_config, **config)
Trainer._validate_config(cf)
num_workers = cf["num_workers"] + cf["evaluation_num_workers"]
return Resources(
cpu=cf["num_cpus_for_driver"],
gpu=cf["num_gpus"],
memory=cf["memory"],
object_store_memory=cf["object_store_memory"],
extra_cpu=cf["num_cpus_per_worker"] * num_workers,
extra_gpu=cf["num_gpus_per_worker"] * num_workers,
extra_memory=cf["memory_per_worker"] * num_workers,
extra_object_store_memory=cf["object_store_memory_per_worker"] *
num_workers)
@override(Trainable)
@PublicAPI
def train(self) -> ResultDict:
result = None
for _ in range(1 + MAX_WORKER_FAILURE_RETRIES):
try:
result = Trainable.train(self)
except RayError as e:
if self.config["ignore_worker_failures"]:
logger.exception(
"Error in train call, attempting to recover")
self._try_recover()
else:
logger.info(
"Worker crashed during call to train(). To attempt to "
"continue training without the failed worker, set "
"`'ignore_worker_failures': True`.")
raise e
except Exception as e:
time.sleep(0.5) raise e
else:
break
if result is None:
raise RuntimeError("Failed to recover from worker crash")
if hasattr(self, "workers") and isinstance(self.workers, WorkerSet):
self._sync_filters_if_needed(self.workers)
if self.config["evaluation_interval"] == 1 or (
self._iteration > 0 and self.config["evaluation_interval"]
and self._iteration % self.config["evaluation_interval"] == 0):
evaluation_metrics = self._evaluate()
assert isinstance(evaluation_metrics, dict), \
"_evaluate() needs to return a dict."
result.update(evaluation_metrics)
return result
def _sync_filters_if_needed(self, workers: WorkerSet):
if self.config.get("observation_filter", "NoFilter") != "NoFilter":
FilterManager.synchronize(
workers.local_worker().filters,
workers.remote_workers(),
update_remote=self.config["synchronize_filters"])
logger.debug("synchronized filters: {}".format(
workers.local_worker().filters))
@override(Trainable)
def log_result(self, result: ResultDict):
self.callbacks.on_train_result(trainer=self, result=result)
Trainable.log_result(self, result)
@override(Trainable)
def setup(self, config: PartialTrainerConfigDict):
env = self._env_id
if env:
config["env"] = env
if _global_registry.contains(ENV_CREATOR, env):
self.env_creator = _global_registry.get(ENV_CREATOR, env)
elif "." in env:
self.env_creator = \
lambda env_config: from_config(env, env_config)
else:
import gym self.env_creator = \
lambda env_config: gym.make(env, **env_config)
else:
self.env_creator = lambda env_config: None
self.raw_user_config = config
self.config = Trainer.merge_trainer_configs(self._default_config,
config)
if tf1 and self.config["framework"] in ["tf2", "tfe"]:
if self.config["framework"] == "tf2" and tfv < 2:
raise ValueError("`framework`=tf2, but tf-version is < 2.0!")
if not tf1.executing_eagerly():
tf1.enable_eager_execution()
logger.info("Executing eagerly, with eager_tracing={}".format(
self.config["eager_tracing"]))
if tf1 and not tf1.executing_eagerly() and \
self.config["framework"] != "torch":
logger.info("Tip: set framework=tfe or the --eager flag to enable "
"TensorFlow eager execution")
if self.config["normalize_actions"]:
inner = self.env_creator
def normalize(env):
import gym if not isinstance(env, gym.Env):
raise ValueError(
"Cannot apply NormalizeActionActionWrapper to env of "
"type {}, which does not subclass gym.Env.", type(env))
return NormalizeActionWrapper(env)
self.env_creator = lambda env_config: normalize(inner(env_config))
Trainer._validate_config(self.config)
if not callable(self.config["callbacks"]):
raise ValueError(
"`callbacks` must be a callable method that "
"returns a subclass of DefaultCallbacks, got {}".format(
self.config["callbacks"]))
self.callbacks = self.config["callbacks"]()
log_level = self.config.get("log_level")
if log_level in ["WARN", "ERROR"]:
logger.info("Current log_level is {}. For more information, "
"set 'log_level': 'INFO' / 'DEBUG' or use the -v and "
"-vv flags.".format(log_level))
if self.config.get("log_level"):
logging.getLogger("ray.rllib").setLevel(self.config["log_level"])
def get_scope():
if tf1 and not tf1.executing_eagerly():
return tf1.Graph().as_default()
else:
return open(os.devnull)
with get_scope():
self._init(self.config, self.env_creator)
if self.config.get("evaluation_interval"):
extra_config = copy.deepcopy(self.config["evaluation_config"])
assert "in_evaluation" not in extra_config or \
extra_config["in_evaluation"] is True
extra_config.update({
"batch_mode": "complete_episodes",
"rollout_fragment_length": 1,
"in_evaluation": True,
})
logger.debug(
"using evaluation_config: {}".format(extra_config))
self.evaluation_workers = self._make_workers(
self.env_creator,
self._policy_class,
merge_dicts(self.config, extra_config),
num_workers=self.config["evaluation_num_workers"])
self.evaluation_metrics = {}
@override(Trainable)
def cleanup(self):
if hasattr(self, "workers"):
self.workers.stop()
if hasattr(self, "optimizer") and self.optimizer:
self.optimizer.stop()
@override(Trainable)
def save_checkpoint(self, checkpoint_dir: str) -> str:
checkpoint_path = os.path.join(checkpoint_dir,
"checkpoint-{}".format(self.iteration))
pickle.dump(self.__getstate__(), open(checkpoint_path, "wb"))
return checkpoint_path
@override(Trainable)
def load_checkpoint(self, checkpoint_path: str):
extra_data = pickle.load(open(checkpoint_path, "rb"))
self.__setstate__(extra_data)
@DeveloperAPI
def _make_workers(self, env_creator: Callable[[EnvContext], EnvType],
policy_class: Type[Policy], config: TrainerConfigDict,
num_workers: int) -> WorkerSet:
return WorkerSet(
env_creator=env_creator,
policy_class=policy_class,
trainer_config=config,
num_workers=num_workers,
logdir=self.logdir)
@DeveloperAPI
def _init(self, config: TrainerConfigDict,
env_creator: Callable[[EnvContext], EnvType]):
raise NotImplementedError
@DeveloperAPI
def _evaluate(self) -> dict:
self._before_evaluate()
logger.info("Synchronizing weights to evaluation workers.")
weights = ray.put(self.workers.local_worker().save())
self.evaluation_workers.foreach_worker(
lambda w: w.restore(ray.get(weights)))
self._sync_filters_if_needed(self.evaluation_workers)
if self.config["custom_eval_function"]:
logger.info("Running custom eval function {}".format(
self.config["custom_eval_function"]))
metrics = self.config["custom_eval_function"](
self, self.evaluation_workers)
if not metrics or not isinstance(metrics, dict):
raise ValueError("Custom eval function must return "
"dict of metrics, got {}.".format(metrics))
else:
logger.info("Evaluating current policy for {} episodes.".format(
self.config["evaluation_num_episodes"]))
if self.config["evaluation_num_workers"] == 0:
for _ in range(self.config["evaluation_num_episodes"]):
self.evaluation_workers.local_worker().sample()
else:
num_rounds = int(
math.ceil(self.config["evaluation_num_episodes"] /
self.config["evaluation_num_workers"]))
num_workers = len(self.evaluation_workers.remote_workers())
num_episodes = num_rounds * num_workers
for i in range(num_rounds):
logger.info("Running round {} of parallel evaluation "
"({}/{} episodes)".format(
i, (i + 1) * num_workers, num_episodes))
ray.get([
w.sample.remote()
for w in self.evaluation_workers.remote_workers()
])
metrics = collect_metrics(self.evaluation_workers.local_worker(),
self.evaluation_workers.remote_workers())
return {"evaluation": metrics}
@DeveloperAPI
def _before_evaluate(self):
pass
@PublicAPI
def compute_action(self,
observation: TensorStructType,
state: List[TensorStructType] = None,
prev_action: TensorStructType = None,
prev_reward: float = None,
info: EnvInfoDict = None,
policy_id: PolicyID = DEFAULT_POLICY_ID,
full_fetch: bool = False,
explore: bool = None) -> TensorStructType:
if state is None:
state = []
preprocessed = self.workers.local_worker().preprocessors[
policy_id].transform(observation)
filtered_obs = self.workers.local_worker().filters[policy_id](
preprocessed, update=False)
self.global_vars["timestep"] += 1
result = self.get_policy(policy_id).compute_single_action(
filtered_obs,
state,
prev_action,
prev_reward,
info,
clip_actions=self.config["clip_actions"],
explore=explore,
timestep=self.global_vars["timestep"])
if state or full_fetch:
return result
else:
return result[0]
def compute_actions(self,
observations,
state=None,
prev_action=None,
prev_reward=None,
info=None,
policy_id=DEFAULT_POLICY_ID,
full_fetch=False,
explore=None):
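        """Batched variant of compute_action: takes a dict mapping agent ids
        to observations and returns a dict of per-agent actions (plus states
        and infos when state is given or full_fetch is set)."""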
stateDefined = state is not None
policy = self.get_policy(policy_id)
filtered_obs, filtered_state = [], []
for agent_id, ob in observations.items():
worker = self.workers.local_worker()
preprocessed = worker.preprocessors[policy_id].transform(ob)
filtered = worker.filters[policy_id](preprocessed, update=False)
filtered_obs.append(filtered)
if state is None:
continue
elif agent_id in state:
filtered_state.append(state[agent_id])
else:
filtered_state.append(policy.get_initial_state())
obs_batch = np.stack(filtered_obs)
if state is None:
state = []
else:
state = list(zip(*filtered_state))
state = [np.stack(s) for s in state]
self.global_vars["timestep"] += 1
actions, states, infos = policy.compute_actions(
obs_batch,
state,
prev_action,
prev_reward,
info,
clip_actions=self.config["clip_actions"],
explore=explore,
timestep=self.global_vars["timestep"])
atns, actions = space_utils.unbatch(actions), {}
for key, atn in zip(observations, atns):
actions[key] = atn
unbatched_states = {}
for idx, agent_id in enumerate(observations):
unbatched_states[agent_id] = [s[idx] for s in states]
if stateDefined or full_fetch:
return actions, unbatched_states, infos
else:
return actions
@property
def _name(self) -> str:
raise NotImplementedError
@property
def _default_config(self) -> TrainerConfigDict:
raise NotImplementedError
@PublicAPI
def get_policy(self, policy_id: PolicyID = DEFAULT_POLICY_ID) -> Policy:
return self.workers.local_worker().get_policy(policy_id)
@PublicAPI
def get_weights(self, policies: List[PolicyID] = None) -> dict:
return self.workers.local_worker().get_weights(policies)
@PublicAPI
def set_weights(self, weights: Dict[PolicyID, dict]):
self.workers.local_worker().set_weights(weights)
@DeveloperAPI
def export_policy_model(self,
export_dir: str,
policy_id: PolicyID = DEFAULT_POLICY_ID):
self.workers.local_worker().export_policy_model(export_dir, policy_id)
@DeveloperAPI
def export_policy_checkpoint(self,
export_dir: str,
filename_prefix: str = "model",
policy_id: PolicyID = DEFAULT_POLICY_ID):
self.workers.local_worker().export_policy_checkpoint(
export_dir, filename_prefix, policy_id)
@DeveloperAPI
def import_policy_model_from_h5(self,
import_file: str,
policy_id: PolicyID = DEFAULT_POLICY_ID):
self.workers.local_worker().import_policy_model_from_h5(
import_file, policy_id)
@DeveloperAPI
def collect_metrics(self,
selected_workers: List["ActorHandle"] = None) -> dict:
return self.optimizer.collect_metrics(
self.config["collect_metrics_timeout"],
min_history=self.config["metrics_smoothing_episodes"],
selected_workers=selected_workers)
@classmethod
def resource_help(cls, config: TrainerConfigDict) -> str:
return ("\n\nYou can adjust the resource requests of RLlib agents by "
"setting `num_workers`, `num_gpus`, and other configs. See "
"the DEFAULT_CONFIG defined by each agent for more info.\n\n"
"The config of this agent is: {}".format(config))
@classmethod
def merge_trainer_configs(cls,
config1: TrainerConfigDict,
config2: PartialTrainerConfigDict,
_allow_unknown_configs: Optional[bool] = None
) -> TrainerConfigDict:
config1 = copy.deepcopy(config1)
if "callbacks" in config2 and type(config2["callbacks"]) is dict:
legacy_callbacks_dict = config2["callbacks"]
def make_callbacks():
return DefaultCallbacks(
legacy_callbacks_dict=legacy_callbacks_dict)
config2["callbacks"] = make_callbacks
if _allow_unknown_configs is None:
_allow_unknown_configs = cls._allow_unknown_configs
return deep_update(config1, config2, _allow_unknown_configs,
cls._allow_unknown_subkeys,
cls._override_all_subkeys_if_type_changes)
@staticmethod
def _validate_config(config: PartialTrainerConfigDict):
if config.get("_use_trajectory_view_api") and \
config.get("framework") != "torch":
raise ValueError(
"`_use_trajectory_view_api` only supported for PyTorch so "
"far!")
elif not config.get("_use_trajectory_view_api") and \
config.get("model", {}).get("_time_major"):
raise ValueError("`model._time_major` only supported "
"iff `_use_trajectory_view_api` is True!")
if type(config["input_evaluation"]) != list:
raise ValueError(
"`input_evaluation` must be a list of strings, got {}".format(
config["input_evaluation"]))
def _try_recover(self):
assert hasattr(self, "execution_plan")
workers = self.workers
logger.info("Health checking all workers...")
checks = []
for ev in workers.remote_workers():
_, obj_ref = ev.sample_with_count.remote()
checks.append(obj_ref)
healthy_workers = []
for i, obj_ref in enumerate(checks):
w = workers.remote_workers()[i]
try:
ray.get(obj_ref)
healthy_workers.append(w)
logger.info("Worker {} looks healthy".format(i + 1))
except RayError:
logger.exception("Removing unhealthy worker {}".format(i + 1))
try:
w.__ray_terminate__.remote()
except Exception:
logger.exception("Error terminating unhealthy worker")
if len(healthy_workers) < 1:
raise RuntimeError(
"Not enough healthy workers remain to continue.")
logger.warning("Recreating execution plan after failure")
workers.reset(healthy_workers)
self.train_exec_impl = self.execution_plan(workers, self.config)
@override(Trainable)
def _export_model(self, export_formats: List[str],
export_dir: str) -> Dict[str, str]:
ExportFormat.validate(export_formats)
exported = {}
if ExportFormat.CHECKPOINT in export_formats:
path = os.path.join(export_dir, ExportFormat.CHECKPOINT)
self.export_policy_checkpoint(path)
exported[ExportFormat.CHECKPOINT] = path
if ExportFormat.MODEL in export_formats:
path = os.path.join(export_dir, ExportFormat.MODEL)
self.export_policy_model(path)
exported[ExportFormat.MODEL] = path
return exported
def import_model(self, import_file: str):
if not os.path.exists(import_file):
raise FileNotFoundError(
"`import_file` '{}' does not exist! Can't import Model.".
format(import_file))
# Get the format of the given file.
import_format = "h5" # TODO(sven): Support checkpoint loading.
ExportFormat.validate([import_format])
if import_format != ExportFormat.H5:
raise NotImplementedError
else:
return self.import_policy_model_from_h5(import_file)
def __getstate__(self) -> dict:
state = {}
if hasattr(self, "workers"):
state["worker"] = self.workers.local_worker().save()
if hasattr(self, "optimizer") and hasattr(self.optimizer, "save"):
state["optimizer"] = self.optimizer.save()
return state
def __setstate__(self, state: dict):
if "worker" in state:
self.workers.local_worker().restore(state["worker"])
remote_state = ray.put(state["worker"])
for r in self.workers.remote_workers():
r.restore.remote(remote_state)
if "optimizer" in state:
self.optimizer.restore(state["optimizer"])
@staticmethod
def with_updates(**overrides) -> Type["Trainer"]:
raise NotImplementedError(
"`with_updates` may only be called on Trainer sub-classes "
"that were generated via the `ray.rllib.agents.trainer_template."
"build_trainer()` function!")
def _register_if_needed(self, env_object: Union[str, EnvType]):
if isinstance(env_object, str):
return env_object
elif isinstance(env_object, type):
name = env_object.__name__
register_env(name, lambda config: env_object(config))
return name
raise ValueError(
"{} is an invalid env specification. ".format(env_object) +
"You can specify a custom env as either a class "
"(e.g., YourEnvCls) or a registered env id (e.g., \"your_env\").")
| true
| true
|
1c48e3476ce49df86e7e1dd858698bb7a98a9695
| 6,028
|
py
|
Python
|
EWR/ab6_v2/ab6.py
|
Koopakiller/Edu
|
575c43dae24a4432e8c8fb2eda96e948cc33ec32
|
[
"MIT"
] | null | null | null |
EWR/ab6_v2/ab6.py
|
Koopakiller/Edu
|
575c43dae24a4432e8c8fb2eda96e948cc33ec32
|
[
"MIT"
] | null | null | null |
EWR/ab6_v2/ab6.py
|
Koopakiller/Edu
|
575c43dae24a4432e8c8fb2eda96e948cc33ec32
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# Author: Tom Lambert
# Content: main program for EWR/ab6 (sorting algorithms)
from __future__ import print_function
from Sort import *
import os.path
import time
def print_line():
"""Gibt eine Trennlinie in der Konsole aus."""
print("-------------------------------------------------------------------------------------")
def print_list(lst):
"""Gibt eine Liste aus."""
# Entspricht der Standard-Python-Ausgabe, jedoch mit lesbarer Darstellung von Umlauten
print("[ '{0}' ]".format("', '".join(lst)))
def input_file_name(msg):
"""Fragt einen Dateipfad vom Benutzer ab. Bei Falscheingabe wird er erneut gefragt."""
while True:
user_input = raw_input(msg)
if os.path.isfile(user_input):
return user_input
print("Die Datei existiert nicht!")
def compare_lists(a, b):
"""
Vergleicht 2 Listen mit einander und bestimmt ob diese gleich sind; falls nicht, wie viele Elemente verschieden
sind. Sollten die Längen unterschiedlich sein, werden nur diese zurück gegeben.
"""
if len(a) != len(b):
return "len", len(a), len(b)
else:
counter = 0
for i in range(0, len(a)):
if a[i] != b[i]:
counter += 1
if counter == 0:
return "ok", None, None
else:
return "diff", counter, len(a) - counter
def main():
"""Führt die Logik des Programms aus."""
print("Dieses Programm sortiert eine Liste mit Wörtern mit verschiedenen Algorithmen und "
"wertet die unterschiedlichen Vorgehensweißen statistisch aus.")
print("Die zu sortierenden Wörter werden von einer Datei eingelesen (durch Leerzeichen getrennt).")
if __name__ == "__main__":
path = input_file_name("Geben Sie eine Datei mit zu sortierenden Wörtern an: ")
else:
path = "test.txt"
print("Das Programm wird nicht im Nutzer-Kontext ausgeführt, daher wird 'test.txt' als Datei genutzt.")
if not os.path.isfile(path):
print("Die Datei '{0}' existiert nicht.".format(path))
return
print()
# noinspection PyBroadException
try:
        with open(path, "r") as file_obj:
            file_content = file_obj.read()
        words = file_content.split(" ")
except:
print("Ein unbekannter Fehler ist aufgetreten. Das Programm wird beendet.")
return
print_line()
print()
words_distinct = list(set(words))
words_sorted = list(words)
time_start = time.time()
words_sorted.sort()
time_end = time.time()
print("Die folgenden {0} Wörter wurden gefunden:".format(len(words)))
print_list(words)
print()
if len(words) == len(words_distinct):
print("Die Liste der Wörter enthält keine doppelten Einträge.")
else:
print("Die Liste der Wörter enthält doppelte Einträge.")
print("Dies sind die {0} eindeutigen Wörter:".format(len(words_distinct)))
print_list(words_distinct)
print()
print("Mit Pythons Standard-sort-Methode sortiert, ergibt sich folgende Liste:")
print_list(words_sorted)
print("Diese Sortierung hat {0}ms gedauert.".format((time_end - time_start) * 1000))
sort_algorithms = {
"Gnome Sort": lambda sort: sort.gnome_sort(words),
"Quick Sort": lambda sort: sort.quick_sort(words),
"Insertion Sort": lambda sort: sort.insertion_sort(words)
}
succeeded = 0
for key in sort_algorithms:
print()
print_line()
print()
# https://stackoverflow.com/a/7370824/1623754
sort = Sort()
time_start = time.time()
result = sort_algorithms[key](sort)
time_end = time.time()
print("'{0}' hat folgende sortierte Liste zurück gegeben:".format(key))
print_list(result)
print("Die Sortierung hat {0}ms gedauert".format((time_end - time_start) * 1000))
print("Statistik über die ausgeführten Operationen:")
print(" - swap (Elemente tauschen): . . . . . . . . . . . {0}".format(sort.counter_swap))
print(" - Element zu einer Liste hinzufügen: . . . . . . {0}".format(sort.counter_add_item_to_result_list))
print(" - Liste kopieren (für gleiche Start-Bedingungen): {0}".format(sort.counter_copy_list))
print(" - Element aus Liste abrufen: . . . . . . . . . . {0}".format(sort.counter_get_item_from_list))
print(" - 2 Elemente vergleichen: . . . . . . . . . . . . {0}".format(sort.counter_item_compare))
print(" - Element in Liste zuweisen: . . . . . . . . . . {0}".format(sort.counter_list_item_assignment))
print(" - Rekursiver Funktionsaufruf: . . . . . . . . . . {0}".format(sort.counter_recursive_call))
print(" - Aufrufe der Sortier-Funktion: . . . . . . . . . {0}".format(sort.counter_sort_call))
print(" - Aufteilen einer Liste: . . . . . . . . . . . . {0}".format(sort.counter_split_list))
print()
print("Die von '{0}' sortierte Liste wird mit der von Python sortierten Liste verglichen.".format(key))
compare = compare_lists(words_sorted, result)
print("Der Vergleich wurde beendet, das Ergebnis lautet:")
if compare[0] == "ok":
print("Die Listen stimmen in allen {0} Elementen überein.".format(len(result)))
succeeded += 1
elif compare[0] == "len":
print("Die Längen der Listen ({0} und {1}) stimmen nicht überein.".format(compare[1], compare[2]))
elif compare[0] == "diff":
print("Die Listen stimmen nicht überein. {0} Elemente sind unterschiedlich, {1} sind gleich."
.format(compare[1], compare[2]))
else:
print("Unbekanntes Ergebnis. Die Listen stimmen vermutlich nicht überein.")
print()
print_line()
print()
print("{0} Sortieralgorithmen arbeiten korrekt, {1} nicht.".format(succeeded, len(sort_algorithms) - succeeded))
main()  # always run main(); __name__ is checked inside where necessary.
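# --- Illustrative sketch (editor's addition, not part of the source file) ---
# The Sort class is imported from an external module and not shown here. As a
# hedged illustration only, the gnome sort the program times could look like
# this minimal version (the operation counters of the real Sort class are omitted):
def gnome_sort_sketch(items):
    """Return a sorted copy of items using gnome sort."""
    result = list(items)  # work on a copy so the input stays unchanged
    i = 0
    while i < len(result):
        if i == 0 or result[i - 1] <= result[i]:
            i += 1  # step forward while the pair is in order
        else:
            result[i - 1], result[i] = result[i], result[i - 1]  # swap back
            i -= 1
    return result

assert gnome_sort_sketch(["b", "a", "c"]) == ["a", "b", "c"]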
| 38.641026
| 117
| 0.619277
| true
| true
|
1c48e366c9660d0f0f4689b1ce94304822ae6b30
| 2,813
|
py
|
Python
|
resources/recipe.py
|
guilhermegouw/smilecook
|
a91937b329e5d0f9bd6d9700c97547bcda9a2564
|
[
"MIT"
] | null | null | null |
resources/recipe.py
|
guilhermegouw/smilecook
|
a91937b329e5d0f9bd6d9700c97547bcda9a2564
|
[
"MIT"
] | null | null | null |
resources/recipe.py
|
guilhermegouw/smilecook
|
a91937b329e5d0f9bd6d9700c97547bcda9a2564
|
[
"MIT"
] | null | null | null |
from flask import request
from flask_restful import Resource
from http import HTTPStatus
from models.recipe import Recipe, recipe_list
class RecipeListResource(Resource):
def get(self):
data = []
        for recipe in recipe_list:
            if recipe.is_publish is True:
                data.append(recipe.data)
return {"data": data}, HTTPStatus.OK
def post(self):
data = request.get_json()
recipe = Recipe(
name=data["name"],
description=data["description"],
num_of_servings=data["num_of_servings"],
cook_time=data["cook_time"],
directions=data["directions"],
)
recipe_list.append(recipe)
return recipe.data, HTTPStatus.CREATED
class RecipeResource(Resource):
def get(self, recipe_id):
recipe = next(
(
recipe
for recipe in recipe_list
                if recipe.id == recipe_id and recipe.is_publish is True
),
None,
)
if recipe is None:
return {"message": "recipe not found"}, HTTPStatus.NOT_FOUND
return recipe.data, HTTPStatus.OK
def put(self, recipe_id):
data = request.get_json()
recipe = next(
            (recipe for recipe in recipe_list if recipe.id == recipe_id), None
)
if recipe is None:
return {"message": "recipe not found"}, HTTPStatus.NOT_FOUND
recipe.name = data["name"]
recipe.description = data["description"]
recipe.num_of_servings = data["num_of_servings"]
recipe.cook_time = data["cook_time"]
recipe.directions = data["directions"]
return recipe.data, HTTPStatus.OK
def delete(self, recipe_id):
recipe = next(
            (recipe for recipe in recipe_list if recipe.id == recipe_id), None
)
if recipe is None:
return {"message": "recipe not found"}, HTTPStatus.NOT_FOUND
recipe_list.remove(recipe)
return {}, HTTPStatus.NO_CONTENT
class RecipePublishResource(Resource):
def put(self, recipe_id):
recipe = next(
(recipe for recipe in recipe_list if recipe.id == recipe_id), None
)
if recipe is None:
return {"message": "recipe not found"}, HTTPStatus.NOT_FOUND
recipe.is_publish = True
return {}, HTTPStatus.NO_CONTENT
def delete(self, recipe_id):
recipe = next(
(recipe for recipe in recipe_list if recipe.id == recipe_id), None
)
if recipe is None:
return {"message": "recipe not found"}, HTTPStatus.NOT_FOUND
recipe.is_publish = False
return {}, HTTPStatus.NO_CONTENT
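# --- Illustrative wiring sketch (editor's addition, not part of the source file) ---
# These Resource classes still have to be registered on a flask_restful Api
# instance to become reachable. The URL rules below are assumptions chosen for
# illustration; the project may use different routes.
from flask import Flask
from flask_restful import Api

app = Flask(__name__)
api = Api(app)
api.add_resource(RecipeListResource, "/recipes")
api.add_resource(RecipeResource, "/recipes/<int:recipe_id>")
api.add_resource(RecipePublishResource, "/recipes/<int:recipe_id>/publish")

if __name__ == "__main__":
    app.run(port=5000, debug=True)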
| 26.046296
| 78
| 0.589051
| true
| true
|
1c48e39c58ac9d644b19ec2f1635415e7f59c198
| 5,445
|
py
|
Python
|
AppServer/_php_runtime.py
|
Honcharov12/appscale
|
be1cf90fcd24f1a5a88848f7eb73331b6e4e66d9
|
[
"Apache-2.0"
] | null | null | null |
AppServer/_php_runtime.py
|
Honcharov12/appscale
|
be1cf90fcd24f1a5a88848f7eb73331b6e4e66d9
|
[
"Apache-2.0"
] | null | null | null |
AppServer/_php_runtime.py
|
Honcharov12/appscale
|
be1cf90fcd24f1a5a88848f7eb73331b6e4e66d9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Convenience wrapper for starting an appengine tool."""
import os
import sys
if not hasattr(sys, 'version_info'):
sys.stderr.write('Very old versions of Python are not supported. Please '
'use version 2.7.\n')
sys.exit(1)
version_tuple = tuple(sys.version_info[:2])
if version_tuple < (2, 7):
sys.stderr.write('Error: Python %d.%d is not supported. Please use '
'version 2.7.\n' % version_tuple)
sys.exit(1)
def _get_dir_path(sibling):
"""Get a path to the directory of this script.
By default, the canonical path (symlinks resolved) will be returned. In some
environments the canonical directory is not sufficient because different
parts of the SDK are referenced by symlinks, including this very module's
file. In this case, the non-canonical path to this file's directory will be
returned (i.e., the directory where the symlink lives, not the directory
where it points).
Args:
    sibling: Relative path to a sibling of this module file. Choose a sibling
that is potentially symlinked into the parent directory.
Returns:
A directory name.
Raises:
ValueError: If no proper path could be determined.
"""
py_file = __file__.replace('.pyc', '.py')
dir_paths = [os.path.abspath(os.path.dirname(os.path.realpath(py_file))),
os.path.abspath(os.path.dirname(py_file))]
for dir_path in dir_paths:
sibling_path = os.path.join(dir_path, sibling)
if os.path.exists(sibling_path):
return dir_path
  raise ValueError('Could not determine directory that contains both this '
'file and %s.' % sibling)
_DIR_PATH = _get_dir_path(os.path.join('lib', 'ipaddr'))
_SCRIPT_DIR = os.path.join(_DIR_PATH, 'google', 'appengine', 'tools')
_DEVAPPSERVER2_DIR = os.path.join(
_DIR_PATH, 'google', 'appengine', 'tools', 'devappserver2')
_PHP_RUNTIME_DIR = os.path.join(_DEVAPPSERVER2_DIR, 'php')
_PYTHON_RUNTIME_DIR = os.path.join(_DEVAPPSERVER2_DIR, 'python')
_STUB_DEPENDENCIES = [
os.path.join(_DIR_PATH, 'lib', 'antlr3'),
os.path.join(_DIR_PATH, 'lib', 'fancy_urllib'),
os.path.join(_DIR_PATH, 'lib', 'ipaddr'),
os.path.join(_DIR_PATH, 'lib', 'yaml-3.10'),
]
EXTRA_PATHS = _STUB_DEPENDENCIES + [
_DIR_PATH,
os.path.join(_DIR_PATH, 'lib', 'simplejson'),
os.path.join(_DIR_PATH, 'lib', 'django-1.4'),
os.path.join(_DIR_PATH, 'lib', 'jinja2-2.6'),
os.path.join(_DIR_PATH, 'lib', 'protorpc'),
os.path.join(_DIR_PATH, 'lib', 'PyAMF-0.6.1'),
os.path.join(_DIR_PATH, 'lib', 'markupsafe-0.15'),
os.path.join(_DIR_PATH, 'lib', 'webob-1.2.3'),
os.path.join(_DIR_PATH, 'lib', 'webapp2-2.5.2'),
]
_DEVAPPSERVER2_PATHS = _STUB_DEPENDENCIES + [
_DIR_PATH,
os.path.join(_DIR_PATH, 'lib', 'concurrent'),
os.path.join(_DIR_PATH, 'lib', 'cherrypy'),
os.path.join(_DIR_PATH, 'lib', 'jinja2-2.6'),
os.path.join(_DIR_PATH, 'lib', 'webob-1.2.3'),
os.path.join(_DIR_PATH, 'lib', 'webapp2-2.5.1'),
]
_PHP_RUNTIME_PATHS = [
_DIR_PATH,
os.path.join(_DIR_PATH, 'lib', 'concurrent'),
os.path.join(_DIR_PATH, 'lib', 'cherrypy'),
os.path.join(_DIR_PATH, 'lib', 'yaml-3.10'),
]
_PYTHON_RUNTIME_PATHS = [
_DIR_PATH,
os.path.join(_DIR_PATH, 'lib', 'concurrent'),
os.path.join(_DIR_PATH, 'lib', 'cherrypy'),
os.path.join(_DIR_PATH, 'lib', 'fancy_urllib'),
os.path.join(_DIR_PATH, 'lib', 'protorpc'),
os.path.join(_DIR_PATH, 'lib', 'yaml-3.10'),
]
_BOOTSTAP_NAME_TO_REAL_NAME = {
'dev_appserver.py': 'devappserver2.py',
'_php_runtime.py': 'runtime.py',
'_python_runtime.py': 'runtime.py',
}
_SCRIPT_TO_DIR = {
'dev_appserver.py': _DEVAPPSERVER2_DIR,
'_php_runtime.py': _PHP_RUNTIME_DIR,
'_python_runtime.py': _PYTHON_RUNTIME_DIR,
}
_SYS_PATH_ADDITIONS = {
'dev_appserver.py': _DEVAPPSERVER2_PATHS,
'_php_runtime.py': _PHP_RUNTIME_PATHS,
'_python_runtime.py': _PYTHON_RUNTIME_PATHS,
}
def fix_sys_path(extra_extra_paths=()):
"""Fix the sys.path to include our extra paths.
fix_sys_path should be called before running testbed-based unit tests so that
third-party modules are correctly added to sys.path.
"""
sys.path[1:1] = EXTRA_PATHS
def _run_file(file_path, globals_, script_dir=_SCRIPT_DIR):
"""Execute the file at the specified path with the passed-in globals."""
script_name = os.path.basename(file_path)
sys.path = _SYS_PATH_ADDITIONS[script_name] + sys.path
if 'google' in sys.modules:
del sys.modules['google']
script_dir = _SCRIPT_TO_DIR.get(script_name, script_dir)
script_name = _BOOTSTAP_NAME_TO_REAL_NAME.get(script_name, script_name)
script_path = os.path.join(script_dir, script_name)
execfile(script_path, globals_)
if __name__ == '__main__':
_run_file(__file__, globals())
| 29.432432
| 79
| 0.69146
| true
| true
|
1c48e444517f9faa08b4651a0a1ec63b8eea2012
| 2,323
|
py
|
Python
|
events/auth.py
|
hamk-uas/TavastiaEventsOld
|
b808a1418ee89ba1e774c814364e5b55ea4f9a2c
|
[
"MIT"
] | null | null | null |
events/auth.py
|
hamk-uas/TavastiaEventsOld
|
b808a1418ee89ba1e774c814364e5b55ea4f9a2c
|
[
"MIT"
] | null | null | null |
events/auth.py
|
hamk-uas/TavastiaEventsOld
|
b808a1418ee89ba1e774c814364e5b55ea4f9a2c
|
[
"MIT"
] | null | null | null |
from rest_framework import authentication
from rest_framework import exceptions
from events.models import DataSource
from django.utils.translation import ugettext_lazy as _
from django.contrib.gis.db import models
from django.contrib.auth import get_user_model
from .permissions import UserModelPermissionMixin
class ApiKeyAuthentication(authentication.BaseAuthentication):
def authenticate(self, request):
# django converts 'apikey' to 'HTTP_APIKEY' outside runserver
api_key = request.META.get('apikey') or request.META.get('HTTP_APIKEY')
if not api_key:
return None
data_source = self.get_data_source(api_key=api_key)
user = ApiKeyUser.objects.get_or_create(data_source=data_source)[0]
return user, ApiKeyAuth(data_source)
def authenticate_header(self, request):
"""
Return a string to be used as the value of the `WWW-Authenticate`
header in a `401 Unauthenticated` response, or `None` if the
authentication scheme should return `403 Permission Denied` responses.
"""
return "Api key authentication failed."
@staticmethod
def get_data_source(api_key):
try:
data_source = DataSource.objects.get(api_key=api_key)
except DataSource.DoesNotExist:
raise exceptions.AuthenticationFailed(_(
"Provided API key does not match any organization on record. "
"Please contact the API support staff to obtain a valid API key "
"and organization identifier for POSTing your events."))
return data_source
class ApiKeyUser(get_user_model(), UserModelPermissionMixin):
data_source = models.OneToOneField(DataSource, primary_key=True)
def get_display_name(self):
return 'API key from data source %s' % self.data_source
def __str__(self):
return self.get_display_name()
def get_default_organization(self):
return self.data_source.owner
def is_admin(self, publisher):
return self.data_source.owner == publisher
def is_regular_user(self, publisher):
return False
class ApiKeyAuth(object):
def __init__(self, data_source):
self.data_source = data_source
def get_authenticated_data_source(self):
return self.data_source
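# --- Illustrative configuration sketch (editor's addition, not part of the source file) ---
# For ApiKeyAuthentication to run on incoming requests it must be listed among
# DRF's authentication classes. A settings.py fragment could look like this;
# the dotted path assumes the module lives at events/auth.py as shown above.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'events.auth.ApiKeyAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    ),
}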
| 35.19697
| 81
| 0.708567
| true
| true
|
1c48e4a551fe56da4659b2751e18321b2bd23989
| 884
|
py
|
Python
|
torchnmf/metrics.py
|
akashpalrecha/pytorch-NMF
|
21f6589bf25e2ec3e90edf7d3f7eec538ce04fa0
|
[
"MIT"
] | null | null | null |
torchnmf/metrics.py
|
akashpalrecha/pytorch-NMF
|
21f6589bf25e2ec3e90edf7d3f7eec538ce04fa0
|
[
"MIT"
] | null | null | null |
torchnmf/metrics.py
|
akashpalrecha/pytorch-NMF
|
21f6589bf25e2ec3e90edf7d3f7eec538ce04fa0
|
[
"MIT"
] | null | null | null |
import torch
from operator import mul
from functools import reduce
from torch.nn import functional as F
def KL_divergence(predict, target):
return (target * (target / predict).log()).sum() - target.sum() + predict.sum()
def Euclidean(predict, target):
return F.mse_loss(predict, target, reduction='sum') / 2
def IS_divergence(predict, target):
div = target / predict
return div.sum() - div.log().sum() - reduce(mul, target.shape)
def Beta_divergence(predict, target, beta=2):
if beta == 2:
return Euclidean(predict, target)
elif beta == 1:
return KL_divergence(predict, target)
elif beta == 0:
return IS_divergence(predict, target)
else:
bminus = beta - 1
return (target.pow(beta).sum() + bminus * predict.pow(beta).sum() - beta * (
target * predict.pow(bminus)).sum()) / (beta * bminus)
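# --- Illustrative usage sketch (editor's addition, not part of the source file) ---
# Beta_divergence interpolates between the three named divergences: beta=2 is
# the (halved) squared Euclidean distance, beta=1 the generalized KL
# divergence, and beta=0 the Itakura-Saito divergence. A quick sanity check
# on random, strictly positive matrices:
if __name__ == "__main__":
    predict = torch.rand(4, 5) + 0.1
    target = torch.rand(4, 5) + 0.1
    for beta in (0, 0.5, 1, 2):
        print(beta, Beta_divergence(predict, target, beta).item())
    # Identical inputs give (numerically) zero divergence.
    assert Beta_divergence(target, target, 1).abs() < 1e-4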
| 28.516129
| 84
| 0.644796
| true
| true
|
1c48e4b6f11915b14be23ef1c55ce7a160489247
| 2,701
|
py
|
Python
|
backend/tests/__init__.py
|
Nuqlear/voila
|
05ada753425ee62e1edd06f945e58e29e808409b
|
[
"MIT"
] | 2
|
2017-12-12T14:28:43.000Z
|
2018-01-24T10:58:27.000Z
|
backend/tests/__init__.py
|
Nuqlear/voila
|
05ada753425ee62e1edd06f945e58e29e808409b
|
[
"MIT"
] | 21
|
2020-03-05T18:58:11.000Z
|
2022-02-02T20:00:34.000Z
|
backend/tests/__init__.py
|
Nuqlear/voila
|
05ada753425ee62e1edd06f945e58e29e808409b
|
[
"MIT"
] | 2
|
2017-12-13T22:43:56.000Z
|
2018-01-24T17:14:29.000Z
|
import asyncio
import json
import psycopg2
import tornado.ioloop
import tornado.platform.asyncio
from sqlalchemy.schema import CreateTable, DropTable
from sqlalchemy.ext.compiler import compiles
from tornado.httpclient import AsyncHTTPClient
from tornado.testing import AsyncHTTPTestCase
from vobla.app import TornadoApplication
from vobla.db import metadata
@compiles(DropTable, "postgresql")
def _compile_drop_table(element, compiler, **kwargs):
return compiler.visit_drop_table(element) + " CASCADE"
class TestMixin(AsyncHTTPTestCase):
async def recreate_tables(self):
async with self.pg.acquire() as conn:
for table in metadata.tables.values():
drop_expr = DropTable(table)
try:
await conn.execute(drop_expr)
except psycopg2.ProgrammingError:
pass
async with self.pg.acquire() as conn:
for table in metadata.tables.values():
create_expr = CreateTable(table)
await conn.execute(create_expr)
@classmethod
def setUpClass(cls):
super(TestMixin, cls).setUpClass()
def setUp(self):
super(TestMixin, self).setUp()
self.pg = self._app.pg
self.minio = self._app.minio
asyncio.get_event_loop().run_until_complete(self.recreate_tables())
def get_new_ioloop(self):
io_loop = tornado.platform.asyncio.AsyncIOLoop()
asyncio.set_event_loop(io_loop.asyncio_loop)
return io_loop
def get_app(self):
return TornadoApplication()
async def fetch(self, url, *ar, **kw):
client = AsyncHTTPClient(self.io_loop)
if 'raise_error' not in kw:
kw['raise_error'] = False
resp = await client.fetch(self.get_url(url), *ar, **kw)
return resp
async def fetch_json(self, url, *ar, **kw):
if 'body' in kw:
kw['body'] = json.dumps(kw['body'])
if 'headers' not in kw:
kw['headers'] = {}
kw['headers']['Content-Type'] = 'application/json'
kw['headers']['Accept'] = 'application/json'
resp = await self.fetch(url, *ar, **kw)
resp._body = json.loads(resp.body)
return resp
@staticmethod
def assertValidationError(resp, nonvalidated_fields, code=422):
assert resp.code == code
assert 'error' in resp.body
assert 'fields' in resp.body['error']
if isinstance(nonvalidated_fields, list):
for field in nonvalidated_fields:
assert field in resp.body['error']['fields']
else:
assert nonvalidated_fields in resp.body['error']['fields']
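# --- Illustrative usage sketch (editor's addition, not part of the source file) ---
# A test module built on TestMixin could look like the following. The URL and
# expected status code are assumptions for illustration, and running it
# requires the Postgres/minio services the TornadoApplication expects.
from tornado.testing import gen_test

class PingTestCase(TestMixin):
    @gen_test
    async def test_ping_returns_ok(self):
        resp = await self.fetch_json('/api/ping', method='GET')
        assert resp.code == 200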
| 32.939024
| 75
| 0.632358
| true
| true
|
1c48e57d0a928db30d85d6b55d28985d7463fada
| 5,564
|
py
|
Python
|
pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxr/interface/get.py
|
rohit04saluja/genielibs
|
e3a89932b807075f45a611cb46ca41a4fa6fe240
|
[
"Apache-2.0"
] | null | null | null |
pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxr/interface/get.py
|
rohit04saluja/genielibs
|
e3a89932b807075f45a611cb46ca41a4fa6fe240
|
[
"Apache-2.0"
] | null | null | null |
pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxr/interface/get.py
|
rohit04saluja/genielibs
|
e3a89932b807075f45a611cb46ca41a4fa6fe240
|
[
"Apache-2.0"
] | null | null | null |
"""Common get info functions for interface"""
# Python
import re
import logging
# unicon
from unicon.core.errors import SubCommandFailure
# Genie
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.libs.parser.utils.common import Common
log = logging.getLogger(__name__)
def get_interface_ip_address(device, interface):
""" Get interface ip_address from device
Args:
interface('str'): Interface to get address
device ('obj'): Device object
Returns:
None
interface ip_address ('str')
Raises:
None
"""
log.info("Getting interface address for {interface} on {device}"
.format(interface=interface, device=device.name))
cmd = "show ip interface brief"
try:
out = device.parse(cmd)
except SubCommandFailure:
log.error("Invalid command")
except Exception as e:
log.error("Failed to parse '{cmd}': {e}".format(cmd=cmd, e=e))
return
address = out["interface"].get(interface, {}).get("ip_address", None)
if interface not in out["interface"]:
return
elif (address == "unassigned" or
"ip_address" not in out["interface"][interface]):
return
return address
def get_interface_information(device, interface_list):
"""Get interface information from device for a list of interfaces
Args:
List['string']: Interfaces to query information on
device ('obj'): Device object
Returns:
List containing Dictionaries for sucesses
"""
results = {}
empty_ints = []
for interface in interface_list:
try:
data = device.parse('show interfaces ' + interface)
except SchemaEmptyParserError:
empty_ints.append(interface)
data = None
results[interface] = data
if empty_ints:
log.error('No interface information found for {}'.format(empty_ints))
return results
def get_interface_ipv4_address(device, interface):
"""Get the ip address for an interface on target device
Args:
interface ('string'): interface to get address for
device: ('obj'): Device Object
Returns:
None
String with interface ip address
"""
try:
data = device.parse('show interfaces ' + interface)
except SchemaEmptyParserError as e:
log.error('No interface information found for {}: {}'.format(interface, e))
return None
    interface = Common.convert_intf_name(interface)
ip_dict = data[interface].get('ipv4')
ip = None
if ip_dict:
ip = list(ip_dict)[0]
return ip
def get_ipv6_interface_ip_address(device, interface, link_local=False):
""" Get interface ip address from device
Args:
interface('str'): Interface to get address
device ('obj'): Device object
link_local ('bool'): Link local address. Default: False
Returns:
None
ip_address ('str'): If has multiple addresses
will return the first one.
Raises:
None
"""
try:
        if '.' in interface and interface.split('.')[1] == '0':
            interface = interface.split('.')[0]
        out = device.parse('show ipv6 interface {interface}'.format(interface=interface))
except SchemaEmptyParserError as e:
log.error('No interface information found for {}: {}'.format(interface, e))
return None
# Example output
# {
# 'GigabitEthernet0/0/0/0': {
# 'enabled': True,
# 'oper_status': 'up',
# 'vrf': 'default',
# 'int_status': 'up',
# 'ipv6': {
# 'incomplete_protocol_adj': '0',
# 'complete_glean_adj': '0',
# 'dropped_protocol_req': '0',
# 'dropped_glean_req': '0',
# 'nd_router_adv': '1800',
# 'complete_protocol_adj': '0',
# 'icmp_unreachables': 'enabled',
# 'ipv6_link_local': 'fe80::250:56ff:fe8d:8d58',
# 'incomplete_glean_adj': '0',
# 'nd_adv_duration': '160-240',
# 'ipv6_groups': ['ff02::1:ff00:1', 'ff02::1:ff8d:8d58', 'ff02::2', 'ff02::1'],
# 'nd_adv_retrans_int': '0',
# 'nd_cache_limit': '1000000000',
# 'stateless_autoconfig': True,
# 'icmp_redirects': 'disabled',
# 'dad_attempts': '1',
# 'ipv6_mtu': '1514',
# 'ipv6_mtu_available': '1500',
# '2001:112::1/64': {
# 'ipv6_subnet': '2001:112::',
# 'ipv6_prefix_length': '64',
# 'ipv6': '2001:112::1',
# },
# 'nd_dad': 'enabled',
# 'nd_reachable_time': '0',
# 'table_id': '0xe0800000',
# },
# 'vrf_id': '0x60000000',
# 'ipv6_enabled': True,
# },
# }
# get the interface
intf = list(out.keys())[0]
    intf = Common.convert_intf_name(intf)
if link_local:
return out[intf]['ipv6']['ipv6_link_local']
for sub_key, sub_value in out[intf]['ipv6'].items():
if type(sub_value) == dict:
sub_value_keys = list(sub_value.keys())
if 'ipv6' in sub_value_keys:
return sub_value['ipv6']
return None
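# --- Illustrative usage sketch (editor's addition, not part of the source file) ---
# These helpers expect a connected pyATS/Genie device object. The testbed file
# name, device name, and interface below are assumptions for illustration.
if __name__ == '__main__':
    from genie.testbed import load
    testbed = load('testbed.yaml')
    device = testbed.devices['R1']
    device.connect()
    print(get_interface_ipv4_address(device, 'GigabitEthernet0/0/0/0'))
    print(get_ipv6_interface_ip_address(device, 'GigabitEthernet0/0/0/0',
                                        link_local=True))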
| 31.794286
| 95
| 0.550683
| true
| true
|
1c48e6ef9f0740fa89cde36c128c69f2475f34f2
| 6,480
|
py
|
Python
|
pcdet/models/detectors/self_voxel_scconv.py
|
EmiyaNing/OpenPCDet
|
41ff28209cb000b51626a0ed8593b0adbe3dd447
|
[
"Apache-2.0"
] | null | null | null |
pcdet/models/detectors/self_voxel_scconv.py
|
EmiyaNing/OpenPCDet
|
41ff28209cb000b51626a0ed8593b0adbe3dd447
|
[
"Apache-2.0"
] | null | null | null |
pcdet/models/detectors/self_voxel_scconv.py
|
EmiyaNing/OpenPCDet
|
41ff28209cb000b51626a0ed8593b0adbe3dd447
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from .detector3d_template import Detector3DTemplate
from ..model_utils.meter_utils import AverageMeter
from .. import roi_heads
class Voxel_SCCONV(Detector3DTemplate):
def __init__(self, model_cfg, num_class, dataset):
super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
self.module_list = self.build_networks()
self.forward_ret_dict = {}
self.main_consistency_meter = AverageMeter()
self.voxel_head_rcnn_cls_meter = AverageMeter()
def forward(self, batch_dict):
if self.training:
batch_dict = self.vfe(batch_dict)
batch_dict = self.backbone_3d(batch_dict)
batch_dict = self.map_to_bev_module(batch_dict)
batch_dict = self.backbone_2d(batch_dict)
batch_dict = self.dense_head(batch_dict)
batch_dict = self.roi_head(batch_dict)
self.forward_ret_dict['stage_one_box'] = batch_dict['stage_one_box']
self.forward_ret_dict['stage_one_cls'] = batch_dict['stage_one_cls']
self.forward_ret_dict['cur_epoch'] = batch_dict['cur_epoch']
main_batch_cls_preds, main_batch_box_preds = self.roi_head.generate_predicted_boxes(
batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=batch_dict['rcnn_cls'], box_preds=batch_dict['rcnn_reg']
)
self.forward_ret_dict['main_stage_two_box'] = main_batch_box_preds
self.forward_ret_dict['main_stage_two_cls'] = main_batch_cls_preds
self.forward_ret_dict['main_stage_two_labels'] = batch_dict['roi_labels']
loss, tb_dict, disp_dict = self.get_training_loss()
ret_dict = {
'loss': loss
}
return ret_dict, tb_dict, disp_dict
else:
batch_dict = self.vfe(batch_dict)
batch_dict = self.backbone_3d(batch_dict)
batch_dict = self.map_to_bev_module(batch_dict)
batch_dict = self.backbone_2d(batch_dict)
batch_dict = self.dense_head(batch_dict)
#batch_dict = self.roi_head(batch_dict)
pred_dicts, recall_dicts = self.post_processing(batch_dict)
return pred_dicts, recall_dicts
def get_training_loss(self):
disp_dict = {}
loss = 0
if not self.model_cfg.SELFKD:
loss_rpn, tb_dict = self.dense_head.get_loss()
loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
loss = loss_rpn + loss_rcnn
else:
loss_rpn, tb_dict = self.dense_head.get_loss()
loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
self.voxel_head_rcnn_cls_meter.update(tb_dict['rcnn_loss_cls'])
loss_main_kd, tb_mainkd_dict = self.get_main_roihead_self_distillation_loss()
loss = loss + loss_rpn + loss_rcnn + loss_main_kd * self.model_cfg.SELFWEIGHT
tb_dict.update(tb_mainkd_dict)
tb_dict['mean_voxel_rcnn_cls']= self.voxel_head_rcnn_cls_meter.avg
return loss, tb_dict, disp_dict
def get_main_roihead_self_distillation_loss(self):
        # Gather the stage-one and stage-two predictions.
#cost_function = nn.BCELoss(reduction='none')
cost_function = nn.KLDivLoss(reduction='none')
epoch = self.forward_ret_dict['cur_epoch']
        stage_one_cls = torch.sigmoid(self.forward_ret_dict['stage_one_cls'])
        stage_one_box = self.forward_ret_dict['stage_one_box']
        stage_two_cls = torch.sigmoid(self.forward_ret_dict['main_stage_two_cls'])
stage_two_box = self.forward_ret_dict['main_stage_two_box']
stage_two_label = self.forward_ret_dict['main_stage_two_labels']
batch_sz = stage_one_box.shape[0]
# filter once by confidence
one_confidence, _ = torch.max(stage_one_cls, dim=-1)
two_confidence, _ = torch.max(stage_two_cls, dim=-1)
if epoch < 10:
THRESH = (epoch + 1) / 20 + 0.05
else:
THRESH = 0.7
one_mask = one_confidence > THRESH
two_mask = two_confidence > THRESH
stage_one_cls, stage_one_box = stage_one_cls[one_mask], stage_one_box[one_mask]
        if stage_one_cls.shape[0] == 0:
tb_dict = {
'Self-kd-cls-loss': 0
}
return 0, tb_dict
stage_two_cls, stage_two_box, stage_two_label = stage_two_cls[two_mask], stage_two_box[two_mask], stage_two_label[two_mask]
one_hot_targets = torch.zeros(
*list(stage_two_cls.squeeze(-1).shape), self.num_class + 1, dtype=stage_two_cls.dtype, device=stage_two_cls.device
)
one_hot_targets.scatter_(-1, stage_two_label.unsqueeze(dim=-1).long(), stage_two_cls)
one_hot_targets = one_hot_targets[:, 1:]
        # Soften the one-hot targets so the KL term stays finite at zero entries.
zero_mask = one_hot_targets == 0
one_hot_targets[zero_mask] = 0.01
# match the one_stage_box and two_stage_box
num_teacher_box = stage_two_box.shape[0]
teacher_centers = stage_two_box[:, :3]
student_centers = stage_one_box[:, :3]
with torch.no_grad():
teacher_class = stage_two_label.unsqueeze(-1)
student_class = torch.max(stage_one_cls, dim=-1, keepdim=True)[1]
not_same_class = (teacher_class != student_class.T).float() # [Nt, Ns]
MAX_DISTANCE = 1000000
dist = teacher_centers[:, None, :] - student_centers[None, :, :] # [Nt, Ns, 3]
dist = (dist ** 2).sum(-1) # [Nt, Ns]
dist += not_same_class * MAX_DISTANCE # penalty on different classes
student_dist_of_teacher, student_index_of_teacher = dist.min(1) # [Nt]
# different from standard sess, we only consider distance<1m as matching
MATCHED_DISTANCE = 1
matched_student_mask = (student_dist_of_teacher < MATCHED_DISTANCE).float().unsqueeze(-1) # [Nt, 1]
matched_student_cls_preds = stage_one_cls[student_index_of_teacher]
        # nn.KLDivLoss expects log-probabilities as its input, so take the log
        # of the matched student predictions (clamped away from exact zero).
        cls_loss = cost_function(
            matched_student_cls_preds.clamp(min=1e-8).log(), one_hot_targets)
cls_loss = (cls_loss * matched_student_mask).sum() / (num_teacher_box * batch_sz)
self.main_consistency_meter.update(cls_loss.item())
tb_dict = {
'Self-kd-cls-loss': cls_loss.item(),
'Self-kd-main-cls-mean-loss': self.main_consistency_meter.avg,
}
return cls_loss, tb_dict
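# --- Illustrative sketch (editor's addition, not part of the source file) ---
# The distillation loss above pairs each teacher box with the nearest
# same-class student center and keeps only pairs closer than MATCHED_DISTANCE.
# A tiny standalone reproduction of that matching step with dummy centers:
if __name__ == '__main__':
    teacher_centers = torch.tensor([[0.0, 0.0, 0.0], [5.0, 0.0, 0.0]])
    student_centers = torch.tensor([[0.4, 0.0, 0.0], [5.0, 3.0, 0.0]])
    dist = teacher_centers[:, None, :] - student_centers[None, :, :]  # [Nt, Ns, 3]
    dist = (dist ** 2).sum(-1)                     # squared distances, [Nt, Ns]
    best_dist, best_idx = dist.min(1)
    matched = best_dist < 1.0                      # same threshold as MATCHED_DISTANCE
    print(best_idx.tolist(), matched.tolist())     # -> [0, 1] [True, False]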
| 47.29927
| 144
| 0.655093
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .detector3d_template import Detector3DTemplate
from ..model_utils.meter_utils import AverageMeter
from .. import roi_heads
class Voxel_SCCONV(Detector3DTemplate):
def __init__(self, model_cfg, num_class, dataset):
super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
self.module_list = self.build_networks()
self.forward_ret_dict = {}
self.main_consistency_meter = AverageMeter()
self.voxel_head_rcnn_cls_meter = AverageMeter()
def forward(self, batch_dict):
if self.training:
batch_dict = self.vfe(batch_dict)
batch_dict = self.backbone_3d(batch_dict)
batch_dict = self.map_to_bev_module(batch_dict)
batch_dict = self.backbone_2d(batch_dict)
batch_dict = self.dense_head(batch_dict)
batch_dict = self.roi_head(batch_dict)
self.forward_ret_dict['stage_one_box'] = batch_dict['stage_one_box']
self.forward_ret_dict['stage_one_cls'] = batch_dict['stage_one_cls']
self.forward_ret_dict['cur_epoch'] = batch_dict['cur_epoch']
main_batch_cls_preds, main_batch_box_preds = self.roi_head.generate_predicted_boxes(
batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=batch_dict['rcnn_cls'], box_preds=batch_dict['rcnn_reg']
)
self.forward_ret_dict['main_stage_two_box'] = main_batch_box_preds
self.forward_ret_dict['main_stage_two_cls'] = main_batch_cls_preds
self.forward_ret_dict['main_stage_two_labels'] = batch_dict['roi_labels']
loss, tb_dict, disp_dict = self.get_training_loss()
ret_dict = {
'loss': loss
}
return ret_dict, tb_dict, disp_dict
else:
batch_dict = self.vfe(batch_dict)
batch_dict = self.backbone_3d(batch_dict)
batch_dict = self.map_to_bev_module(batch_dict)
batch_dict = self.backbone_2d(batch_dict)
batch_dict = self.dense_head(batch_dict)
pred_dicts, recall_dicts = self.post_processing(batch_dict)
return pred_dicts, recall_dicts
def get_training_loss(self):
disp_dict = {}
loss = 0
if not self.model_cfg.SELFKD:
loss_rpn, tb_dict = self.dense_head.get_loss()
loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
loss = loss_rpn + loss_rcnn
else:
loss_rpn, tb_dict = self.dense_head.get_loss()
loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
self.voxel_head_rcnn_cls_meter.update(tb_dict['rcnn_loss_cls'])
loss_main_kd, tb_mainkd_dict = self.get_main_roihead_self_distillation_loss()
loss = loss + loss_rpn + loss_rcnn + loss_main_kd * self.model_cfg.SELFWEIGHT
tb_dict.update(tb_mainkd_dict)
tb_dict['mean_voxel_rcnn_cls'] = self.voxel_head_rcnn_cls_meter.avg
return loss, tb_dict, disp_dict
def get_main_roihead_self_distillation_loss(self):
cost_function = nn.KLDivLoss(reduction='none')
epoch = self.forward_ret_dict['cur_epoch']
stage_one_cls = F.sigmoid(self.forward_ret_dict['stage_one_cls'])
stage_one_box = self.forward_ret_dict['stage_one_box']
stage_two_cls = F.sigmoid(self.forward_ret_dict['main_stage_two_cls'])
stage_two_box = self.forward_ret_dict['main_stage_two_box']
stage_two_label = self.forward_ret_dict['main_stage_two_labels']
batch_sz = stage_one_box.shape[0]
one_confidence, _ = torch.max(stage_one_cls, dim=-1)
two_confidence, _ = torch.max(stage_two_cls, dim=-1)
if epoch < 10:
THRESH = (epoch + 1) / 20 + 0.05
else:
THRESH = 0.7
one_mask = one_confidence > THRESH
two_mask = two_confidence > THRESH
stage_one_cls, stage_one_box = stage_one_cls[one_mask], stage_one_box[one_mask]
if stage_one_cls.shape[0] == 0:
tb_dict = {
'Self-kd-cls-loss': 0
}
return 0, tb_dict
stage_two_cls, stage_two_box, stage_two_label = stage_two_cls[two_mask], stage_two_box[two_mask], stage_two_label[two_mask]
one_hot_targets = torch.zeros(
*list(stage_two_cls.squeeze(-1).shape), self.num_class + 1, dtype=stage_two_cls.dtype, device=stage_two_cls.device
)
one_hot_targets.scatter_(-1, stage_two_label.unsqueeze(dim=-1).long(), stage_two_cls)
one_hot_targets = one_hot_targets[:, 1:]
zero_mask = one_hot_targets == 0
one_hot_targets[zero_mask] = 0.01
num_teacher_box = stage_two_box.shape[0]
teacher_centers = stage_two_box[:, :3]
student_centers = stage_one_box[:, :3]
with torch.no_grad():
teacher_class = stage_two_label.unsqueeze(-1)
student_class = torch.max(stage_one_cls, dim=-1, keepdim=True)[1]
not_same_class = (teacher_class != student_class.T).float()
MAX_DISTANCE = 1000000
dist = teacher_centers[:, None, :] - student_centers[None, :, :]
dist = (dist ** 2).sum(-1)
dist += not_same_class * MAX_DISTANCE
student_dist_of_teacher, student_index_of_teacher = dist.min(1)
MATCHED_DISTANCE = 1
matched_student_mask = (student_dist_of_teacher < MATCHED_DISTANCE).float().unsqueeze(-1)
matched_student_cls_preds = stage_one_cls[student_index_of_teacher]
cls_loss = cost_function(matched_student_cls_preds, one_hot_targets)
cls_loss = (cls_loss * matched_student_mask).sum() / (num_teacher_box * batch_sz)
self.main_consistency_meter.update(cls_loss.item())
tb_dict = {
'Self-kd-cls-loss': cls_loss.item(),
'Self-kd-main-cls-mean-loss': self.main_consistency_meter.avg,
}
return cls_loss, tb_dict
| true
| true
|
1c48e9b3730119f4ed2b80f93c177cfdbda69294
| 6,949
|
py
|
Python
|
build_site.py
|
tjweisman/math_website
|
d51d8d9437769117d0f9ad80c372b5f8a1575e96
|
[
"MIT"
] | null | null | null |
build_site.py
|
tjweisman/math_website
|
d51d8d9437769117d0f9ad80c372b5f8a1575e96
|
[
"MIT"
] | null | null | null |
build_site.py
|
tjweisman/math_website
|
d51d8d9437769117d0f9ad80c372b5f8a1575e96
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
import os
import re
import shutil
from argparse import ArgumentParser
import markdown
from markdown_include.include import MarkdownInclude
from mako.template import Template
from mako.lookup import TemplateLookup
from ruamel.yaml import YAML
SCRIPT_DIR = "/home/teddy/math/web/personal"
SITES_LIST = "sites.yaml"
DEFAULT_SITE = "weisman"
TEMPLATE_DIR = "templates"
SITE_DATA = "site_data.yaml"
SITE_DIR = "site"
OUTPUT_DIR = "public_html"
DEFAULT_MARKDOWN_TEMPLATE="markdown_page.html"
HTMLFILE_REGEX = r".+\.html?$"
MDFILE_REGEX = r".+\.md$"
IGNORE_REGEX = ".*~$"
yaml = YAML(typ="safe")
class SiteBuilder:
def __init__(self, site_dir):
self.site_dir = os.path.join(SCRIPT_DIR, site_dir)
self.load_site_data()
self.ignore_patterns = [IGNORE_REGEX]
self.lookup_dirs = TemplateLookup(
directories=[os.path.join(self.site_dir, TEMPLATE_DIR),
os.path.join(self.site_dir, SITE_DIR)],
input_encoding='utf-8'
)
self.markdown_include = MarkdownInclude(
configs={'base_path':
os.path.join(self.site_dir, SITE_DIR)
}
)
def add_ignore_pattern(self, pattern):
self.ignore_patterns.append(pattern)
def add_ignores(self, ignores):
self.ignore_patterns += ignores
def mkoputdir(self, filename):
try:
os.makedirs(os.path.join(self.site_dir,
OUTPUT_DIR,
os.path.dirname(filename)))
except os.error:
pass
def load_site_data(self):
with open(os.path.join(self.site_dir, SITE_DATA), "r") as site_data_file:
self.site_data = yaml.load(site_data_file)
def process_markdown_file(self, filedir, filename):
title, template_file, html_output = get_mdfile_data(
os.path.join(self.site_dir, SITE_DIR, filename),
extensions=[self.markdown_include]
)
self.mkoputdir(filename)
template = self.lookup_dirs.get_template(template_file)
output_filename = change_ext(filename, ".html")
page_data = {"title": title,
"contents": html_output,
"directory":filedir,
"filename": output_filename}
with open(os.path.join(self.site_dir, OUTPUT_DIR,
output_filename),
"w", encoding='utf-8') as html_file:
html_file.write(template.render(site_data=self.site_data,
page_data=page_data))
def process_html_file(self, filename):
self.mkoputdir(filename)
template = self.lookup_dirs.get_template(filename)
with open(os.path.join(self.site_dir, OUTPUT_DIR, filename), "w",
encoding='utf-8') as html_oput:
html_oput.write(template.render(site_data=self.site_data))
def process_other_file(self, filename):
self.mkoputdir(filename)
shutil.copyfile(os.path.join(self.site_dir, SITE_DIR, filename),
os.path.join(self.site_dir, OUTPUT_DIR, filename))
def ignore_file(self, filename):
for regex in self.ignore_patterns:
if re.match(regex, filename):
return True
return False
def process_file(self, filedir, filename):
if self.ignore_file(filename):
pass
elif re.match(HTMLFILE_REGEX, filename):
self.process_html_file(filename)
elif re.match(MDFILE_REGEX, filename):
self.process_markdown_file(filedir, filename)
else:
self.process_other_file(filename)
def build_site(self):
site_files = os.walk(os.path.join(self.site_dir, SITE_DIR),
followlinks=True)
for dirpath, dirnames, filenames in site_files:
filedir = os.path.relpath(dirpath, os.path.join(self.site_dir, SITE_DIR))
if filedir == ".":
filedir = ""
for filename in filenames:
self.process_file(filedir, os.path.join(filedir, filename))
def clean_site(self):
try:
shutil.rmtree(os.path.join(self.site_dir, OUTPUT_DIR))
except FileNotFoundError:
pass
os.mkdir(os.path.join(self.site_dir, OUTPUT_DIR))
def get_mdfile_data(abspath, extensions=[]):
with open(abspath, "r", encoding='utf-8') as md_file:
line = md_file.readline()
title = None
template_file = DEFAULT_MARKDOWN_TEMPLATE
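# Leading "%"-prefixed lines form a small pandoc-style header: the first one gives
# the page title, an optional second one names the Mako template to render with.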
while line:
match = re.match("%\s*(.*)", line)
if match:
if not title:
title = match.group(1).strip()
else:
template_file = match.group(1).strip()
else:
break
line = md_file.readline()
html_output = markdown.markdown(md_file.read(), extensions=extensions)
return (title, template_file, html_output)
def change_ext(filename, new_ext):
"""return a new filename, with the extension changed.
"""
return re.sub(r"\.\w+$", new_ext, filename)
def ignore_file(filename, regex=IGNORE_REGEX):
return re.match(regex, filename)
def load_sites():
with open(os.path.join(SCRIPT_DIR, SITES_LIST), "r") as sites_list:
return yaml.load(sites_list)
def build_argument_parser():
parser = ArgumentParser()
parser.add_argument("-c", "--clean", action="store_true",
help="""Clean the site output rather than rebuilding the site""")
parser.add_argument("-r", "--rebuild", action="store_true",
help="""Clean the site output and then rebuild the site""")
parser.add_argument("--mkv", action="store_true",
help="""don't ignore mkv files when copying""")
parser.add_argument("--exclude", action="append",
help="""regex to exclude when copying files""")
parser.add_argument("--site", default=DEFAULT_SITE,
help="""which site to build/clean""")
parser.add_argument("-a", "--all", action="store_true",
help="""apply action to all sites""")
return parser
def main():
parser = build_argument_parser()
args = parser.parse_args(sys.argv[1:])
if args.all:
sites = load_sites()
else:
sites = [args.site]
print(sites)
for site in sites:
sitebuilder = SiteBuilder(site)
if args.exclude:
sitebuilder.add_ignores(args.exclude)
if not args.mkv:
sitebuilder.add_ignore_pattern(r".*\.mkv")
if args.clean or args.rebuild:
sitebuilder.clean_site()
if not args.clean:
sitebuilder.build_site()
if __name__ == "__main__":
main()
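# Example invocations (site names come from sites.yaml; flags are the ones defined above):
#   python build_site.py --site weisman --rebuild
#   python build_site.py --all --clean
#   python build_site.py --exclude '.*\.draft\.md$'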
| 30.884444
| 89
| 0.60095
|
import sys
import os
import re
import shutil
from argparse import ArgumentParser
import markdown
from markdown_include.include import MarkdownInclude
from mako.template import Template
from mako.lookup import TemplateLookup
from ruamel.yaml import YAML
SCRIPT_DIR = "/home/teddy/math/web/personal"
SITES_LIST = "sites.yaml"
DEFAULT_SITE = "weisman"
TEMPLATE_DIR = "templates"
SITE_DATA = "site_data.yaml"
SITE_DIR = "site"
OUTPUT_DIR = "public_html"
DEFAULT_MARKDOWN_TEMPLATE="markdown_page.html"
HTMLFILE_REGEX = r".+\.html?$"
MDFILE_REGEX = r".+\.md$"
IGNORE_REGEX = ".*~$"
yaml = YAML(typ="safe")
class SiteBuilder:
def __init__(self, site_dir):
self.site_dir = os.path.join(SCRIPT_DIR, site_dir)
self.load_site_data()
self.ignore_patterns = [IGNORE_REGEX]
self.lookup_dirs = TemplateLookup(
directories=[os.path.join(self.site_dir, TEMPLATE_DIR),
os.path.join(self.site_dir, SITE_DIR)],
input_encoding='utf-8'
)
self.markdown_include = MarkdownInclude(
configs={'base_path':
os.path.join(self.site_dir, SITE_DIR)
}
)
def add_ignore_pattern(self, pattern):
self.ignore_patterns.append(pattern)
def add_ignores(self, ignores):
self.ignore_patterns += ignores
def mkoputdir(self, filename):
try:
os.makedirs(os.path.join(self.site_dir,
OUTPUT_DIR,
os.path.dirname(filename)))
except os.error:
pass
def load_site_data(self):
with open(os.path.join(self.site_dir, SITE_DATA), "r") as site_data_file:
self.site_data = yaml.load(site_data_file)
def process_markdown_file(self, filedir, filename):
title, template_file, html_output = get_mdfile_data(
os.path.join(self.site_dir, SITE_DIR, filename),
extensions=[self.markdown_include]
)
self.mkoputdir(filename)
template = self.lookup_dirs.get_template(template_file)
output_filename = change_ext(filename, ".html")
page_data = {"title": title,
"contents": html_output,
"directory":filedir,
"filename": output_filename}
with open(os.path.join(self.site_dir, OUTPUT_DIR,
output_filename),
"w", encoding='utf-8') as html_file:
html_file.write(template.render(site_data=self.site_data,
page_data=page_data))
def process_html_file(self, filename):
self.mkoputdir(filename)
template = self.lookup_dirs.get_template(filename)
with open(os.path.join(self.site_dir, OUTPUT_DIR, filename), "w",
encoding='utf-8') as html_oput:
html_oput.write(template.render(site_data=self.site_data))
def process_other_file(self, filename):
self.mkoputdir(filename)
shutil.copyfile(os.path.join(self.site_dir, SITE_DIR, filename),
os.path.join(self.site_dir, OUTPUT_DIR, filename))
def ignore_file(self, filename):
for regex in self.ignore_patterns:
if re.match(regex, filename):
return True
return False
def process_file(self, filedir, filename):
if self.ignore_file(filename):
pass
elif re.match(HTMLFILE_REGEX, filename):
self.process_html_file(filename)
elif re.match(MDFILE_REGEX, filename):
self.process_markdown_file(filedir, filename)
else:
self.process_other_file(filename)
def build_site(self):
site_files = os.walk(os.path.join(self.site_dir, SITE_DIR),
followlinks=True)
for dirpath, dirnames, filenames in site_files:
filedir = os.path.relpath(dirpath, os.path.join(self.site_dir, SITE_DIR))
if filedir == ".":
filedir = ""
for filename in filenames:
self.process_file(filedir, os.path.join(filedir, filename))
def clean_site(self):
try:
shutil.rmtree(os.path.join(self.site_dir, OUTPUT_DIR))
except FileNotFoundError:
pass
os.mkdir(os.path.join(self.site_dir, OUTPUT_DIR))
def get_mdfile_data(abspath, extensions=[]):
with open(abspath, "r", encoding='utf-8') as md_file:
line = md_file.readline()
title = None
template_file = DEFAULT_MARKDOWN_TEMPLATE
while line:
match = re.match("%\s*(.*)", line)
if match:
if not title:
title = match.group(1).strip()
else:
template_file = match.group(1).strip()
else:
break
line = md_file.readline()
html_output = markdown.markdown(md_file.read(), extensions=extensions)
return (title, template_file, html_output)
def change_ext(filename, new_ext):
return re.sub(r"\.\w+$", new_ext, filename)
def ignore_file(filename, regex=IGNORE_REGEX):
return re.match(regex, filename)
def load_sites():
with open(os.path.join(SCRIPT_DIR, SITES_LIST), "r") as sites_list:
return yaml.load(sites_list)
def build_argument_parser():
parser = ArgumentParser()
parser.add_argument("-c", "--clean", action="store_true",
help="""Clean the site output rather than rebuilding the site""")
parser.add_argument("-r", "--rebuild", action="store_true",
help="""Clean the site output and then rebuild the site""")
parser.add_argument("--mkv", action="store_true",
help="""don't ignore mkv files when copying""")
parser.add_argument("--exclude", action="append",
help="""regex to exclude when copying files""")
parser.add_argument("--site", default=DEFAULT_SITE,
help="""which site to build/clean""")
parser.add_argument("-a", "--all", action="store_true",
help="""apply action to all sites""")
return parser
def main():
parser = build_argument_parser()
args = parser.parse_args(sys.argv[1:])
if args.all:
sites = load_sites()
else:
sites = [args.site]
print(sites)
for site in sites:
sitebuilder = SiteBuilder(site)
if args.exclude:
sitebuilder.add_ignores(args.exclude)
if not args.mkv:
sitebuilder.add_ignore_pattern(r".*\.mkv")
if args.clean or args.rebuild:
sitebuilder.clean_site()
if not args.clean:
sitebuilder.build_site()
if __name__ == "__main__":
main()
| true
| true
|
1c48ea3bf83a5985c69cad2d01b99bc77a90d0d2
| 20,679
|
py
|
Python
|
helper_tf_util.py
|
Archer-pro666/BAAF-Net
|
87238df296aa4a78b619affc8fb5e0197c49176d
|
[
"MIT"
] | 323
|
2020-09-07T19:06:10.000Z
|
2022-03-29T20:34:08.000Z
|
helper_tf_util.py
|
whuhxb/BAAF-Net
|
663d1681d4d05ad3caaacd98e6dedfdc9caa4930
|
[
"MIT"
] | 38
|
2020-09-09T02:56:46.000Z
|
2022-03-28T08:15:10.000Z
|
helper_tf_util.py
|
whuhxb/BAAF-Net
|
663d1681d4d05ad3caaacd98e6dedfdc9caa4930
|
[
"MIT"
] | 39
|
2020-09-08T02:25:57.000Z
|
2022-03-24T06:15:00.000Z
|
""" Wrapper functions for TensorFlow layers.
Author: Charles R. Qi
Date: November 2016
"""
import numpy as np
import tensorflow as tf
def _variable_on_cpu(name, shape, initializer, use_fp16=False):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
use_xavier: bool, whether to use xavier initializer
Returns:
Variable Tensor
"""
if use_xavier:
initializer = tf.contrib.layers.xavier_initializer()
var = _variable_on_cpu(name, shape, initializer)
else:
# initializer = tf.truncated_normal_initializer(stddev=stddev)
with tf.device('/cpu:0'):
var = tf.truncated_normal(shape, stddev=np.sqrt(2 / shape[-1]))
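# Quantize the sampled weights to three decimal places (scale by 1000, round, rescale).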
var = tf.round(var * tf.constant(1000, dtype=tf.float32)) / tf.constant(1000, dtype=tf.float32)
var = tf.Variable(var, name='weights')
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def conv1d(inputs,
num_output_channels,
kernel_size,
scope,
stride=1,
padding='SAME',
use_xavier=True,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn=False,
bn_decay=None,
is_training=None):
""" 1D convolution with non-linear operation.
Args:
inputs: 3-D tensor variable BxLxC
num_output_channels: int
kernel_size: int
scope: string
stride: int
padding: 'SAME' or 'VALID'
use_xavier: bool, use xavier_initializer if true
stddev: float, stddev for truncated_normal init
weight_decay: float
activation_fn: function
bn: bool, whether to use batch norm
bn_decay: float or float tensor variable in [0,1]
is_training: bool Tensor variable
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
num_in_channels = inputs.get_shape()[-1].value
kernel_shape = [kernel_size,
num_in_channels, num_output_channels]
kernel = _variable_with_weight_decay('weights',
shape=kernel_shape,
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
outputs = tf.nn.conv1d(inputs, kernel,
stride=stride,
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = batch_norm_for_conv1d(outputs, is_training,
bn_decay=bn_decay, scope='bn')
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
def conv2d(inputs,
num_output_channels,
kernel_size,
scope,
stride=[1, 1],
padding='SAME',
bn=False,
is_training=None,
use_xavier=False,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn_decay=None):
""" 2D convolution with non-linear operation.
Args:
inputs: 4-D tensor variable BxHxWxC
num_output_channels: int
kernel_size: a list of 2 ints
scope: string
stride: a list of 2 ints
padding: 'SAME' or 'VALID'
use_xavier: bool, use xavier_initializer if true
stddev: float, stddev for truncated_normal init
weight_decay: float
activation_fn: function
bn: bool, whether to use batch norm
bn_decay: float or float tensor variable in [0,1]
is_training: bool Tensor variable
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
num_in_channels = inputs.get_shape()[-1].value
kernel_shape = [kernel_h, kernel_w,
num_in_channels, num_output_channels]
kernel = _variable_with_weight_decay('weights',
shape=kernel_shape,
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
stride_h, stride_w = stride
outputs = tf.nn.conv2d(inputs, kernel,
[1, stride_h, stride_w, 1],
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = tf.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training)
if activation_fn is not None:
outputs = tf.nn.leaky_relu(outputs, alpha=0.2)
return outputs
def conv2d_transpose(inputs,
num_output_channels,
kernel_size,
scope,
stride=[1, 1],
padding='SAME',
use_xavier=False,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn=False,
bn_decay=None,
is_training=None):
""" 2D convolution transpose with non-linear operation.
Args:
inputs: 4-D tensor variable BxHxWxC
num_output_channels: int
kernel_size: a list of 2 ints
scope: string
stride: a list of 2 ints
padding: 'SAME' or 'VALID'
use_xavier: bool, use xavier_initializer if true
stddev: float, stddev for truncated_normal init
weight_decay: float
activation_fn: function
bn: bool, whether to use batch norm
bn_decay: float or float tensor variable in [0,1]
is_training: bool Tensor variable
Returns:
Variable tensor
Note: conv2d(conv2d_transpose(a, num_out, ksize, stride), a.shape[-1], ksize, stride) == a
"""
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
num_in_channels = inputs.get_shape()[-1].value
kernel_shape = [kernel_h, kernel_w,
num_output_channels, num_in_channels] # reversed to conv2d
kernel = _variable_with_weight_decay('weights',
shape=kernel_shape,
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
stride_h, stride_w = stride
# from slim.convolution2d_transpose
def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
dim_size *= stride_size
if padding == 'VALID' and dim_size is not None:
dim_size += max(kernel_size - stride_size, 0)
return dim_size
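# e.g. dim_size=8, stride=2, kernel=3: 'SAME' -> 16; 'VALID' -> 16 + max(3 - 2, 0) = 17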
# calculate output shape
batch_size = tf.shape(inputs)[0]
height = tf.shape(inputs)[1]
width = tf.shape(inputs)[2]
out_height = get_deconv_dim(height, stride_h, kernel_h, padding)
out_width = get_deconv_dim(width, stride_w, kernel_w, padding)
output_shape = tf.stack([batch_size, out_height, out_width, num_output_channels], axis=0)
outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape,
[1, stride_h, stride_w, 1],
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
# outputs = batch_norm_for_conv2d(outputs, is_training,
# bn_decay=bn_decay, scope='bn')
outputs = tf.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training)
if activation_fn is not None:
# outputs = activation_fn(outputs)
outputs = tf.nn.leaky_relu(outputs, alpha=0.2)
return outputs
def conv3d(inputs,
num_output_channels,
kernel_size,
scope,
stride=[1, 1, 1],
padding='SAME',
use_xavier=True,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn=False,
bn_decay=None,
is_training=None):
""" 3D convolution with non-linear operation.
Args:
inputs: 5-D tensor variable BxDxHxWxC
num_output_channels: int
kernel_size: a list of 3 ints
scope: string
stride: a list of 3 ints
padding: 'SAME' or 'VALID'
use_xavier: bool, use xavier_initializer if true
stddev: float, stddev for truncated_normal init
weight_decay: float
activation_fn: function
bn: bool, whether to use batch norm
bn_decay: float or float tensor variable in [0,1]
is_training: bool Tensor variable
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_d, kernel_h, kernel_w = kernel_size
num_in_channels = inputs.get_shape()[-1].value
kernel_shape = [kernel_d, kernel_h, kernel_w,
num_in_channels, num_output_channels]
kernel = _variable_with_weight_decay('weights',
shape=kernel_shape,
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
stride_d, stride_h, stride_w = stride
outputs = tf.nn.conv3d(inputs, kernel,
[1, stride_d, stride_h, stride_w, 1],
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = batch_norm_for_conv3d(outputs, is_training,
bn_decay=bn_decay, scope='bn')
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
def fully_connected(inputs,
num_outputs,
scope,
use_xavier=True,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn=False,
bn_decay=None,
is_training=None):
""" Fully connected layer with non-linear operation.
Args:
inputs: 2-D tensor BxN
num_outputs: int
Returns:
Variable tensor of size B x num_outputs.
"""
with tf.variable_scope(scope) as sc:
num_input_units = inputs.get_shape()[-1].value
weights = _variable_with_weight_decay('weights',
shape=[num_input_units, num_outputs],
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
outputs = tf.matmul(inputs, weights)
biases = _variable_on_cpu('biases', [num_outputs],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = batch_norm_for_fc(outputs, is_training, bn_decay, 'bn')
if activation_fn is not None:
# outputs = activation_fn(outputs)
outputs = tf.nn.leaky_relu(outputs, alpha=0.2)
return outputs
def max_pool2d(inputs,
kernel_size,
scope,
stride=[2, 2],
padding='VALID'):
""" 2D max pooling.
Args:
inputs: 4-D tensor BxHxWxC
kernel_size: a list of 2 ints
stride: a list of 2 ints
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
stride_h, stride_w = stride
outputs = tf.nn.max_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def avg_pool2d(inputs,
kernel_size,
scope,
stride=[2, 2],
padding='VALID'):
""" 2D avg pooling.
Args:
inputs: 4-D tensor BxHxWxC
kernel_size: a list of 2 ints
stride: a list of 2 ints
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
stride_h, stride_w = stride
outputs = tf.nn.avg_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def max_pool3d(inputs,
kernel_size,
scope,
stride=[2, 2, 2],
padding='VALID'):
""" 3D max pooling.
Args:
inputs: 5-D tensor BxDxHxWxC
kernel_size: a list of 3 ints
stride: a list of 3 ints
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_d, kernel_h, kernel_w = kernel_size
stride_d, stride_h, stride_w = stride
outputs = tf.nn.max_pool3d(inputs,
ksize=[1, kernel_d, kernel_h, kernel_w, 1],
strides=[1, stride_d, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def avg_pool3d(inputs,
kernel_size,
scope,
stride=[2, 2, 2],
padding='VALID'):
""" 3D avg pooling.
Args:
inputs: 5-D tensor BxDxHxWxC
kernel_size: a list of 3 ints
stride: a list of 3 ints
Returns:
Variable tensor
"""
with tf.variable_scope(scope) as sc:
kernel_d, kernel_h, kernel_w = kernel_size
stride_d, stride_h, stride_w = stride
outputs = tf.nn.avg_pool3d(inputs,
ksize=[1, kernel_d, kernel_h, kernel_w, 1],
strides=[1, stride_d, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def batch_norm_template(inputs, is_training, scope, moments_dims, bn_decay):
""" Batch normalization on convolutional maps and beyond...
Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow
Args:
inputs: Tensor, k-D input ... x C could be BC or BHWC or BDHWC
is_training: boolean tf.Variable, true indicates training phase
scope: string, variable scope
moments_dims: a list of ints, indicating dimensions for moments calculation
bn_decay: float or float tensor variable, controlling moving average weight
Return:
normed: batch-normalized maps
"""
with tf.variable_scope(scope) as sc:
num_channels = inputs.get_shape()[-1].value
beta = tf.Variable(tf.constant(0.0, shape=[num_channels]),
name='beta', trainable=True)
gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]),
name='gamma', trainable=True)
batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')
decay = bn_decay if bn_decay is not None else 0.9
ema = tf.train.ExponentialMovingAverage(decay=decay)
# Operator that maintains moving averages of variables.
ema_apply_op = tf.cond(is_training,
lambda: ema.apply([batch_mean, batch_var]),
lambda: tf.no_op())
# Update moving average and return current batch's avg and var.
def mean_var_with_update():
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
# ema.average returns the Variable holding the average of var.
mean, var = tf.cond(is_training,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)
return normed
def batch_norm_for_fc(inputs, is_training, bn_decay, scope):
""" Batch normalization on FC data.
Args:
inputs: Tensor, 2D BxC input
is_training: boolean tf.Variable, true indicates training phase
bn_decay: float or float tensor variable, controlling moving average weight
scope: string, variable scope
Return:
normed: batch-normalized maps
"""
return batch_norm_template(inputs, is_training, scope, [0, ], bn_decay)
def batch_norm_for_conv1d(inputs, is_training, bn_decay, scope):
""" Batch normalization on 1D convolutional maps.
Args:
inputs: Tensor, 3D BLC input maps
is_training: boolean tf.Variable, true indicates training phase
bn_decay: float or float tensor variable, controlling moving average weight
scope: string, variable scope
Return:
normed: batch-normalized maps
"""
return batch_norm_template(inputs, is_training, scope, [0, 1], bn_decay)
def batch_norm_for_conv2d(inputs, is_training, bn_decay, scope):
""" Batch normalization on 2D convolutional maps.
Args:
inputs: Tensor, 4D BHWC input maps
is_training: boolean tf.Variable, true indicates training phase
bn_decay: float or float tensor variable, controlling moving average weight
scope: string, variable scope
Return:
normed: batch-normalized maps
"""
return batch_norm_template(inputs, is_training, scope, [0, 1, 2], bn_decay)
def batch_norm_for_conv3d(inputs, is_training, bn_decay, scope):
""" Batch normalization on 3D convolutional maps.
Args:
inputs: Tensor, 5D BDHWC input maps
is_training: boolean tf.Variable, true indicates training phase
bn_decay: float or float tensor variable, controlling moving average weight
scope: string, variable scope
Return:
normed: batch-normalized maps
"""
return batch_norm_template(inputs, is_training, scope, [0, 1, 2, 3], bn_decay)
def dropout(inputs,
is_training,
scope,
keep_prob=0.5,
noise_shape=None):
""" Dropout layer.
Args:
inputs: tensor
is_training: boolean tf.Variable
scope: string
keep_prob: float in [0,1]
noise_shape: list of ints
Returns:
tensor variable
"""
with tf.variable_scope(scope) as sc:
outputs = tf.cond(is_training,
lambda: tf.nn.dropout(inputs, keep_prob, noise_shape),
lambda: inputs)
return outputs
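# A minimal TF1-style sketch of composing the wrappers above (placeholder shapes are
# illustrative and assume a TF 1.x runtime; kept as comments since graph-mode setup
# is environment-specific):
# inputs = tf.placeholder(tf.float32, [None, 64, 64, 16])
# is_training = tf.placeholder(tf.bool, shape=())
# net = conv2d(inputs, 32, [3, 3], scope='conv1', bn=True, is_training=is_training)
# net = max_pool2d(net, [2, 2], scope='pool1')
# net = dropout(net, is_training, scope='drop1', keep_prob=0.5)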
| 35.963478
| 111
| 0.561487
|
import numpy as np
import tensorflow as tf
def _variable_on_cpu(name, shape, initializer, use_fp16=False):
with tf.device('/cpu:0'):
dtype = tf.float16 if use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True):
if use_xavier:
initializer = tf.contrib.layers.xavier_initializer()
var = _variable_on_cpu(name, shape, initializer)
else:
with tf.device('/cpu:0'):
var = tf.truncated_normal(shape, stddev=np.sqrt(2 / shape[-1]))
var = tf.round(var * tf.constant(1000, dtype=tf.float32)) / tf.constant(1000, dtype=tf.float32)
var = tf.Variable(var, name='weights')
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def conv1d(inputs,
num_output_channels,
kernel_size,
scope,
stride=1,
padding='SAME',
use_xavier=True,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn=False,
bn_decay=None,
is_training=None):
with tf.variable_scope(scope) as sc:
num_in_channels = inputs.get_shape()[-1].value
kernel_shape = [kernel_size,
num_in_channels, num_output_channels]
kernel = _variable_with_weight_decay('weights',
shape=kernel_shape,
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
outputs = tf.nn.conv1d(inputs, kernel,
stride=stride,
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = batch_norm_for_conv1d(outputs, is_training,
bn_decay=bn_decay, scope='bn')
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
def conv2d(inputs,
num_output_channels,
kernel_size,
scope,
stride=[1, 1],
padding='SAME',
bn=False,
is_training=None,
use_xavier=False,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn_decay=None):
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
num_in_channels = inputs.get_shape()[-1].value
kernel_shape = [kernel_h, kernel_w,
num_in_channels, num_output_channels]
kernel = _variable_with_weight_decay('weights',
shape=kernel_shape,
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
stride_h, stride_w = stride
outputs = tf.nn.conv2d(inputs, kernel,
[1, stride_h, stride_w, 1],
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = tf.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training)
if activation_fn is not None:
outputs = tf.nn.leaky_relu(outputs, alpha=0.2)
return outputs
def conv2d_transpose(inputs,
num_output_channels,
kernel_size,
scope,
stride=[1, 1],
padding='SAME',
use_xavier=False,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn=False,
bn_decay=None,
is_training=None):
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
num_in_channels = inputs.get_shape()[-1].value
kernel_shape = [kernel_h, kernel_w,
num_output_channels, num_in_channels]
kernel = _variable_with_weight_decay('weights',
shape=kernel_shape,
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
stride_h, stride_w = stride
def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
dim_size *= stride_size
if padding == 'VALID' and dim_size is not None:
dim_size += max(kernel_size - stride_size, 0)
return dim_size
batch_size = tf.shape(inputs)[0]
height = tf.shape(inputs)[1]
width = tf.shape(inputs)[2]
out_height = get_deconv_dim(height, stride_h, kernel_h, padding)
out_width = get_deconv_dim(width, stride_w, kernel_w, padding)
output_shape = tf.stack([batch_size, out_height, out_width, num_output_channels], axis=0)
outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape,
[1, stride_h, stride_w, 1],
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = tf.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training)
if activation_fn is not None:
outputs = tf.nn.leaky_relu(outputs, alpha=0.2)
return outputs
def conv3d(inputs,
num_output_channels,
kernel_size,
scope,
stride=[1, 1, 1],
padding='SAME',
use_xavier=True,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn=False,
bn_decay=None,
is_training=None):
with tf.variable_scope(scope) as sc:
kernel_d, kernel_h, kernel_w = kernel_size
num_in_channels = inputs.get_shape()[-1].value
kernel_shape = [kernel_d, kernel_h, kernel_w,
num_in_channels, num_output_channels]
kernel = _variable_with_weight_decay('weights',
shape=kernel_shape,
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
stride_d, stride_h, stride_w = stride
outputs = tf.nn.conv3d(inputs, kernel,
[1, stride_d, stride_h, stride_w, 1],
padding=padding)
biases = _variable_on_cpu('biases', [num_output_channels],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = batch_norm_for_conv3d(outputs, is_training,
bn_decay=bn_decay, scope='bn')
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
def fully_connected(inputs,
num_outputs,
scope,
use_xavier=True,
stddev=1e-3,
weight_decay=0.0,
activation_fn=tf.nn.relu,
bn=False,
bn_decay=None,
is_training=None):
with tf.variable_scope(scope) as sc:
num_input_units = inputs.get_shape()[-1].value
weights = _variable_with_weight_decay('weights',
shape=[num_input_units, num_outputs],
use_xavier=use_xavier,
stddev=stddev,
wd=weight_decay)
outputs = tf.matmul(inputs, weights)
biases = _variable_on_cpu('biases', [num_outputs],
tf.constant_initializer(0.0))
outputs = tf.nn.bias_add(outputs, biases)
if bn:
outputs = batch_norm_for_fc(outputs, is_training, bn_decay, 'bn')
if activation_fn is not None:
outputs = tf.nn.leaky_relu(outputs, alpha=0.2)
return outputs
def max_pool2d(inputs,
kernel_size,
scope,
stride=[2, 2],
padding='VALID'):
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
stride_h, stride_w = stride
outputs = tf.nn.max_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def avg_pool2d(inputs,
kernel_size,
scope,
stride=[2, 2],
padding='VALID'):
with tf.variable_scope(scope) as sc:
kernel_h, kernel_w = kernel_size
stride_h, stride_w = stride
outputs = tf.nn.avg_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def max_pool3d(inputs,
kernel_size,
scope,
stride=[2, 2, 2],
padding='VALID'):
with tf.variable_scope(scope) as sc:
kernel_d, kernel_h, kernel_w = kernel_size
stride_d, stride_h, stride_w = stride
outputs = tf.nn.max_pool3d(inputs,
ksize=[1, kernel_d, kernel_h, kernel_w, 1],
strides=[1, stride_d, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def avg_pool3d(inputs,
kernel_size,
scope,
stride=[2, 2, 2],
padding='VALID'):
with tf.variable_scope(scope) as sc:
kernel_d, kernel_h, kernel_w = kernel_size
stride_d, stride_h, stride_w = stride
outputs = tf.nn.avg_pool3d(inputs,
ksize=[1, kernel_d, kernel_h, kernel_w, 1],
strides=[1, stride_d, stride_h, stride_w, 1],
padding=padding,
name=sc.name)
return outputs
def batch_norm_template(inputs, is_training, scope, moments_dims, bn_decay):
with tf.variable_scope(scope) as sc:
num_channels = inputs.get_shape()[-1].value
beta = tf.Variable(tf.constant(0.0, shape=[num_channels]),
name='beta', trainable=True)
gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]),
name='gamma', trainable=True)
batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')
decay = bn_decay if bn_decay is not None else 0.9
ema = tf.train.ExponentialMovingAverage(decay=decay)
ema_apply_op = tf.cond(is_training,
lambda: ema.apply([batch_mean, batch_var]),
lambda: tf.no_op())
def mean_var_with_update():
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
# ema.average returns the Variable holding the average of var.
mean, var = tf.cond(is_training,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)
return normed
def batch_norm_for_fc(inputs, is_training, bn_decay, scope):
return batch_norm_template(inputs, is_training, scope, [0, ], bn_decay)
def batch_norm_for_conv1d(inputs, is_training, bn_decay, scope):
return batch_norm_template(inputs, is_training, scope, [0, 1], bn_decay)
def batch_norm_for_conv2d(inputs, is_training, bn_decay, scope):
return batch_norm_template(inputs, is_training, scope, [0, 1, 2], bn_decay)
def batch_norm_for_conv3d(inputs, is_training, bn_decay, scope):
return batch_norm_template(inputs, is_training, scope, [0, 1, 2, 3], bn_decay)
def dropout(inputs,
is_training,
scope,
keep_prob=0.5,
noise_shape=None):
with tf.variable_scope(scope) as sc:
outputs = tf.cond(is_training,
lambda: tf.nn.dropout(inputs, keep_prob, noise_shape),
lambda: inputs)
return outputs
| true
| true
|
1c48ea441ce4fb5bf52edefac9ce32cb7c79897a
| 663
|
bzl
|
Python
|
modules/benchmarks/benchmark_test.bzl
|
duluca/angular
|
b7385a77ad5300f0add3643a479426b834d49fc5
|
[
"MIT"
] | 3
|
2019-11-19T11:07:22.000Z
|
2020-03-31T06:38:01.000Z
|
modules/benchmarks/benchmark_test.bzl
|
duluca/angular
|
b7385a77ad5300f0add3643a479426b834d49fc5
|
[
"MIT"
] | 23
|
2022-02-15T06:06:27.000Z
|
2022-03-02T13:04:37.000Z
|
modules/benchmarks/benchmark_test.bzl
|
duluca/angular
|
b7385a77ad5300f0add3643a479426b834d49fc5
|
[
"MIT"
] | 1
|
2018-10-12T14:09:39.000Z
|
2018-10-12T14:09:39.000Z
|
load("//tools:defaults.bzl", "protractor_web_test_suite")
"""
Macro that can be used to define a benchmark test. This differentiates from
a normal Protractor test suite because we specify a custom "perf" configuration
that sets up "@angular/benchpress".
"""
def benchmark_test(name, server, deps, tags = []):
protractor_web_test_suite(
name = name,
configuration = "//:protractor-perf.conf.js",
data = [
"//packages/benchpress",
],
on_prepare = "//modules/benchmarks:start-server.js",
server = server,
tags = tags,
deps = [
"@npm//yargs",
] + deps,
)
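# Hypothetical BUILD-file usage (target labels are illustrative, not from the repo):
# benchmark_test(
#     name = "perf_tests",
#     server = "//modules/benchmarks/src/tree:devserver",
#     deps = ["//modules/benchmarks/src/tree:test_lib"],
# )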
| 28.826087
| 81
| 0.60181
|
load("//tools:defaults.bzl", "protractor_web_test_suite")
def benchmark_test(name, server, deps, tags = []):
protractor_web_test_suite(
name = name,
configuration = "//:protractor-perf.conf.js",
data = [
"//packages/benchpress",
],
on_prepare = "//modules/benchmarks:start-server.js",
server = server,
tags = tags,
deps = [
"@npm//yargs",
] + deps,
)
| true
| true
|
1c48ea56d006f86fc353cba1ce9805d7a2268459
| 19,936
|
py
|
Python
|
tensorflow/python/data/util/nest_test.py
|
AdrienCorenflos/tensorflow
|
1b5220e89fecca70375b372a5bddc7f961c6a736
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/data/util/nest_test.py
|
AdrienCorenflos/tensorflow
|
1b5220e89fecca70375b372a5bddc7f961c6a736
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/data/util/nest_test.py
|
AdrienCorenflos/tensorflow
|
1b5220e89fecca70375b372a5bddc7f961c6a736
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import attr
import numpy as np
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
class NestTest(test.TestCase):
def testFlattenAndPack(self):
structure = ((3, 4), 5, (6, 7, (9, 10), 8))
flat = ["a", "b", "c", "d", "e", "f", "g", "h"]
self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])
self.assertEqual(
nest.pack_sequence_as(structure, flat), (("a", "b"), "c",
("d", "e", ("f", "g"), "h")))
point = collections.namedtuple("Point", ["x", "y"])
structure = (point(x=4, y=2), ((point(x=1, y=0),),))
flat = [4, 2, 1, 0]
self.assertEqual(nest.flatten(structure), flat)
restructured_from_flat = nest.pack_sequence_as(structure, flat)
self.assertEqual(restructured_from_flat, structure)
self.assertEqual(restructured_from_flat[0].x, 4)
self.assertEqual(restructured_from_flat[0].y, 2)
self.assertEqual(restructured_from_flat[1][0][0].x, 1)
self.assertEqual(restructured_from_flat[1][0][0].y, 0)
@attr.s
class PointAttr:
x = attr.ib()
y = attr.ib()
structure = (PointAttr(x=4, y=2), ((PointAttr(x=1, y=0),),))
flat = [4, 2, 1, 0]
self.assertEqual(nest.flatten(structure), flat)
restructured_from_flat = nest.pack_sequence_as(structure, flat)
self.assertEqual(restructured_from_flat, structure)
self.assertEqual(restructured_from_flat[0].x, 4)
self.assertEqual(restructured_from_flat[0].y, 2)
self.assertEqual(restructured_from_flat[1][0][0].x, 1)
self.assertEqual(restructured_from_flat[1][0][0].y, 0)
self.assertEqual([5], nest.flatten(5))
self.assertEqual([np.array([5])], nest.flatten(np.array([5])))
self.assertEqual("a", nest.pack_sequence_as(5, ["a"]))
self.assertEqual(
np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))
with self.assertRaisesRegexp(ValueError, "Structure is a scalar"):
nest.pack_sequence_as("scalar", [4, 5])
with self.assertRaisesRegexp(TypeError, "flat_sequence"):
nest.pack_sequence_as([4, 5], "bad_sequence")
with self.assertRaises(ValueError):
nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"])
def testFlattenDictOrder(self):
"""`flatten` orders dicts by key, including OrderedDicts."""
ordered = collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
plain = {"d": 3, "b": 1, "a": 0, "c": 2}
ordered_flat = nest.flatten(ordered)
plain_flat = nest.flatten(plain)
self.assertEqual([0, 1, 2, 3], ordered_flat)
self.assertEqual([0, 1, 2, 3], plain_flat)
def testPackDictOrder(self):
"""Packing orders dicts by key, including OrderedDicts."""
ordered = collections.OrderedDict([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
plain = {"d": 0, "b": 0, "a": 0, "c": 0}
seq = [0, 1, 2, 3]
ordered_reconstruction = nest.pack_sequence_as(ordered, seq)
plain_reconstruction = nest.pack_sequence_as(plain, seq)
self.assertEqual(
collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
ordered_reconstruction)
self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction)
def testFlattenAndPackWithDicts(self):
# A nice messy mix of tuples, lists, dicts, and `OrderedDict`s.
named_tuple = collections.namedtuple("A", ("b", "c"))
mess = (
"z",
named_tuple(3, 4),
{
"c": (
1,
collections.OrderedDict([
("b", 3),
("a", 2),
]),
),
"b": 5
},
17
)
flattened = nest.flatten(mess)
self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 17])
structure_of_mess = (
14,
named_tuple("a", True),
{
"c": (
0,
collections.OrderedDict([
("b", 9),
("a", 8),
]),
),
"b": 3
},
"hi everybody",
)
unflattened = nest.pack_sequence_as(structure_of_mess, flattened)
self.assertEqual(unflattened, mess)
# Check also that the OrderedDict was created, with the correct key order.
unflattened_ordered_dict = unflattened[2]["c"][1]
self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict)
self.assertEqual(list(unflattened_ordered_dict.keys()), ["b", "a"])
def testFlattenSparseValue(self):
st = sparse_tensor.SparseTensorValue([[0]], [0], [1])
single_value = st
list_of_values = [st, st, st]
nest_of_values = ((st), ((st), (st)))
dict_of_values = {"foo": st, "bar": st, "baz": st}
self.assertEqual([st], nest.flatten(single_value))
self.assertEqual([[st, st, st]], nest.flatten(list_of_values))
self.assertEqual([st, st, st], nest.flatten(nest_of_values))
self.assertEqual([st, st, st], nest.flatten(dict_of_values))
def testFlattenRaggedValue(self):
rt = ragged_factory_ops.constant_value([[[0]], [[1]]])
single_value = rt
list_of_values = [rt, rt, rt]
nest_of_values = ((rt), ((rt), (rt)))
dict_of_values = {"foo": rt, "bar": rt, "baz": rt}
self.assertEqual([rt], nest.flatten(single_value))
self.assertEqual([[rt, rt, rt]], nest.flatten(list_of_values))
self.assertEqual([rt, rt, rt], nest.flatten(nest_of_values))
self.assertEqual([rt, rt, rt], nest.flatten(dict_of_values))
def testIsSequence(self):
self.assertFalse(nest.is_sequence("1234"))
self.assertFalse(nest.is_sequence([1, 3, [4, 5]]))
self.assertTrue(nest.is_sequence(((7, 8), (5, 6))))
self.assertFalse(nest.is_sequence([]))
self.assertFalse(nest.is_sequence(set([1, 2])))
ones = array_ops.ones([2, 3])
self.assertFalse(nest.is_sequence(ones))
self.assertFalse(nest.is_sequence(math_ops.tanh(ones)))
self.assertFalse(nest.is_sequence(np.ones((4, 5))))
self.assertTrue(nest.is_sequence({"foo": 1, "bar": 2}))
self.assertFalse(
nest.is_sequence(sparse_tensor.SparseTensorValue([[0]], [0], [1])))
self.assertFalse(
nest.is_sequence(ragged_factory_ops.constant_value([[[0]], [[1]]])))
def testAssertSameStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
structure_different_num_elements = ("spam", "eggs")
structure_different_nesting = (((1, 2), 3), 4, 5, (6,))
structure_dictionary = {"foo": 2, "bar": 4, "baz": {"foo": 5, "bar": 6}}
structure_dictionary_diff_nested = {
"foo": 2,
"bar": 4,
"baz": {
"foo": 5,
"baz": 6
}
}
nest.assert_same_structure(structure1, structure2)
nest.assert_same_structure("abc", 1.0)
nest.assert_same_structure("abc", np.array([0, 1]))
nest.assert_same_structure("abc", constant_op.constant([0, 1]))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(structure1, structure_different_num_elements)
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure((0, 1), np.array([0, 1]))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(0, (0, 1))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(structure1, structure_different_nesting)
named_type_0 = collections.namedtuple("named_0", ("a", "b"))
named_type_1 = collections.namedtuple("named_1", ("a", "b"))
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1),
named_type_0("a", "b"))
nest.assert_same_structure(named_type_0(3, 4), named_type_0("a", "b"))
self.assertRaises(TypeError, nest.assert_same_structure,
named_type_0(3, 4), named_type_1(3, 4))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(named_type_0(3, 4), named_type_0((3,), 4))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(((3,), 4), (3, (4,)))
structure1_list = {"a": ((1, 2), 3), "b": 4, "c": (5, 6)}
structure2_list = {"a": ((1, 2), 3), "b": 4, "d": (5, 6)}
with self.assertRaisesRegexp(TypeError,
"don't have the same sequence type"):
nest.assert_same_structure(structure1, structure1_list)
nest.assert_same_structure(structure1, structure2, check_types=False)
nest.assert_same_structure(structure1, structure1_list, check_types=False)
with self.assertRaisesRegexp(ValueError, "don't have the same set of keys"):
nest.assert_same_structure(structure1_list, structure2_list)
with self.assertRaisesRegexp(ValueError, "don't have the same set of keys"):
nest.assert_same_structure(structure_dictionary,
structure_dictionary_diff_nested)
nest.assert_same_structure(
structure_dictionary,
structure_dictionary_diff_nested,
check_types=False)
nest.assert_same_structure(
structure1_list, structure2_list, check_types=False)
def testMapStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = (((7, 8), 9), 10, (11, 12))
structure1_plus1 = nest.map_structure(lambda x: x + 1, structure1)
nest.assert_same_structure(structure1, structure1_plus1)
self.assertAllEqual(
[2, 3, 4, 5, 6, 7],
nest.flatten(structure1_plus1))
structure1_plus_structure2 = nest.map_structure(
lambda x, y: x + y, structure1, structure2)
self.assertEqual(
(((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)),
structure1_plus_structure2)
self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4))
self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4))
with self.assertRaisesRegexp(TypeError, "callable"):
nest.map_structure("bad", structure1_plus1)
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, 3, (3,))
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), {"a": (3, 4), "b": 5})
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),
check_types=False)
with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
nest.map_structure(lambda x: None, structure1, foo="a")
with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
nest.map_structure(lambda x: None, structure1, check_types=False, foo="a")
def testAssertShallowStructure(self):
inp_ab = ("a", "b")
inp_abc = ("a", "b", "c")
expected_message = (
"The two structures don't have the same sequence length. Input "
"structure has length 2, while shallow structure has length 3.")
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_shallow_structure(inp_abc, inp_ab)
inp_ab1 = ((1, 1), (2, 2))
inp_ab2 = {"a": (1, 1), "b": (2, 2)}
expected_message = (
"The two structures don't have the same sequence type. Input structure "
"has type <(type|class) 'tuple'>, while shallow structure has type "
"<(type|class) 'dict'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False)
inp_ab1 = {"a": (1, 1), "b": {"c": (2, 2)}}
inp_ab2 = {"a": (1, 1), "b": {"d": (2, 2)}}
expected_message = (
r"The two structures don't have the same keys. Input "
r"structure has keys \['c'\], while shallow structure has "
r"keys \['d'\].")
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
inp_ab = collections.OrderedDict([("a", 1), ("b", (2, 3))])
inp_ba = collections.OrderedDict([("b", (2, 3)), ("a", 1)])
nest.assert_shallow_structure(inp_ab, inp_ba)
def testFlattenUpTo(self):
input_tree = (((2, 2), (3, 3)), ((4, 9), (5, 5)))
shallow_tree = ((True, True), (False, True))
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [(2, 2), (3, 3), (4, 9), (5, 5)])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
input_tree = ((("a", 1), (("b", 2), (("c", 3), (("d", 4))))))
shallow_tree = (("level_1", ("level_2", ("level_3", ("level_4")))))
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
input_tree_flattened = nest.flatten(input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[("a", 1), ("b", 2), ("c", 3), ("d", 4)])
self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
## Shallow non-list edge-case.
# Using iterable elements.
input_tree = ["input_tree"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = ("input_tree_0", "input_tree_1")
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = (0,)
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = (0, 1)
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Both non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
# Using non-iterable elements.
input_tree = 0
shallow_tree = 0
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
## Input non-list edge-case.
# Using iterable elements.
input_tree = "input_tree"
shallow_tree = ("shallow_tree",)
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'str'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
input_tree = "input_tree"
shallow_tree = ("shallow_tree_9", "shallow_tree_8")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
# Using non-iterable elements.
input_tree = 0
shallow_tree = (9,)
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'int'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
input_tree = 0
shallow_tree = (9, 8)
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
# Using dict.
input_tree = {"a": ((2, 2), (3, 3)), "b": ((4, 9), (5, 5))}
shallow_tree = {"a": (True, True), "b": (False, True)}
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [(2, 2), (3, 3), (4, 9), (5, 5)])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
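    # dict structures are traversed in sorted key order, which is why the
    # flattened results above list the "a" entries before the "b" entries.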
def testMapStructureUpTo(self):
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops)
self.assertEqual(out.a, 6)
self.assertEqual(out.b, 15)
data_list = ((2, 4, 6, 8), ((1, 3, 5, 7, 9), (3, 5, 7)))
name_list = ("evens", ("odds", "primes"))
out = nest.map_structure_up_to(
name_list, lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
self.assertEqual(out, ("first_4_evens", ("first_5_odds", "first_3_primes")))
if __name__ == "__main__":
test.main()
| 43.058315
| 80
| 0.649579
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import attr
import numpy as np
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
class NestTest(test.TestCase):
def testFlattenAndPack(self):
structure = ((3, 4), 5, (6, 7, (9, 10), 8))
flat = ["a", "b", "c", "d", "e", "f", "g", "h"]
self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])
self.assertEqual(
nest.pack_sequence_as(structure, flat), (("a", "b"), "c",
("d", "e", ("f", "g"), "h")))
point = collections.namedtuple("Point", ["x", "y"])
structure = (point(x=4, y=2), ((point(x=1, y=0),),))
flat = [4, 2, 1, 0]
self.assertEqual(nest.flatten(structure), flat)
restructured_from_flat = nest.pack_sequence_as(structure, flat)
self.assertEqual(restructured_from_flat, structure)
self.assertEqual(restructured_from_flat[0].x, 4)
self.assertEqual(restructured_from_flat[0].y, 2)
self.assertEqual(restructured_from_flat[1][0][0].x, 1)
self.assertEqual(restructured_from_flat[1][0][0].y, 0)
@attr.s
class PointAttr:
x = attr.ib()
y = attr.ib()
structure = (PointAttr(x=4, y=2), ((PointAttr(x=1, y=0),),))
flat = [4, 2, 1, 0]
self.assertEqual(nest.flatten(structure), flat)
restructured_from_flat = nest.pack_sequence_as(structure, flat)
self.assertEqual(restructured_from_flat, structure)
self.assertEqual(restructured_from_flat[0].x, 4)
self.assertEqual(restructured_from_flat[0].y, 2)
self.assertEqual(restructured_from_flat[1][0][0].x, 1)
self.assertEqual(restructured_from_flat[1][0][0].y, 0)
self.assertEqual([5], nest.flatten(5))
self.assertEqual([np.array([5])], nest.flatten(np.array([5])))
self.assertEqual("a", nest.pack_sequence_as(5, ["a"]))
self.assertEqual(
np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))
with self.assertRaisesRegexp(ValueError, "Structure is a scalar"):
nest.pack_sequence_as("scalar", [4, 5])
with self.assertRaisesRegexp(TypeError, "flat_sequence"):
nest.pack_sequence_as([4, 5], "bad_sequence")
with self.assertRaises(ValueError):
nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"])
def testFlattenDictOrder(self):
ordered = collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
plain = {"d": 3, "b": 1, "a": 0, "c": 2}
ordered_flat = nest.flatten(ordered)
plain_flat = nest.flatten(plain)
self.assertEqual([0, 1, 2, 3], ordered_flat)
self.assertEqual([0, 1, 2, 3], plain_flat)
def testPackDictOrder(self):
ordered = collections.OrderedDict([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
plain = {"d": 0, "b": 0, "a": 0, "c": 0}
seq = [0, 1, 2, 3]
ordered_reconstruction = nest.pack_sequence_as(ordered, seq)
plain_reconstruction = nest.pack_sequence_as(plain, seq)
self.assertEqual(
collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
ordered_reconstruction)
self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction)
def testFlattenAndPackWithDicts(self):
named_tuple = collections.namedtuple("A", ("b", "c"))
mess = (
"z",
named_tuple(3, 4),
{
"c": (
1,
collections.OrderedDict([
("b", 3),
("a", 2),
]),
),
"b": 5
},
17
)
flattened = nest.flatten(mess)
self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 17])
structure_of_mess = (
14,
named_tuple("a", True),
{
"c": (
0,
collections.OrderedDict([
("b", 9),
("a", 8),
]),
),
"b": 3
},
"hi everybody",
)
unflattened = nest.pack_sequence_as(structure_of_mess, flattened)
self.assertEqual(unflattened, mess)
unflattened_ordered_dict = unflattened[2]["c"][1]
self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict)
self.assertEqual(list(unflattened_ordered_dict.keys()), ["b", "a"])
def testFlattenSparseValue(self):
st = sparse_tensor.SparseTensorValue([[0]], [0], [1])
single_value = st
list_of_values = [st, st, st]
nest_of_values = ((st), ((st), (st)))
dict_of_values = {"foo": st, "bar": st, "baz": st}
self.assertEqual([st], nest.flatten(single_value))
self.assertEqual([[st, st, st]], nest.flatten(list_of_values))
self.assertEqual([st, st, st], nest.flatten(nest_of_values))
self.assertEqual([st, st, st], nest.flatten(dict_of_values))
def testFlattenRaggedValue(self):
rt = ragged_factory_ops.constant_value([[[0]], [[1]]])
single_value = rt
list_of_values = [rt, rt, rt]
nest_of_values = ((rt), ((rt), (rt)))
dict_of_values = {"foo": rt, "bar": rt, "baz": rt}
self.assertEqual([rt], nest.flatten(single_value))
self.assertEqual([[rt, rt, rt]], nest.flatten(list_of_values))
self.assertEqual([rt, rt, rt], nest.flatten(nest_of_values))
self.assertEqual([rt, rt, rt], nest.flatten(dict_of_values))
def testIsSequence(self):
self.assertFalse(nest.is_sequence("1234"))
self.assertFalse(nest.is_sequence([1, 3, [4, 5]]))
self.assertTrue(nest.is_sequence(((7, 8), (5, 6))))
self.assertFalse(nest.is_sequence([]))
self.assertFalse(nest.is_sequence(set([1, 2])))
ones = array_ops.ones([2, 3])
self.assertFalse(nest.is_sequence(ones))
self.assertFalse(nest.is_sequence(math_ops.tanh(ones)))
self.assertFalse(nest.is_sequence(np.ones((4, 5))))
self.assertTrue(nest.is_sequence({"foo": 1, "bar": 2}))
self.assertFalse(
nest.is_sequence(sparse_tensor.SparseTensorValue([[0]], [0], [1])))
self.assertFalse(
nest.is_sequence(ragged_factory_ops.constant_value([[[0]], [[1]]])))
def testAssertSameStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
structure_different_num_elements = ("spam", "eggs")
structure_different_nesting = (((1, 2), 3), 4, 5, (6,))
structure_dictionary = {"foo": 2, "bar": 4, "baz": {"foo": 5, "bar": 6}}
structure_dictionary_diff_nested = {
"foo": 2,
"bar": 4,
"baz": {
"foo": 5,
"baz": 6
}
}
nest.assert_same_structure(structure1, structure2)
nest.assert_same_structure("abc", 1.0)
nest.assert_same_structure("abc", np.array([0, 1]))
nest.assert_same_structure("abc", constant_op.constant([0, 1]))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(structure1, structure_different_num_elements)
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure((0, 1), np.array([0, 1]))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(0, (0, 1))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(structure1, structure_different_nesting)
named_type_0 = collections.namedtuple("named_0", ("a", "b"))
named_type_1 = collections.namedtuple("named_1", ("a", "b"))
self.assertRaises(TypeError, nest.assert_same_structure, (0, 1),
named_type_0("a", "b"))
nest.assert_same_structure(named_type_0(3, 4), named_type_0("a", "b"))
self.assertRaises(TypeError, nest.assert_same_structure,
named_type_0(3, 4), named_type_1(3, 4))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(named_type_0(3, 4), named_type_0((3,), 4))
with self.assertRaisesRegexp(ValueError,
"don't have the same nested structure"):
nest.assert_same_structure(((3,), 4), (3, (4,)))
structure1_list = {"a": ((1, 2), 3), "b": 4, "c": (5, 6)}
structure2_list = {"a": ((1, 2), 3), "b": 4, "d": (5, 6)}
with self.assertRaisesRegexp(TypeError,
"don't have the same sequence type"):
nest.assert_same_structure(structure1, structure1_list)
nest.assert_same_structure(structure1, structure2, check_types=False)
nest.assert_same_structure(structure1, structure1_list, check_types=False)
with self.assertRaisesRegexp(ValueError, "don't have the same set of keys"):
nest.assert_same_structure(structure1_list, structure2_list)
with self.assertRaisesRegexp(ValueError, "don't have the same set of keys"):
nest.assert_same_structure(structure_dictionary,
structure_dictionary_diff_nested)
nest.assert_same_structure(
structure_dictionary,
structure_dictionary_diff_nested,
check_types=False)
nest.assert_same_structure(
structure1_list, structure2_list, check_types=False)
def testMapStructure(self):
structure1 = (((1, 2), 3), 4, (5, 6))
structure2 = (((7, 8), 9), 10, (11, 12))
structure1_plus1 = nest.map_structure(lambda x: x + 1, structure1)
nest.assert_same_structure(structure1, structure1_plus1)
self.assertAllEqual(
[2, 3, 4, 5, 6, 7],
nest.flatten(structure1_plus1))
structure1_plus_structure2 = nest.map_structure(
lambda x, y: x + y, structure1, structure2)
self.assertEqual(
(((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)),
structure1_plus_structure2)
self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4))
self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4))
with self.assertRaisesRegexp(TypeError, "callable"):
nest.map_structure("bad", structure1_plus1)
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, 3, (3,))
with self.assertRaisesRegexp(TypeError, "same sequence type"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), {"a": (3, 4), "b": 5})
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))
with self.assertRaisesRegexp(ValueError, "same nested structure"):
nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),
check_types=False)
with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
nest.map_structure(lambda x: None, structure1, foo="a")
with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
nest.map_structure(lambda x: None, structure1, check_types=False, foo="a")
def testAssertShallowStructure(self):
inp_ab = ("a", "b")
inp_abc = ("a", "b", "c")
expected_message = (
"The two structures don't have the same sequence length. Input "
"structure has length 2, while shallow structure has length 3.")
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_shallow_structure(inp_abc, inp_ab)
inp_ab1 = ((1, 1), (2, 2))
inp_ab2 = {"a": (1, 1), "b": (2, 2)}
expected_message = (
"The two structures don't have the same sequence type. Input structure "
"has type <(type|class) 'tuple'>, while shallow structure has type "
"<(type|class) 'dict'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False)
inp_ab1 = {"a": (1, 1), "b": {"c": (2, 2)}}
inp_ab2 = {"a": (1, 1), "b": {"d": (2, 2)}}
expected_message = (
r"The two structures don't have the same keys. Input "
r"structure has keys \['c'\], while shallow structure has "
r"keys \['d'\].")
with self.assertRaisesRegexp(ValueError, expected_message):
nest.assert_shallow_structure(inp_ab2, inp_ab1)
inp_ab = collections.OrderedDict([("a", 1), ("b", (2, 3))])
inp_ba = collections.OrderedDict([("b", (2, 3)), ("a", 1)])
nest.assert_shallow_structure(inp_ab, inp_ba)
def testFlattenUpTo(self):
input_tree = (((2, 2), (3, 3)), ((4, 9), (5, 5)))
shallow_tree = ((True, True), (False, True))
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [(2, 2), (3, 3), (4, 9), (5, 5)])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
input_tree = ((("a", 1), (("b", 2), (("c", 3), (("d", 4))))))
shallow_tree = (("level_1", ("level_2", ("level_3", ("level_4")))))
input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
input_tree)
input_tree_flattened = nest.flatten(input_tree)
self.assertEqual(input_tree_flattened_as_shallow_tree,
[("a", 1), ("b", 2), ("c", 3), ("d", 4)])
self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
input_tree = ["input_tree"]
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = ("input_tree_0", "input_tree_1")
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = (0,)
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = (0, 1)
shallow_tree = 9
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = "input_tree"
shallow_tree = "shallow_tree"
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = 0
shallow_tree = 0
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [input_tree])
self.assertEqual(flattened_shallow_tree, [shallow_tree])
input_tree = "input_tree"
shallow_tree = ("shallow_tree",)
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'str'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
input_tree = "input_tree"
shallow_tree = ("shallow_tree_9", "shallow_tree_8")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
input_tree = 0
shallow_tree = (9,)
expected_message = ("If shallow structure is a sequence, input must also "
"be a sequence. Input has type: <(type|class) 'int'>.")
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
input_tree = 0
shallow_tree = (9, 8)
with self.assertRaisesRegexp(TypeError, expected_message):
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_shallow_tree, list(shallow_tree))
input_tree = {"a": ((2, 2), (3, 3)), "b": ((4, 9), (5, 5))}
shallow_tree = {"a": (True, True), "b": (False, True)}
flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
self.assertEqual(flattened_input_tree, [(2, 2), (3, 3), (4, 9), (5, 5)])
self.assertEqual(flattened_shallow_tree, [True, True, False, True])
def testMapStructureUpTo(self):
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = nest.map_structure_up_to(
inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops)
self.assertEqual(out.a, 6)
self.assertEqual(out.b, 15)
data_list = ((2, 4, 6, 8), ((1, 3, 5, 7, 9), (3, 5, 7)))
name_list = ("evens", ("odds", "primes"))
out = nest.map_structure_up_to(
name_list, lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
self.assertEqual(out, ("first_4_evens", ("first_5_odds", "first_3_primes")))
if __name__ == "__main__":
test.main()
| true
| true
|
1c48ea802ae5ab745387f8732bb696001ad3948d
| 12,626
|
py
|
Python
|
Image Classification/CGIAR Wheat Growth Stage Challenge/neurofitting/zindi_cgiar_wheat_growth_stage_challenge/src_wd/trainer.py
|
ZindiAfrica/Computer-Vision
|
bf4c00a0633506270dc6d07df938a100a10ee799
|
[
"MIT"
] | null | null | null |
Image Classification/CGIAR Wheat Growth Stage Challenge/neurofitting/zindi_cgiar_wheat_growth_stage_challenge/src_wd/trainer.py
|
ZindiAfrica/Computer-Vision
|
bf4c00a0633506270dc6d07df938a100a10ee799
|
[
"MIT"
] | null | null | null |
Image Classification/CGIAR Wheat Growth Stage Challenge/neurofitting/zindi_cgiar_wheat_growth_stage_challenge/src_wd/trainer.py
|
ZindiAfrica/Computer-Vision
|
bf4c00a0633506270dc6d07df938a100a10ee799
|
[
"MIT"
] | null | null | null |
import os
import numba
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import ReduceLROnPlateau
from optimizer import *
from trainer_callbacks import *
from utils import *
#%% #################################### Model Trainer Class ####################################
class ModelTrainer():
def __init__(self,
model=None,
Loaders=[None,[]],
metrics=None,
fold=None,
lr=None,
epochsTorun=None,
checkpoint_saving_path=None,
resume_train_from_checkpoint=False,
resume_checkpoint_path=None,
test_run_for_error=False,
batch_size=None,
do_grad_accum=False,
grad_accum_steps=4,
use_fp16=True,
problem_name=None
):
super(ModelTrainer, self).__init__()
self.problem_name = problem_name
self.model = model.cuda()
self.trainLoader = Loaders[0]
self.valLoader = Loaders[1]
self.info_bbx = store_info(metrics)
self.fold = fold
        if self.fold is not None:
self.checkpoint_saving_path = checkpoint_saving_path + '/fold' + str(self.fold) + '/'
else:
self.checkpoint_saving_path = checkpoint_saving_path + '/'
self.fold = 0
os.makedirs(self.checkpoint_saving_path,exist_ok=True)
self.lr = lr
self.epochsTorun = epochsTorun
self.init_epoch = -1
self.test_run_for_error = test_run_for_error
self.current_checkpoint_save_count = 1
self.resume_checkpoint_path = resume_checkpoint_path
self.best_loss = 9999
self.best_f1_score = -9999
self.best_rmse = 9999
self.batch_size = batch_size
self.optimizer = Over9000(params=self.model.parameters(),lr=self.lr)
self.scheduler = ReduceLROnPlateau(self.optimizer, factor=0.5, mode='min', patience=5, verbose=True)
self.do_grad_accum = do_grad_accum
self.grad_accum_steps = grad_accum_steps
self.trainer_settings_dict = {
'do_grad_accum': self.do_grad_accum,
'grad_accum_steps':self.grad_accum_steps,
'epochsTorun':self.epochsTorun,
'lr':self.lr,
'batch_size':batch_size,
}
self.use_fp16 = use_fp16
self.scheduler_flag = 9999
self.criterion = RMSELoss().cuda()
self.criterion_2 = nn.CrossEntropyLoss().cuda()
self.scaler = torch.cuda.amp.GradScaler()
if resume_train_from_checkpoint:
if os.path.isfile(resume_checkpoint_path):
print("=> Loading checkpoint from '{}'".format(resume_checkpoint_path))
checkpoint_dict = torch.load(resume_checkpoint_path)
self.model.load_state_dict(checkpoint_dict['Model_state_dict'])
self.scheduler.load_state_dict(checkpoint_dict['Scheduler_state_dict'])
self.optimizer.load_state_dict(checkpoint_dict['Optimizer_state_dict'])
self.best_loss = checkpoint_dict['Best_val_loss']
self.best_f1_score = checkpoint_dict['Best_val_f1_score']
self.info_bbx.all_info = checkpoint_dict['All_info']
self.init_epoch = checkpoint_dict['Epoch']
print('Best Val loss is {}'.format(self.best_loss))
print('Best Val f1_score is {}'.format(self.best_f1_score))
print('Current val loss is {}'.format(checkpoint_dict['Current_val_Loss']))
print('Current val f1 score is {}'.format(checkpoint_dict['Current_val_f1_score']))
self.scheduler_flag = checkpoint_dict['Scheduler_flag']
del checkpoint_dict
torch.cuda.empty_cache()
else:
print("=> No checkpoint found at '{}' !".format(resume_checkpoint_path))
#%% train part starts here
def fit(self):
with TQDM() as pbar:
pbar.on_train_begin({'num_batches':len(self.trainLoader),'num_epoch':self.epochsTorun})
pbar.on_val_begin({'num_batches':len(self.valLoader),'num_epoch':self.epochsTorun})
self.train_metric_meter = Metric_Meter()
self.val_metric_meter = Metric_Meter()
for epoch in range(self.epochsTorun):
current_epoch_no = epoch+1
if current_epoch_no <= self.init_epoch:
continue
pbar.on_epoch_train_begin(self.fold,current_epoch_no)
self.info_bbx._init_new_epoch(current_epoch_no)
self.model.train()
torch.set_grad_enabled(True)
#self.optimizer.zero_grad()
self.train_metric_meter.reset()
self.val_metric_meter.reset()
for itera_no, data in enumerate(self.trainLoader):
pbar.on_train_batch_begin()
self.optimizer.zero_grad()
images, targets = data
images = images.cuda()
targets = targets.cuda()
with torch.cuda.amp.autocast():
out = self.model(images)
batch_loss = self.criterion(out['LOGITS'], targets[:,None]) + self.criterion_2(out['LOGITS_2'], targets.long())
self.scaler.scale(batch_loss).backward()
self.scaler.step(self.optimizer)
self.scaler.update()
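                    # Standard torch.cuda.amp pattern: scale() multiplies the
                    # loss before backward so fp16 gradients do not underflow,
                    # step() unscales them and skips optimizer.step() if any
                    # gradient is inf/NaN, and update() adapts the scale factor.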
self.train_metric_meter.update(out['LOGITS'].clone(), targets, 'single')
self.info_bbx.update_train_info({'Loss':[(batch_loss.detach().item()),images.shape[0]]})
pbar.on_train_batch_end(logs=self.info_bbx.request_current_epoch_train_metric_info())
torch.cuda.empty_cache()
if self.test_run_for_error:
if itera_no==5:
break
#%% validation part starts here
f1_score, rmse = self.train_metric_meter.feedback()
self.info_bbx.update_train_info({'f1_score': f1_score, 'rmse': rmse})
pbar.on_epoch_train_end(self.info_bbx.request_current_epoch_train_metric_info())
pbar.on_epoch_val_begin(self.fold,current_epoch_no)
self.model.eval()
torch.set_grad_enabled(False)
with torch.no_grad():
for itera_no, data in enumerate(self.valLoader):
pbar.on_val_batch_begin()
images, targets = data
images = images.cuda()
targets = targets.cuda()
with torch.cuda.amp.autocast():
out = self.model(images)
batch_loss = self.criterion(out['LOGITS'], targets[:,None]) + self.criterion_2(out['LOGITS_2'], targets.long())
self.val_metric_meter.update(out['LOGITS'].clone(), targets, 'single')
self.info_bbx.update_val_info({'Loss':[(batch_loss.detach().item()),images.shape[0]]})
pbar.on_val_batch_end(logs=self.info_bbx.request_current_epoch_val_metric_info())
torch.cuda.empty_cache()
if self.test_run_for_error:
if itera_no==5:
break
f1_score, rmse = self.val_metric_meter.feedback()
self.info_bbx.update_val_info({'f1_score': f1_score, 'rmse': rmse})
pbar.on_epoch_val_end(self.info_bbx.request_current_epoch_val_metric_info())
#%% Update best parameters
if self.best_loss > self.info_bbx.get_info(current_epoch_no,'Loss','Val'):
print( ' Val Loss is improved from {:.4f} to {:.4f}! '.format(self.best_loss,self.info_bbx.get_info(current_epoch_no,'Loss','Val')) )
self.best_loss = self.info_bbx.get_info(current_epoch_no,'Loss','Val')
is_best_loss = True
else:
print( ' Val Loss is not improved from {:.4f}! '.format(self.best_loss))
is_best_loss = False
if self.best_f1_score < self.info_bbx.get_info(current_epoch_no,'f1_score','Val'):
print( ' Val f1 score is improved from {:.4f} to {:.4f}! '.format(self.best_f1_score,self.info_bbx.get_info(current_epoch_no,'f1_score','Val')) )
self.best_f1_score = self.info_bbx.get_info(current_epoch_no,'f1_score','Val')
is_best_f1_score = True
else:
print( ' Val f1 score is not improved from {:.4f}! '.format(self.best_f1_score))
is_best_f1_score = False
#%%Learning Rate Schedulers
if is_best_loss or is_best_f1_score:
self.scheduler_flag = self.scheduler_flag - 1
self.scheduler.step(self.scheduler_flag)
else:
self.scheduler.step(self.scheduler_flag+1)
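                # scheduler_flag drives ReduceLROnPlateau with a synthetic
                # metric: on improvement it is decremented, and the strictly
                # lower value resets the scheduler's patience; otherwise
                # flag + 1 is passed, which is never a new best and therefore
                # counts toward the patience of 5 before the LR is halved.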
#%%checkpoint dict creation
                checkpoint_dict = {
                    'Epoch': current_epoch_no,
                    'Model_state_dict': self.model.state_dict(),
                    # state required by the resume-from-checkpoint path in __init__
                    'Optimizer_state_dict': self.optimizer.state_dict(),
                    'Scheduler_state_dict': self.scheduler.state_dict(),
                    'Scheduler_flag': self.scheduler_flag,
                    'All_info': self.info_bbx.all_info,
                    'Current_val_Loss': self.info_bbx.get_info(current_epoch_no,'Loss','Val'),
                    'Current_train_Loss': self.info_bbx.get_info(current_epoch_no,'Loss','Train'),
                    'Current_val_f1_score':self.info_bbx.get_info(current_epoch_no,'f1_score','Val'),
                    'Current_train_f1_score':self.info_bbx.get_info(current_epoch_no,'f1_score','Train'),
                    'Current_val_rmse':self.info_bbx.get_info(current_epoch_no,'rmse','Val'),
                    'Current_train_rmse':self.info_bbx.get_info(current_epoch_no,'rmse','Train'),
                    'Best_val_loss' : self.best_loss,
                    'Best_val_f1_score': self.best_f1_score,
                    'Best_val_rmse': self.best_rmse,
                }
#%%checkpoint dict saving
if is_best_f1_score:
torch.save(checkpoint_dict, self.checkpoint_saving_path+'checkpoint_best_f1_score_fold{}.pth'.format(self.fold))
del checkpoint_dict
torch.cuda.empty_cache()
| 55.621145
| 166
| 0.475923
|
import os
import numba
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import ReduceLROnPlateau
from optimizer import *
from trainer_callbacks import *
from utils import *
class ModelTrainer():
def __init__(self,
model=None,
Loaders=[None,[]],
metrics=None,
fold=None,
lr=None,
epochsTorun=None,
checkpoint_saving_path=None,
resume_train_from_checkpoint=False,
resume_checkpoint_path=None,
test_run_for_error=False,
batch_size=None,
do_grad_accum=False,
grad_accum_steps=4,
use_fp16=True,
problem_name=None
):
super(ModelTrainer, self).__init__()
self.problem_name = problem_name
self.model = model.cuda()
self.trainLoader = Loaders[0]
self.valLoader = Loaders[1]
self.info_bbx = store_info(metrics)
self.fold = fold
        if self.fold is not None:
self.checkpoint_saving_path = checkpoint_saving_path + '/fold' + str(self.fold) + '/'
else:
self.checkpoint_saving_path = checkpoint_saving_path + '/'
self.fold = 0
os.makedirs(self.checkpoint_saving_path,exist_ok=True)
self.lr = lr
self.epochsTorun = epochsTorun
self.init_epoch = -1
self.test_run_for_error = test_run_for_error
self.current_checkpoint_save_count = 1
self.resume_checkpoint_path = resume_checkpoint_path
self.best_loss = 9999
self.best_f1_score = -9999
self.best_rmse = 9999
self.batch_size = batch_size
self.optimizer = Over9000(params=self.model.parameters(),lr=self.lr)
self.scheduler = ReduceLROnPlateau(self.optimizer, factor=0.5, mode='min', patience=5, verbose=True)
self.do_grad_accum = do_grad_accum
self.grad_accum_steps = grad_accum_steps
self.trainer_settings_dict = {
'do_grad_accum': self.do_grad_accum,
'grad_accum_steps':self.grad_accum_steps,
'epochsTorun':self.epochsTorun,
'lr':self.lr,
'batch_size':batch_size,
}
self.use_fp16 = use_fp16
self.scheduler_flag = 9999
self.criterion = RMSELoss().cuda()
self.criterion_2 = nn.CrossEntropyLoss().cuda()
self.scaler = torch.cuda.amp.GradScaler()
if resume_train_from_checkpoint:
if os.path.isfile(resume_checkpoint_path):
print("=> Loading checkpoint from '{}'".format(resume_checkpoint_path))
checkpoint_dict = torch.load(resume_checkpoint_path)
self.model.load_state_dict(checkpoint_dict['Model_state_dict'])
self.scheduler.load_state_dict(checkpoint_dict['Scheduler_state_dict'])
self.optimizer.load_state_dict(checkpoint_dict['Optimizer_state_dict'])
self.best_loss = checkpoint_dict['Best_val_loss']
self.best_f1_score = checkpoint_dict['Best_val_f1_score']
self.info_bbx.all_info = checkpoint_dict['All_info']
self.init_epoch = checkpoint_dict['Epoch']
print('Best Val loss is {}'.format(self.best_loss))
print('Best Val f1_score is {}'.format(self.best_f1_score))
print('Current val loss is {}'.format(checkpoint_dict['Current_val_Loss']))
print('Current val f1 score is {}'.format(checkpoint_dict['Current_val_f1_score']))
self.scheduler_flag = checkpoint_dict['Scheduler_flag']
del checkpoint_dict
torch.cuda.empty_cache()
else:
print("=> No checkpoint found at '{}' !".format(resume_checkpoint_path))
def fit(self):
with TQDM() as pbar:
pbar.on_train_begin({'num_batches':len(self.trainLoader),'num_epoch':self.epochsTorun})
pbar.on_val_begin({'num_batches':len(self.valLoader),'num_epoch':self.epochsTorun})
self.train_metric_meter = Metric_Meter()
self.val_metric_meter = Metric_Meter()
for epoch in range(self.epochsTorun):
current_epoch_no = epoch+1
if current_epoch_no <= self.init_epoch:
continue
pbar.on_epoch_train_begin(self.fold,current_epoch_no)
self.info_bbx._init_new_epoch(current_epoch_no)
self.model.train()
torch.set_grad_enabled(True)
self.train_metric_meter.reset()
self.val_metric_meter.reset()
for itera_no, data in enumerate(self.trainLoader):
pbar.on_train_batch_begin()
self.optimizer.zero_grad()
images, targets = data
images = images.cuda()
targets = targets.cuda()
with torch.cuda.amp.autocast():
out = self.model(images)
batch_loss = self.criterion(out['LOGITS'], targets[:,None]) + self.criterion_2(out['LOGITS_2'], targets.long())
self.scaler.scale(batch_loss).backward()
self.scaler.step(self.optimizer)
self.scaler.update()
self.train_metric_meter.update(out['LOGITS'].clone(), targets, 'single')
self.info_bbx.update_train_info({'Loss':[(batch_loss.detach().item()),images.shape[0]]})
pbar.on_train_batch_end(logs=self.info_bbx.request_current_epoch_train_metric_info())
torch.cuda.empty_cache()
if self.test_run_for_error:
if itera_no==5:
break
f1_score, rmse = self.train_metric_meter.feedback()
self.info_bbx.update_train_info({'f1_score': f1_score, 'rmse': rmse})
pbar.on_epoch_train_end(self.info_bbx.request_current_epoch_train_metric_info())
pbar.on_epoch_val_begin(self.fold,current_epoch_no)
self.model.eval()
torch.set_grad_enabled(False)
with torch.no_grad():
for itera_no, data in enumerate(self.valLoader):
pbar.on_val_batch_begin()
images, targets = data
images = images.cuda()
targets = targets.cuda()
with torch.cuda.amp.autocast():
out = self.model(images)
batch_loss = self.criterion(out['LOGITS'], targets[:,None]) + self.criterion_2(out['LOGITS_2'], targets.long())
self.val_metric_meter.update(out['LOGITS'].clone(), targets, 'single')
self.info_bbx.update_val_info({'Loss':[(batch_loss.detach().item()),images.shape[0]]})
pbar.on_val_batch_end(logs=self.info_bbx.request_current_epoch_val_metric_info())
torch.cuda.empty_cache()
if self.test_run_for_error:
if itera_no==5:
break
f1_score, rmse = self.val_metric_meter.feedback()
self.info_bbx.update_val_info({'f1_score': f1_score, 'rmse': rmse})
pbar.on_epoch_val_end(self.info_bbx.request_current_epoch_val_metric_info())
if self.best_loss > self.info_bbx.get_info(current_epoch_no,'Loss','Val'):
print( ' Val Loss is improved from {:.4f} to {:.4f}! '.format(self.best_loss,self.info_bbx.get_info(current_epoch_no,'Loss','Val')) )
self.best_loss = self.info_bbx.get_info(current_epoch_no,'Loss','Val')
is_best_loss = True
else:
print( ' Val Loss is not improved from {:.4f}! '.format(self.best_loss))
is_best_loss = False
if self.best_f1_score < self.info_bbx.get_info(current_epoch_no,'f1_score','Val'):
print( ' Val f1 score is improved from {:.4f} to {:.4f}! '.format(self.best_f1_score,self.info_bbx.get_info(current_epoch_no,'f1_score','Val')) )
self.best_f1_score = self.info_bbx.get_info(current_epoch_no,'f1_score','Val')
is_best_f1_score = True
else:
print( ' Val f1 score is not improved from {:.4f}! '.format(self.best_f1_score))
is_best_f1_score = False
if is_best_loss or is_best_f1_score:
self.scheduler_flag = self.scheduler_flag - 1
self.scheduler.step(self.scheduler_flag)
else:
self.scheduler.step(self.scheduler_flag+1)
                checkpoint_dict = {
                    'Epoch': current_epoch_no,
                    'Model_state_dict': self.model.state_dict(),
                    'Optimizer_state_dict': self.optimizer.state_dict(),
                    'Scheduler_state_dict': self.scheduler.state_dict(),
                    'Scheduler_flag': self.scheduler_flag,
                    'All_info': self.info_bbx.all_info,
                    'Current_val_Loss': self.info_bbx.get_info(current_epoch_no,'Loss','Val'),
                    'Current_train_Loss': self.info_bbx.get_info(current_epoch_no,'Loss','Train'),
                    'Current_val_f1_score':self.info_bbx.get_info(current_epoch_no,'f1_score','Val'),
                    'Current_train_f1_score':self.info_bbx.get_info(current_epoch_no,'f1_score','Train'),
                    'Current_val_rmse':self.info_bbx.get_info(current_epoch_no,'rmse','Val'),
                    'Current_train_rmse':self.info_bbx.get_info(current_epoch_no,'rmse','Train'),
                    'Best_val_loss' : self.best_loss,
                    'Best_val_f1_score': self.best_f1_score,
                    'Best_val_rmse': self.best_rmse,
                }
if is_best_f1_score:
torch.save(checkpoint_dict, self.checkpoint_saving_path+'checkpoint_best_f1_score_fold{}.pth'.format(self.fold))
del checkpoint_dict
torch.cuda.empty_cache()
| true
| true
|
1c48eab421d71fb3dd130c7dbe6529475759591f
| 16,694
|
py
|
Python
|
metalibm_functions/ml_atan.py
|
metalibm/metalibm
|
e3133bb95e13f797bb902ef7cd1d2f8f352c4454
|
[
"MIT"
] | 12
|
2019-10-29T21:30:58.000Z
|
2022-02-05T16:28:01.000Z
|
metalibm_functions/ml_atan.py
|
metalibm/metalibm
|
e3133bb95e13f797bb902ef7cd1d2f8f352c4454
|
[
"MIT"
] | 20
|
2021-03-11T19:46:48.000Z
|
2022-02-05T16:03:29.000Z
|
metalibm_functions/ml_atan.py
|
metalibm/metalibm
|
e3133bb95e13f797bb902ef7cd1d2f8f352c4454
|
[
"MIT"
] | 4
|
2021-03-10T15:06:58.000Z
|
2021-07-14T17:39:53.000Z
|
# -*- coding: utf-8 -*-
""" meta-implementation of arc-tangent (atan) function """
###############################################################################
# This file is part of metalibm (https://github.com/kalray/metalibm)
###############################################################################
# MIT License
#
# Copyright (c) 2018 Kalray
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
# created: Mar 7th, 2018
# last-modified: Mar 18th, 2020
# Author(s): Nicolas Brunie <nbrunie@kalray.eu>
###############################################################################
import sollya
from sollya import Interval
from metalibm_core.core.ml_function import DefaultArgTemplate
from metalibm_core.core.simple_scalar_function import (
ScalarBinaryFunction, ScalarUnaryFunction)
from metalibm_core.core.ml_operations import (
Abs, Constant,
Select, Statement, Return,
LogicalOr, LogicalAnd, LogicalNot,
BitLogicXor, TypeCast, Equal,
)
from metalibm_core.core.ml_formats import ML_Binary32, ML_Bool
from metalibm_core.core.precisions import ML_Faithful
from metalibm_core.code_generation.generic_processor import GenericProcessor
from metalibm_core.core.polynomials import Polynomial, PolynomialSchemeEvaluator
from metalibm_core.core.approximation import (
generate_piecewise_poly_approx,
load_piecewese_poly_params_from_axf,
generate_piecewise_poly_approx_from_params)
from metalibm_core.core.indexing import SubUniformIntervalIndexing
from metalibm_core.utility.ml_template import ML_NewArgTemplate
from metalibm_core.utility.log_report import Log
from metalibm_core.utility.debug_utils import debug_multi
from metalibm_core.utility.axf_utils import AXF_JSON_Exporter, AXF_JSON_Importer
S2 = sollya.SollyaObject(2)
# Disabling Sollya's rounding warnings
sollya.roundingwarnings = sollya.off
sollya.verbosity = 0
sollya.showmessagenumbers = sollya.on
class MetaAtan(ScalarUnaryFunction):
""" Meta implementation of arctangent function """
function_name = "ml_atan"
default_args_atan = {
"output_file": "my_atan.c",
"function_name": "my_atan",
"precision": ML_Binary32,
"accuracy": ML_Faithful,
"num_sub_intervals": 8,
"method": "piecewise",
"dump_axf_approx": False,
"load_axf_approx": False,
"target": GenericProcessor.get_target_instance()
}
def __init__(self, args):
super().__init__(args)
self.method = args.method
self.num_sub_intervals = args.num_sub_intervals
self.dump_axf_approx = args.dump_axf_approx
self.load_axf_approx = args.load_axf_approx
@classmethod
def get_default_args(cls, **kw):
""" Return a structure containing the arguments for MetaAtan,
            built from a default argument mapping overloaded with @p kw
"""
arg_dict = cls.default_args_atan.copy()
arg_dict.update(kw)
return DefaultArgTemplate(**arg_dict)
def generate_scalar_scheme(self, vx):
""" Evaluation scheme generation """
return self.generic_atan2_generate(vx)
def generic_atan2_generate(self, _vx, vy=None):
""" if vy is None, compute atan(_vx), else compute atan2(vy / vx) """
if vy is None:
# approximation
            # if abs_vx <= 1.0 then atan(abs_vx) is directly approximated
# if abs_vx > 1.0 then atan(abs_vx) = pi/2 - atan(1 / abs_vx)
#
# for vx >= 0, atan(vx) = atan(abs_vx)
#
            # for vx < 0, atan(vx) = -atan(abs_vx); if abs_vx > 1 this becomes
# = -pi/2 + atan(1 / abs_vx)
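            # A quick numeric sanity check of the reduction (plain math, not
            # part of the generated scheme): atan(2.0) ~= 1.10715 and
            # pi/2 - atan(0.5) ~= 1.57080 - 0.46365 = 1.10715.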
vx = _vx
sign_cond = vx < 0
abs_vx = Select(vx < 0, -vx, vx, tag="abs_vx", debug=debug_multi)
bound_cond = abs_vx > 1
inv_abs_vx = 1 / abs_vx
# condition to select subtraction
cond = LogicalOr(
LogicalAnd(vx < 0, LogicalNot(bound_cond)),
vx > 1,
tag="cond", debug=debug_multi
)
# reduced argument
red_vx = Select(bound_cond, inv_abs_vx, abs_vx, tag="red_vx", debug=debug_multi, precision=self.precision)
offset = None
else:
# bound_cond is True iff Abs(vy / _vx) > 1.0
bound_cond = Abs(vy) > Abs(_vx)
bound_cond.set_attributes(tag="bound_cond", debug=debug_multi)
# vx and vy are of opposite signs
#sign_cond = (_vx * vy) < 0
# using cast to int(signed) and bitwise xor
# to determine if _vx and vy are of opposite sign rapidly
fast_sign_cond = BitLogicXor(
TypeCast(_vx, precision=self.precision.get_integer_format()),
TypeCast(vy, precision=self.precision.get_integer_format()),
precision=self.precision.get_integer_format()
) < 0
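            # e.g. in IEEE-754 binary32 the sign occupies the most significant
            # bit, so the XOR of the two integer bit patterns is negative
            # (MSB set) exactly when _vx and vy have opposite signs.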
# sign_cond = (_vx * vy) < 0
sign_cond = fast_sign_cond
sign_cond.set_attributes(tag="sign_cond", debug=debug_multi)
# condition to select subtraction
# TODO: could be accelerated if LogicalXor existed
slow_cond = LogicalOr(
                LogicalAnd(sign_cond, LogicalNot(bound_cond)), # -1 < (vy / _vx) < 0
LogicalAnd(bound_cond, LogicalNot(sign_cond)), # (vy / _vx) > 1
tag="cond", debug=debug_multi
)
cond = slow_cond
numerator = Select(bound_cond, _vx, vy, tag="numerator", debug=debug_multi)
denominator = Select(bound_cond, vy, _vx, tag="denominator", debug=debug_multi)
# reduced argument
red_vx = Abs(numerator) / Abs(denominator)
red_vx.set_attributes(tag="red_vx", debug=debug_multi, precision=self.precision)
offset = Select(_vx > 0,
Constant(0, precision=self.precision),
# vx < 0
Select(sign_cond,
# vy > 0
Constant(sollya.pi, precision=self.precision),
Constant(-sollya.pi, precision=self.precision),
precision=self.precision
),
precision=self.precision,
tag="offset"
)
approx_fct = sollya.atan(sollya.x)
if self.method == "piecewise":
sign_vx = Select(cond, -1, 1, precision=self.precision, tag="sign_vx", debug=debug_multi)
cst_sign = Select(sign_cond, -1, 1, precision=self.precision, tag="cst_sign", debug=debug_multi)
cst = cst_sign * Select(bound_cond, sollya.pi / 2, 0, precision=self.precision)
cst.set_attributes(tag="cst", debug=debug_multi)
bound_low = 0.0
bound_high = 1.0
num_intervals = self.num_sub_intervals
error_threshold = S2**-(self.precision.get_mantissa_size() + 8)
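            # e.g. assuming get_mantissa_size() counts the implicit bit
            # (24 for binary32), this targets an absolute error below 2^-32.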
uniform_indexing = SubUniformIntervalIndexing(Interval(bound_low, bound_high), num_intervals)
if self.load_axf_approx:
assert not self.dump_axf_approx
[axf_approx] = AXF_JSON_Importer.from_file(self.load_axf_approx)
approx_offset_table, approx_poly_max_degree, approx_poly_table, approx_max_error = load_piecewese_poly_params_from_axf(axf_approx, uniform_indexing)
approx = generate_piecewise_poly_approx_from_params(approx_offset_table,
approx_poly_max_degree,
approx_poly_table,
uniform_indexing,
self.precision,
red_vx)
else:
approx, axf_approx = generate_piecewise_poly_approx(
lambda offset: sollya.atan(sollya.x + offset),
uniform_indexing,
error_threshold,
self.precision,
red_vx,
error_target_type=sollya.absolute,
                    axf_export=self.dump_axf_approx is not False)
if self.dump_axf_approx:
axf_approx.tag = "atan"
AXF_JSON_Exporter.to_file(self.dump_axf_approx,
[axf_approx.serialize_to_dict()])
# approx, eval_error = piecewise_approximation(approx_fct,
# red_vx,
# self.precision,
# bound_low=bound_low,
# bound_high=bound_high,
# max_degree=None,
# num_intervals=num_intervals,
# error_threshold=error_threshold,
# odd=True)
result = cst + sign_vx * approx
result.set_attributes(tag="result", precision=self.precision, debug=debug_multi)
elif self.method == "single":
approx_interval = Interval(0, 1.0)
# determining the degree of the polynomial approximation
poly_degree_range = sollya.guessdegree(approx_fct / sollya.x,
approx_interval,
S2**-(self.precision.get_field_size() + 2))
poly_degree = int(sollya.sup(poly_degree_range)) + 4
Log.report(Log.Info, "poly_degree={}".format(poly_degree))
# arctan is an odd function, so only odd coefficient must be non-zero
poly_degree_list = list(range(1, poly_degree+1, 2))
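            # e.g. with poly_degree = 11 the monomial list is [1, 3, 5, 7, 9, 11];
            # the 1-bit format requested for the degree-1 coefficient below
            # presumably lets it round to exactly 1, matching the
            # vx + vx * exact_sum form used at the end of this branch.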
poly_object, poly_error = Polynomial.build_from_approximation_with_error(
approx_fct, poly_degree_list,
[1] + [self.precision.get_sollya_object()] * (len(poly_degree_list)-1),
approx_interval)
odd_predicate = lambda index, _: ((index-1) % 4 != 0)
even_predicate = lambda index, _: (index != 1 and (index-1) % 4 == 0)
poly_odd_object = poly_object.sub_poly_cond(odd_predicate, offset=1)
poly_even_object = poly_object.sub_poly_cond(even_predicate, offset=1)
sollya.settings.display = sollya.hexadecimal
Log.report(Log.Info, "poly_error: {}".format(poly_error))
Log.report(Log.Info, "poly_odd: {}".format(poly_odd_object))
Log.report(Log.Info, "poly_even: {}".format(poly_even_object))
poly_odd = PolynomialSchemeEvaluator.generate_horner_scheme(poly_odd_object, abs_vx)
poly_odd.set_attributes(tag="poly_odd", debug=debug_multi)
poly_even = PolynomialSchemeEvaluator.generate_horner_scheme(poly_even_object, abs_vx)
poly_even.set_attributes(tag="poly_even", debug=debug_multi)
exact_sum = poly_odd + poly_even
exact_sum.set_attributes(tag="exact_sum", debug=debug_multi)
# poly_even should be (1 + poly_even)
result = vx + vx * exact_sum
result.set_attributes(tag="result", precision=self.precision, debug=debug_multi)
else:
raise NotImplementedError
        if offset is not None:
result = result + offset
std_scheme = Statement(
Return(result)
)
scheme = std_scheme
return scheme
def numeric_emulate(self, input_value):
return sollya.atan(input_value)
standard_test_cases = [[sollya.parse(x)] for x in ["0x1.107a78p+0", "0x1.9e75a6p+0"]]
class MetaAtan2(ScalarBinaryFunction, MetaAtan):
""" Meta-function for 2-argument arc tangent (atan2) """
arity = 2
function_name = "ml_atan2"
def __init__(self, args):
ScalarBinaryFunction.__init__(self, args)
self.method = args.method
@classmethod
def get_default_args(cls, **kw):
""" Return a structure containing the arguments for MetaAtan,
            built from a default argument mapping overloaded with @p kw
"""
arg_dict = cls.default_args_atan.copy()
arg_dict.update({
"output_file": "my_atan2.c",
"function_name": "my_atan2",
"input_intervals": [Interval(-5, 5)] * 2,
})
arg_dict.update(kw)
return DefaultArgTemplate(**arg_dict)
def generate_scalar_scheme(self, vy, vx):
        # as in the standard library atan2(y, x), y is the first
        # parameter and x the second, so vy and vx are swapped in the
        # method argument list
# extract of atan2 specification from man page
# If y is +0 (-0) and x is less than 0, +pi (-pi) is returned.
# If y is +0 (-0) and x is greater than 0, +0 (-0) is returned.
# If y is less than 0 and x is +0 or -0, -pi/2 is returned.
# If y is greater than 0 and x is +0 or -0, pi/2 is returned.
# If either x or y is NaN, a NaN is returned.
# If y is +0 (-0) and x is -0, +pi (-pi) is returned.
# If y is +0 (-0) and x is +0, +0 (-0) is returned.
# If y is a finite value greater (less) than 0, and x is negative infinity, +pi (-pi) is
# returned.
# If y is a finite value greater (less) than 0, and x is positive infinity, +0 (-0) is returned.
# If y is positive infinity (negative infinity), and x is finite, pi/2 (-pi/2) is returned.
# If y is positive infinity (negative infinity) and x is negative infinity, +3*pi/4 (-3*pi/4) is
# returned.
# If y is positive infinity (negative infinity) and x is positive infinity, +pi/4 (-pi/4) is
# returned.
vy.set_attributes(tag="y")
vx.set_attributes(tag="x")
return self.generic_atan2_generate(vx, vy)
def numeric_emulate(self, vy, vx):
if vx > 0:
return sollya.atan(vy / vx)
elif vy < 0:
# vy / vx > 0
return -sollya.pi + sollya.atan(vy / vx)
else:
# vy > 0, vy / vx < 0
return sollya.pi + sollya.atan(vy / vx)
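    # e.g. for atan2(1, -1): vx < 0 and vy > 0, so the result is
    # pi + atan(-1) = pi - pi/4 = 3*pi/4, as expected for quadrant II.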
standard_test_cases = [
(sollya.parse("0x1.08495cp+2"), sollya.parse("-0x1.88569ep+1")),
(sollya.parse("0x1.08495cp+2"), sollya.parse("-0x1.88569ep+1")),
(sollya.parse("0x1.08495cp+2"), sollya.parse("-0x1.88569ep+1")),
(sollya.parse("0x1.08495cp+2"), sollya.parse("-0x1.88569ep+1")),
]
if __name__ == "__main__":
# auto-test
arg_template = ML_NewArgTemplate(default_arg=MetaAtan.get_default_args())
# extra options
arg_template.get_parser().add_argument(
"--method", dest="method", default="piecewise", choices=["piecewise", "single"],
action="store", help="select approximation method")
arg_template.get_parser().add_argument(
"--num-sub-intervals", default=8, type=int,
action="store", help="set the number of sub-intervals in piecewise method")
arg_template.get_parser().add_argument(
"--dump-axf-approx", default=False,
action="store", help="dump approximations in AXF format")
arg_template.get_parser().add_argument(
"--load-axf-approx", default=False,
action="store", help="load approximations from file in AXF format")
args = arg_template.arg_extraction()
ml_atan = MetaAtan(args)
ml_atan.gen_implementation()
| 43.701571
| 164
| 0.593567
|
import sollya
from sollya import Interval
from metalibm_core.core.ml_function import DefaultArgTemplate
from metalibm_core.core.simple_scalar_function import (
ScalarBinaryFunction, ScalarUnaryFunction)
from metalibm_core.core.ml_operations import (
Abs, Constant,
Select, Statement, Return,
LogicalOr, LogicalAnd, LogicalNot,
BitLogicXor, TypeCast, Equal,
)
from metalibm_core.core.ml_formats import ML_Binary32, ML_Bool
from metalibm_core.core.precisions import ML_Faithful
from metalibm_core.code_generation.generic_processor import GenericProcessor
from metalibm_core.core.polynomials import Polynomial, PolynomialSchemeEvaluator
from metalibm_core.core.approximation import (
generate_piecewise_poly_approx,
load_piecewese_poly_params_from_axf,
generate_piecewise_poly_approx_from_params)
from metalibm_core.core.indexing import SubUniformIntervalIndexing
from metalibm_core.utility.ml_template import ML_NewArgTemplate
from metalibm_core.utility.log_report import Log
from metalibm_core.utility.debug_utils import debug_multi
from metalibm_core.utility.axf_utils import AXF_JSON_Exporter, AXF_JSON_Importer
S2 = sollya.SollyaObject(2)
sollya.roundingwarnings = sollya.off
sollya.verbosity = 0
sollya.showmessagenumbers = sollya.on
class MetaAtan(ScalarUnaryFunction):
function_name = "ml_atan"
default_args_atan = {
"output_file": "my_atan.c",
"function_name": "my_atan",
"precision": ML_Binary32,
"accuracy": ML_Faithful,
"num_sub_intervals": 8,
"method": "piecewise",
"dump_axf_approx": False,
"load_axf_approx": False,
"target": GenericProcessor.get_target_instance()
}
def __init__(self, args):
super().__init__(args)
self.method = args.method
self.num_sub_intervals = args.num_sub_intervals
self.dump_axf_approx = args.dump_axf_approx
self.load_axf_approx = args.load_axf_approx
@classmethod
def get_default_args(cls, **kw):
arg_dict = cls.default_args_atan.copy()
arg_dict.update(kw)
return DefaultArgTemplate(**arg_dict)
def generate_scalar_scheme(self, vx):
return self.generic_atan2_generate(vx)
def generic_atan2_generate(self, _vx, vy=None):
if vy is None:
# approximation
            # if abs_vx <= 1.0 then atan(abs_vx) is directly approximated
# if abs_vx > 1.0 then atan(abs_vx) = pi/2 - atan(1 / abs_vx)
#
# for vx >= 0, atan(vx) = atan(abs_vx)
#
            # for vx < 0, atan(vx) = -atan(abs_vx); if abs_vx > 1 this becomes
# = -pi/2 + atan(1 / abs_vx)
vx = _vx
sign_cond = vx < 0
abs_vx = Select(vx < 0, -vx, vx, tag="abs_vx", debug=debug_multi)
bound_cond = abs_vx > 1
inv_abs_vx = 1 / abs_vx
# condition to select subtraction
cond = LogicalOr(
LogicalAnd(vx < 0, LogicalNot(bound_cond)),
vx > 1,
tag="cond", debug=debug_multi
)
# reduced argument
red_vx = Select(bound_cond, inv_abs_vx, abs_vx, tag="red_vx", debug=debug_multi, precision=self.precision)
offset = None
else:
# bound_cond is True iff Abs(vy / _vx) > 1.0
bound_cond = Abs(vy) > Abs(_vx)
bound_cond.set_attributes(tag="bound_cond", debug=debug_multi)
# vx and vy are of opposite signs
#sign_cond = (_vx * vy) < 0
# using cast to int(signed) and bitwise xor
# to determine if _vx and vy are of opposite sign rapidly
fast_sign_cond = BitLogicXor(
TypeCast(_vx, precision=self.precision.get_integer_format()),
TypeCast(vy, precision=self.precision.get_integer_format()),
precision=self.precision.get_integer_format()
) < 0
# sign_cond = (_vx * vy) < 0
sign_cond = fast_sign_cond
sign_cond.set_attributes(tag="sign_cond", debug=debug_multi)
# condition to select subtraction
# TODO: could be accelerated if LogicalXor existed
slow_cond = LogicalOr(
                LogicalAnd(sign_cond, LogicalNot(bound_cond)), # -1 < (vy / _vx) < 0
LogicalAnd(bound_cond, LogicalNot(sign_cond)), # (vy / _vx) > 1
tag="cond", debug=debug_multi
)
cond = slow_cond
numerator = Select(bound_cond, _vx, vy, tag="numerator", debug=debug_multi)
denominator = Select(bound_cond, vy, _vx, tag="denominator", debug=debug_multi)
# reduced argument
red_vx = Abs(numerator) / Abs(denominator)
red_vx.set_attributes(tag="red_vx", debug=debug_multi, precision=self.precision)
offset = Select(_vx > 0,
Constant(0, precision=self.precision),
# vx < 0
Select(sign_cond,
# vy > 0
Constant(sollya.pi, precision=self.precision),
Constant(-sollya.pi, precision=self.precision),
precision=self.precision
),
precision=self.precision,
tag="offset"
)
approx_fct = sollya.atan(sollya.x)
if self.method == "piecewise":
sign_vx = Select(cond, -1, 1, precision=self.precision, tag="sign_vx", debug=debug_multi)
cst_sign = Select(sign_cond, -1, 1, precision=self.precision, tag="cst_sign", debug=debug_multi)
cst = cst_sign * Select(bound_cond, sollya.pi / 2, 0, precision=self.precision)
cst.set_attributes(tag="cst", debug=debug_multi)
bound_low = 0.0
bound_high = 1.0
num_intervals = self.num_sub_intervals
error_threshold = S2**-(self.precision.get_mantissa_size() + 8)
uniform_indexing = SubUniformIntervalIndexing(Interval(bound_low, bound_high), num_intervals)
if self.load_axf_approx:
assert not self.dump_axf_approx
[axf_approx] = AXF_JSON_Importer.from_file(self.load_axf_approx)
approx_offset_table, approx_poly_max_degree, approx_poly_table, approx_max_error = load_piecewese_poly_params_from_axf(axf_approx, uniform_indexing)
approx = generate_piecewise_poly_approx_from_params(approx_offset_table,
approx_poly_max_degree,
approx_poly_table,
uniform_indexing,
self.precision,
red_vx)
else:
approx, axf_approx = generate_piecewise_poly_approx(
lambda offset: sollya.atan(sollya.x + offset),
uniform_indexing,
error_threshold,
self.precision,
red_vx,
error_target_type=sollya.absolute,
                    axf_export=self.dump_axf_approx is not False)
if self.dump_axf_approx:
axf_approx.tag = "atan"
AXF_JSON_Exporter.to_file(self.dump_axf_approx,
[axf_approx.serialize_to_dict()])
# approx, eval_error = piecewise_approximation(approx_fct,
# red_vx,
# self.precision,
# bound_low=bound_low,
# bound_high=bound_high,
# max_degree=None,
# num_intervals=num_intervals,
# error_threshold=error_threshold,
# odd=True)
result = cst + sign_vx * approx
result.set_attributes(tag="result", precision=self.precision, debug=debug_multi)
elif self.method == "single":
approx_interval = Interval(0, 1.0)
# determining the degree of the polynomial approximation
poly_degree_range = sollya.guessdegree(approx_fct / sollya.x,
approx_interval,
S2**-(self.precision.get_field_size() + 2))
poly_degree = int(sollya.sup(poly_degree_range)) + 4
Log.report(Log.Info, "poly_degree={}".format(poly_degree))
# arctan is an odd function, so only odd coefficient must be non-zero
poly_degree_list = list(range(1, poly_degree+1, 2))
poly_object, poly_error = Polynomial.build_from_approximation_with_error(
approx_fct, poly_degree_list,
[1] + [self.precision.get_sollya_object()] * (len(poly_degree_list)-1),
approx_interval)
odd_predicate = lambda index, _: ((index-1) % 4 != 0)
even_predicate = lambda index, _: (index != 1 and (index-1) % 4 == 0)
poly_odd_object = poly_object.sub_poly_cond(odd_predicate, offset=1)
poly_even_object = poly_object.sub_poly_cond(even_predicate, offset=1)
sollya.settings.display = sollya.hexadecimal
Log.report(Log.Info, "poly_error: {}".format(poly_error))
Log.report(Log.Info, "poly_odd: {}".format(poly_odd_object))
Log.report(Log.Info, "poly_even: {}".format(poly_even_object))
poly_odd = PolynomialSchemeEvaluator.generate_horner_scheme(poly_odd_object, abs_vx)
poly_odd.set_attributes(tag="poly_odd", debug=debug_multi)
poly_even = PolynomialSchemeEvaluator.generate_horner_scheme(poly_even_object, abs_vx)
poly_even.set_attributes(tag="poly_even", debug=debug_multi)
exact_sum = poly_odd + poly_even
exact_sum.set_attributes(tag="exact_sum", debug=debug_multi)
# poly_even should be (1 + poly_even)
result = vx + vx * exact_sum
result.set_attributes(tag="result", precision=self.precision, debug=debug_multi)
else:
raise NotImplementedError
        if offset is not None:
result = result + offset
std_scheme = Statement(
Return(result)
)
scheme = std_scheme
return scheme
def numeric_emulate(self, input_value):
return sollya.atan(input_value)
standard_test_cases = [[sollya.parse(x)] for x in ["0x1.107a78p+0", "0x1.9e75a6p+0"]]
class MetaAtan2(ScalarBinaryFunction, MetaAtan):
arity = 2
function_name = "ml_atan2"
def __init__(self, args):
ScalarBinaryFunction.__init__(self, args)
self.method = args.method
@classmethod
def get_default_args(cls, **kw):
arg_dict = cls.default_args_atan.copy()
arg_dict.update({
"output_file": "my_atan2.c",
"function_name": "my_atan2",
"input_intervals": [Interval(-5, 5)] * 2,
})
arg_dict.update(kw)
return DefaultArgTemplate(**arg_dict)
def generate_scalar_scheme(self, vy, vx):
        # as in the standard library atan2(y, x), y is the first
        # parameter and x the second, so vy and vx are swapped in the
        # method argument list
# extract of atan2 specification from man page
# If y is +0 (-0) and x is less than 0, +pi (-pi) is returned.
# If y is +0 (-0) and x is greater than 0, +0 (-0) is returned.
# If y is less than 0 and x is +0 or -0, -pi/2 is returned.
# If y is greater than 0 and x is +0 or -0, pi/2 is returned.
# If either x or y is NaN, a NaN is returned.
# If y is +0 (-0) and x is -0, +pi (-pi) is returned.
# If y is +0 (-0) and x is +0, +0 (-0) is returned.
# If y is a finite value greater (less) than 0, and x is negative infinity, +pi (-pi) is
# returned.
# If y is a finite value greater (less) than 0, and x is positive infinity, +0 (-0) is returned.
# If y is positive infinity (negative infinity), and x is finite, pi/2 (-pi/2) is returned.
# If y is positive infinity (negative infinity) and x is negative infinity, +3*pi/4 (-3*pi/4) is
# returned.
# If y is positive infinity (negative infinity) and x is positive infinity, +pi/4 (-pi/4) is
# returned.
vy.set_attributes(tag="y")
vx.set_attributes(tag="x")
return self.generic_atan2_generate(vx, vy)
def numeric_emulate(self, vy, vx):
if vx > 0:
return sollya.atan(vy / vx)
elif vy < 0:
# vy / vx > 0
return -sollya.pi + sollya.atan(vy / vx)
else:
# vy > 0, vy / vx < 0
return sollya.pi + sollya.atan(vy / vx)
standard_test_cases = [
(sollya.parse("0x1.08495cp+2"), sollya.parse("-0x1.88569ep+1")),
(sollya.parse("0x1.08495cp+2"), sollya.parse("-0x1.88569ep+1")),
(sollya.parse("0x1.08495cp+2"), sollya.parse("-0x1.88569ep+1")),
(sollya.parse("0x1.08495cp+2"), sollya.parse("-0x1.88569ep+1")),
]
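# A hedged sketch of the quadrant correction that numeric_emulate above
# performs, cross-checked against math.atan2. Like the reference above, it
# ignores vx == 0 and the IEEE special cases from the man-page extract; the
# sample point is arbitrary.
import math

def _demo_atan2_quadrants(vy, vx):
    if vx > 0:
        result = math.atan(vy / vx)
    elif vy < 0:
        # vx < 0 and vy < 0: shift atan(vy / vx) down by pi
        result = -math.pi + math.atan(vy / vx)
    else:
        # vx < 0 and vy >= 0: shift atan(vy / vx) up by pi
        result = math.pi + math.atan(vy / vx)
    assert abs(result - math.atan2(vy, vx)) < 1e-12
    return result

# e.g. _demo_atan2_quadrants(1.0, -2.0) returns ~2.678 (second quadrant)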
if __name__ == "__main__":
# auto-test
arg_template = ML_NewArgTemplate(default_arg=MetaAtan.get_default_args())
# extra options
arg_template.get_parser().add_argument(
"--method", dest="method", default="piecewise", choices=["piecewise", "single"],
action="store", help="select approximation method")
arg_template.get_parser().add_argument(
"--num-sub-intervals", default=8, type=int,
action="store", help="set the number of sub-intervals in piecewise method")
arg_template.get_parser().add_argument(
"--dump-axf-approx", default=False,
action="store", help="dump approximations in AXF format")
arg_template.get_parser().add_argument(
"--load-axf-approx", default=False,
action="store", help="load approximations from file in AXF format")
args = arg_template.arg_extraction()
ml_atan = MetaAtan(args)
ml_atan.gen_implementation()
| true
| true
|
1c48ebe9d69317233718bd0fbd0507d9693df525
| 161
|
py
|
Python
|
bin/sticks/one-sided-tetrasticks-x-2.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/sticks/one-sided-tetrasticks-x-2.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | null | null | null |
bin/sticks/one-sided-tetrasticks-x-2.py
|
tiwo/puzzler
|
7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e
|
[
"Intel"
] | 1
|
2022-01-02T16:54:14.000Z
|
2022-01-02T16:54:14.000Z
|
#!/usr/bin/env python
# $Id$
""" solutions"""
import puzzler
from puzzler.puzzles.tetrasticks import OneSidedTetrasticksX2
puzzler.run(OneSidedTetrasticksX2)
| 16.1
| 61
| 0.782609
|
import puzzler
from puzzler.puzzles.tetrasticks import OneSidedTetrasticksX2
puzzler.run(OneSidedTetrasticksX2)
| true
| true
|
1c48ed47667b08b16198666d4c1fe028765fde70
| 4,334
|
py
|
Python
|
python/ray/tests/aws/utils/stubs.py
|
kifarid/ray
|
43c97c2afb979987be82fa50048674e9b6776d5d
|
[
"Apache-2.0"
] | 5
|
2019-12-23T07:48:13.000Z
|
2020-01-03T12:42:38.000Z
|
python/ray/tests/aws/utils/stubs.py
|
tjcommV2X/ray
|
3965310f939cfbb0d700174529ee5bc7d4871de8
|
[
"Apache-2.0"
] | 70
|
2021-07-10T07:05:24.000Z
|
2022-03-26T07:05:20.000Z
|
python/ray/tests/aws/utils/stubs.py
|
majacQ/ray
|
bc08c6cdcc7ddf4da751ca2a972defd3db509061
|
[
"Apache-2.0"
] | 1
|
2021-05-20T22:00:15.000Z
|
2021-05-20T22:00:15.000Z
|
import ray
from ray.tests.aws.utils.mocks import mock_path_exists_key_pair
from ray.tests.aws.utils.constants import DEFAULT_INSTANCE_PROFILE, \
DEFAULT_KEY_PAIR, DEFAULT_SUBNET, A_THOUSAND_SUBNETS_IN_DIFFERENT_VPCS
from unittest import mock
from botocore.stub import ANY
def configure_iam_role_default(iam_client_stub):
iam_client_stub.add_response(
"get_instance_profile",
expected_params={
"InstanceProfileName": ray.autoscaler._private.aws.config.
DEFAULT_RAY_INSTANCE_PROFILE
},
service_response={"InstanceProfile": DEFAULT_INSTANCE_PROFILE})
def configure_key_pair_default(ec2_client_stub):
patcher = mock.patch("os.path.exists")
os_path_exists_mock = patcher.start()
os_path_exists_mock.side_effect = mock_path_exists_key_pair
ec2_client_stub.add_response(
"describe_key_pairs",
expected_params={
"Filters": [{
"Name": "key-name",
"Values": [DEFAULT_KEY_PAIR["KeyName"]]
}]
},
service_response={"KeyPairs": [DEFAULT_KEY_PAIR]})
def configure_subnet_default(ec2_client_stub):
ec2_client_stub.add_response(
"describe_subnets",
expected_params={},
service_response={"Subnets": [DEFAULT_SUBNET]})
def describe_a_thousand_subnets_in_different_vpcs(ec2_client_stub):
ec2_client_stub.add_response(
"describe_subnets",
expected_params={},
service_response={"Subnets": A_THOUSAND_SUBNETS_IN_DIFFERENT_VPCS})
def skip_to_configure_sg(ec2_client_stub, iam_client_stub):
configure_iam_role_default(iam_client_stub)
configure_key_pair_default(ec2_client_stub)
configure_subnet_default(ec2_client_stub)
def describe_subnets_echo(ec2_client_stub, subnet):
ec2_client_stub.add_response(
"describe_subnets",
expected_params={
"Filters": [{
"Name": "subnet-id",
"Values": [subnet["SubnetId"]]
}]
},
service_response={"Subnets": [subnet]})
def describe_no_security_groups(ec2_client_stub):
ec2_client_stub.add_response(
"describe_security_groups",
expected_params={"Filters": ANY},
service_response={})
def describe_a_security_group(ec2_client_stub, security_group):
ec2_client_stub.add_response(
"describe_security_groups",
expected_params={
"Filters": [{
"Name": "group-id",
"Values": [security_group["GroupId"]]
}]
},
service_response={"SecurityGroups": [security_group]})
def describe_an_sg_2(ec2_client_stub, security_group):
"""Same as last function, different input param format.
A call with this input parameter format is made when sg.ip_permissions is
accessed in aws/config.py.
"""
ec2_client_stub.add_response(
"describe_security_groups",
expected_params={"GroupIds": [security_group["GroupId"]]},
service_response={"SecurityGroups": [security_group]})
def create_sg_echo(ec2_client_stub, security_group):
ec2_client_stub.add_response(
"create_security_group",
expected_params={
"Description": security_group["Description"],
"GroupName": security_group["GroupName"],
"VpcId": security_group["VpcId"]
},
service_response={"GroupId": security_group["GroupId"]})
def describe_sgs_on_vpc(ec2_client_stub, vpc_ids, security_groups):
ec2_client_stub.add_response(
"describe_security_groups",
expected_params={"Filters": [{
"Name": "vpc-id",
"Values": vpc_ids
}]},
service_response={"SecurityGroups": security_groups})
def authorize_sg_ingress(ec2_client_stub, security_group):
ec2_client_stub.add_response(
"authorize_security_group_ingress",
expected_params={
"GroupId": security_group["GroupId"],
"IpPermissions": security_group["IpPermissions"]
},
service_response={})
def describe_sg_echo(ec2_client_stub, security_group):
ec2_client_stub.add_response(
"describe_security_groups",
expected_params={"GroupIds": [security_group["GroupId"]]},
service_response={"SecurityGroups": [security_group]})
| 31.867647
| 77
| 0.67928
|
import ray
from ray.tests.aws.utils.mocks import mock_path_exists_key_pair
from ray.tests.aws.utils.constants import DEFAULT_INSTANCE_PROFILE, \
DEFAULT_KEY_PAIR, DEFAULT_SUBNET, A_THOUSAND_SUBNETS_IN_DIFFERENT_VPCS
from unittest import mock
from botocore.stub import ANY
def configure_iam_role_default(iam_client_stub):
iam_client_stub.add_response(
"get_instance_profile",
expected_params={
"InstanceProfileName": ray.autoscaler._private.aws.config.
DEFAULT_RAY_INSTANCE_PROFILE
},
service_response={"InstanceProfile": DEFAULT_INSTANCE_PROFILE})
def configure_key_pair_default(ec2_client_stub):
patcher = mock.patch("os.path.exists")
os_path_exists_mock = patcher.start()
os_path_exists_mock.side_effect = mock_path_exists_key_pair
ec2_client_stub.add_response(
"describe_key_pairs",
expected_params={
"Filters": [{
"Name": "key-name",
"Values": [DEFAULT_KEY_PAIR["KeyName"]]
}]
},
service_response={"KeyPairs": [DEFAULT_KEY_PAIR]})
def configure_subnet_default(ec2_client_stub):
ec2_client_stub.add_response(
"describe_subnets",
expected_params={},
service_response={"Subnets": [DEFAULT_SUBNET]})
def describe_a_thousand_subnets_in_different_vpcs(ec2_client_stub):
ec2_client_stub.add_response(
"describe_subnets",
expected_params={},
service_response={"Subnets": A_THOUSAND_SUBNETS_IN_DIFFERENT_VPCS})
def skip_to_configure_sg(ec2_client_stub, iam_client_stub):
configure_iam_role_default(iam_client_stub)
configure_key_pair_default(ec2_client_stub)
configure_subnet_default(ec2_client_stub)
def describe_subnets_echo(ec2_client_stub, subnet):
ec2_client_stub.add_response(
"describe_subnets",
expected_params={
"Filters": [{
"Name": "subnet-id",
"Values": [subnet["SubnetId"]]
}]
},
service_response={"Subnets": [subnet]})
def describe_no_security_groups(ec2_client_stub):
ec2_client_stub.add_response(
"describe_security_groups",
expected_params={"Filters": ANY},
service_response={})
def describe_a_security_group(ec2_client_stub, security_group):
ec2_client_stub.add_response(
"describe_security_groups",
expected_params={
"Filters": [{
"Name": "group-id",
"Values": [security_group["GroupId"]]
}]
},
service_response={"SecurityGroups": [security_group]})
def describe_an_sg_2(ec2_client_stub, security_group):
ec2_client_stub.add_response(
"describe_security_groups",
expected_params={"GroupIds": [security_group["GroupId"]]},
service_response={"SecurityGroups": [security_group]})
def create_sg_echo(ec2_client_stub, security_group):
ec2_client_stub.add_response(
"create_security_group",
expected_params={
"Description": security_group["Description"],
"GroupName": security_group["GroupName"],
"VpcId": security_group["VpcId"]
},
service_response={"GroupId": security_group["GroupId"]})
def describe_sgs_on_vpc(ec2_client_stub, vpc_ids, security_groups):
ec2_client_stub.add_response(
"describe_security_groups",
expected_params={"Filters": [{
"Name": "vpc-id",
"Values": vpc_ids
}]},
service_response={"SecurityGroups": security_groups})
def authorize_sg_ingress(ec2_client_stub, security_group):
ec2_client_stub.add_response(
"authorize_security_group_ingress",
expected_params={
"GroupId": security_group["GroupId"],
"IpPermissions": security_group["IpPermissions"]
},
service_response={})
def describe_sg_echo(ec2_client_stub, security_group):
ec2_client_stub.add_response(
"describe_security_groups",
expected_params={"GroupIds": [security_group["GroupId"]]},
service_response={"SecurityGroups": [security_group]})
| true
| true
|
1c48ee010c125b192f89896803c81d8882bb00a2
| 2,744
|
py
|
Python
|
mysqls/connect_database.py
|
marxlee/py-tools
|
4c3699b2a5dd5cb4477a4e339b8f91161cbe3bef
|
[
"Apache-2.0"
] | null | null | null |
mysqls/connect_database.py
|
marxlee/py-tools
|
4c3699b2a5dd5cb4477a4e339b8f91161cbe3bef
|
[
"Apache-2.0"
] | null | null | null |
mysqls/connect_database.py
|
marxlee/py-tools
|
4c3699b2a5dd5cb4477a4e339b8f91161cbe3bef
|
[
"Apache-2.0"
] | null | null | null |
import pymysql, re
def connect():
db = pymysql.connect(host="212.64.15.***", port=3306, user="hadoop",password="hadoop", database="py_tech", charset="utf8" )
return db
def show_database(db):
with db.cursor() as cursor:
cursor.execute('show databases;')
data = cursor.fetchall()
print(data)
def create_table(db):
sql = """CREATE TABLE user (
`id` BIGINT(32) NOT NULL AUTO_INCREMENT COMMENT 'ID' ,
        `user_name` VARCHAR(255) DEFAULT NULL COMMENT 'user name',
        `gender` INT(4) NOT NULL COMMENT 'gender',
        `age` INT(4) NOT NULL COMMENT 'age',
        `is_del` INT(4) NOT NULL COMMENT 'deleted flag',
        `create_time` timestamp NOT NULL COMMENT 'CREATE_TIME',
        `update_time` timestamp NOT NULL COMMENT 'UPDATE_TIME',
        PRIMARY KEY (`id`)
    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT 'user table';
    -- MySQL 5.6 only allows one column with a default current_timestamp
"""
with db.cursor() as cursor:
cursor.execute(sql)
data = cursor.fetchall()
print(data)
def select_data(db):
sql = """
select * from user;
"""
with db.cursor() as cursor:
cursor.execute(sql)
data = cursor.fetchall()
for row in data:
id = row[0]
name = row[1]
gender = row[2]
age = row[3]
is_del = row[4]
ctime = row[5]
utime = row[6]
            print('Row data: %d, %s, %d, %d, %d, %s, %s' % (id, name, gender, age, is_del, ctime, utime))
# print(data)
def insert_data(db):
sql = "INSERT INTO user( \
user_name, gender, age, is_del, create_time, update_time) \
VALUES ('%s', '%d', %d, 0, now(), now())" % \
('Maria', 1, 26)
with db.cursor() as cursor:
cursor.execute(sql)
db.commit()
data = cursor.fetchall()
print(data)
db.close()
def execute_sql(sql=None):
    # example statement used when the caller supplies no SQL
    if sql is None:
        sql = "DELETE FROM EMPLOYEE WHERE AGE > %s" % (20)
    db = pymysql.connect(host="212.64.15.224", port=3306, user="hadoop", password="hadoop", database="py_tech",
                         charset="utf8")
    cursor = db.cursor()
    try:
        # execute the SQL statement
        cursor.execute(sql)
        if re.search('select', sql, re.I) is not None:
            data = cursor.fetchall()
            print(data)
        else:
            # commit to the database (needed for update, create, insert, delete)
            db.commit()
    except Exception:
        # roll back on error
        db.rollback()
    finally:
        cursor.close()
        db.close()
if __name__ == '__main__':
# db = connect()
# show_database(db)
# create_table(db, sql)
# select_data(db)
# insert_data(db)
pass
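# A hedged sketch, not part of the original module: the queries above are
# built with % string formatting, which is vulnerable to SQL injection.
# pymysql can bind values safely when a tuple is passed as the second
# argument to cursor.execute; this helper reuses the `user` table created by
# create_table above.
def insert_user_safely(db, user_name, gender, age):
    sql = ("INSERT INTO user (user_name, gender, age, is_del, create_time, update_time) "
           "VALUES (%s, %s, %s, 0, now(), now())")
    with db.cursor() as cursor:
        # pymysql escapes each bound value, so user input cannot alter the query
        cursor.execute(sql, (user_name, gender, age))
    db.commit()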
| 27.717172
| 127
| 0.525875
|
import pymysql, re
def connect():
db = pymysql.connect(host="212.64.15.***", port=3306, user="hadoop",password="hadoop", database="py_tech", charset="utf8" )
return db
def show_database(db):
with db.cursor() as cursor:
cursor.execute('show databases;')
data = cursor.fetchall()
print(data)
def create_table(db):
sql = """CREATE TABLE user (
`id` BIGINT(32) NOT NULL AUTO_INCREMENT COMMENT 'ID' ,
        `user_name` VARCHAR(255) DEFAULT NULL COMMENT 'user name',
        `gender` INT(4) NOT NULL COMMENT 'gender',
        `age` INT(4) NOT NULL COMMENT 'age',
        `is_del` INT(4) NOT NULL COMMENT 'deleted flag',
        `create_time` timestamp NOT NULL COMMENT 'CREATE_TIME',
        `update_time` timestamp NOT NULL COMMENT 'UPDATE_TIME',
        PRIMARY KEY (`id`)
    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT 'user table';
    -- MySQL 5.6 only allows one column with a default current_timestamp
"""
with db.cursor() as cursor:
cursor.execute(sql)
data = cursor.fetchall()
print(data)
def select_data(db):
sql = """
select * from user;
"""
with db.cursor() as cursor:
cursor.execute(sql)
data = cursor.fetchall()
for row in data:
id = row[0]
name = row[1]
gender = row[2]
age = row[3]
is_del = row[4]
ctime = row[5]
utime = row[6]
            print('Row data: %d, %s, %d, %d, %d, %s, %s' % (id, name, gender, age, is_del, ctime, utime))
def insert_data(db):
sql = "INSERT INTO user( \
user_name, gender, age, is_del, create_time, update_time) \
VALUES ('%s', '%d', %d, 0, now(), now())" % \
('Maria', 1, 26)
with db.cursor() as cursor:
cursor.execute(sql)
db.commit()
data = cursor.fetchall()
print(data)
db.close()
def execute_sql(sql=None):
    if sql is None:
        sql = "DELETE FROM EMPLOYEE WHERE AGE > %s" % (20)
    db = pymysql.connect(host="212.64.15.224", port=3306, user="hadoop", password="hadoop", database="py_tech",
                         charset="utf8")
    cursor = db.cursor()
    try:
        cursor.execute(sql)
        if re.search('select', sql, re.I) is not None:
            data = cursor.fetchall()
            print(data)
        else:
            db.commit()
    except Exception:
        db.rollback()
    finally:
        cursor.close()
        db.close()
if __name__ == '__main__':
pass
| true
| true
|
1c48ef48c6102362cb796f1f3e20287c41044d04
| 3,398
|
py
|
Python
|
setup.py
|
lfdelphino/WebWhatsapp-Wrapper
|
377edb35d8143de9de4939883d64933e0909173b
|
[
"MIT"
] | 7
|
2019-03-10T17:37:07.000Z
|
2021-05-14T13:28:13.000Z
|
setup.py
|
lfdelphino/WebWhatsapp-Wrapper
|
377edb35d8143de9de4939883d64933e0909173b
|
[
"MIT"
] | 2
|
2019-05-22T14:54:36.000Z
|
2019-05-30T23:59:45.000Z
|
setup.py
|
lfdelphino/WebWhatsapp-Wrapper
|
377edb35d8143de9de4939883d64933e0909173b
|
[
"MIT"
] | 3
|
2019-11-23T20:51:07.000Z
|
2021-09-28T09:22:59.000Z
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import ast
# To use a consistent encoding
from codecs import open
import os
# Always prefer setuptools over distutils
from setuptools import setup
PACKAGE_NAME = 'webwhatsapi'
path = os.path.join(os.path.dirname(__file__), PACKAGE_NAME, '__init__.py')
with open(path, 'r') as file:
t = compile(file.read(), path, 'exec', ast.PyCF_ONLY_AST)
for node in (n for n in t.body if isinstance(n, ast.Assign)):
if len(node.targets) != 1:
continue
name = node.targets[0]
if not isinstance(name, ast.Name) or \
name.id not in ('__version__', '__version_info__', 'VERSION'):
continue
v = node.value
if isinstance(v, ast.Str):
version = v.s
break
if isinstance(v, ast.Tuple):
r = []
for e in v.elts:
if isinstance(e, ast.Str):
r.append(e.s)
elif isinstance(e, ast.Num):
r.append(str(e.n))
version = '.'.join(r)
break
# Get the long description from the README file
with open(os.path.join(os.path.dirname(__file__), 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='webwhatsapi',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=version,
description='A python interface for Whatsapp Web',
long_description=long_description,
# The project's main homepage.
url='https://github.com/mukulhase/WhatsAPI',
download_url='https://github.com/mukulhase/WhatsAPI/archive/{}.tar.gz'.format(version),
# Author details
author='Mukul Hase',
author_email='mukulhase@gmail.com',
include_package_data=True,
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Communications :: Chat',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
],
# What does your project relate to?
keywords='Whatsapp Chat Bot Chatbot Selenium Web Whatsapp API',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=[PACKAGE_NAME, ],
install_requires=[
# 'aiohttp', see https://github.com/mukulhase/WebWhatsAPI/issues/159
'python-dateutil>=2.6.0',
'selenium>=3.4.3',
'six>=1.10.0',
'python-axolotl',
'cryptography',
'python-magic'
],
extras_require={
},
)
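# A hedged, generic restatement of the version-extraction technique used at
# the top of this script: parse a module with ast and return its string
# __version__ without importing the package. The path passed in is whatever
# __init__.py the caller wants to inspect.
import ast

def read_version(init_path):
    with open(init_path) as f:
        tree = ast.parse(f.read())
    for node in tree.body:
        if isinstance(node, ast.Assign) and len(node.targets) == 1:
            target = node.targets[0]
            if isinstance(target, ast.Name) and target.id == '__version__' \
                    and isinstance(node.value, ast.Str):
                return node.value.s
    raise ValueError('no string __version__ found in ' + init_path)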
| 30.612613
| 91
| 0.622131
|
import ast
from codecs import open
import os
from setuptools import setup
PACKAGE_NAME = 'webwhatsapi'
path = os.path.join(os.path.dirname(__file__), PACKAGE_NAME, '__init__.py')
with open(path, 'r') as file:
t = compile(file.read(), path, 'exec', ast.PyCF_ONLY_AST)
for node in (n for n in t.body if isinstance(n, ast.Assign)):
if len(node.targets) != 1:
continue
name = node.targets[0]
if not isinstance(name, ast.Name) or \
name.id not in ('__version__', '__version_info__', 'VERSION'):
continue
v = node.value
if isinstance(v, ast.Str):
version = v.s
break
if isinstance(v, ast.Tuple):
r = []
for e in v.elts:
if isinstance(e, ast.Str):
r.append(e.s)
elif isinstance(e, ast.Num):
r.append(str(e.n))
version = '.'.join(r)
break
with open(os.path.join(os.path.dirname(__file__), 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='webwhatsapi',
version=version,
description='A python interface for Whatsapp Web',
long_description=long_description,
url='https://github.com/mukulhase/WhatsAPI',
download_url='https://github.com/mukulhase/WhatsAPI/archive/{}.tar.gz'.format(version),
# Author details
author='Mukul Hase',
author_email='mukulhase@gmail.com',
include_package_data=True,
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Communications :: Chat',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
],
# What does your project relate to?
keywords='Whatsapp Chat Bot Chatbot Selenium Web Whatsapp API',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=[PACKAGE_NAME, ],
install_requires=[
# 'aiohttp', see https://github.com/mukulhase/WebWhatsAPI/issues/159
'python-dateutil>=2.6.0',
'selenium>=3.4.3',
'six>=1.10.0',
'python-axolotl',
'cryptography',
'python-magic'
],
extras_require={
},
)
| true
| true
|
1c48ef5e1eed7b7caa4331f9bd566bd9f41446c2
| 606
|
py
|
Python
|
superlists/lists/tests.py
|
williamHuang5468/LearningDjango
|
309b89c7072a3ef713164e6832f733e9f26938e4
|
[
"MIT"
] | null | null | null |
superlists/lists/tests.py
|
williamHuang5468/LearningDjango
|
309b89c7072a3ef713164e6832f733e9f26938e4
|
[
"MIT"
] | null | null | null |
superlists/lists/tests.py
|
williamHuang5468/LearningDjango
|
309b89c7072a3ef713164e6832f733e9f26938e4
|
[
"MIT"
] | null | null | null |
from django.core.urlresolvers import resolve
from django.test import TestCase
from django.http import HttpRequest
from lists.views import home_page
class HomePageTest(TestCase):
def test_home_page(self):
home = resolve('/')
self.assertEqual(home.func, home_page)
def test_home_page_returns_correct_html(self):
request = HttpRequest()
response = home_page(request)
self.assertTrue(response.content.startswith(b'<html>'))
self.assertIn(b'<title>To-Do lists</title>', response.content)
self.assertTrue(response.content.endswith(b'</html>'))
| 31.894737
| 70
| 0.714521
|
from django.core.urlresolvers import resolve
from django.test import TestCase
from django.http import HttpRequest
from lists.views import home_page
class HomePageTest(TestCase):
def test_home_page(self):
home = resolve('/')
self.assertEqual(home.func, home_page)
def test_home_page_returns_correct_html(self):
request = HttpRequest()
response = home_page(request)
self.assertTrue(response.content.startswith(b'<html>'))
self.assertIn(b'<title>To-Do lists</title>', response.content)
self.assertTrue(response.content.endswith(b'</html>'))
| true
| true
|
1c48f1159d084538f075e76f7042b9f900261016
| 7,267
|
py
|
Python
|
tests/integration/test_unreal.py
|
rhcarvalho/relay
|
6f1e81115f1dd82aaf63d242d4e4db754c393a5e
|
[
"BSL-1.0"
] | null | null | null |
tests/integration/test_unreal.py
|
rhcarvalho/relay
|
6f1e81115f1dd82aaf63d242d4e4db754c393a5e
|
[
"BSL-1.0"
] | null | null | null |
tests/integration/test_unreal.py
|
rhcarvalho/relay
|
6f1e81115f1dd82aaf63d242d4e4db754c393a5e
|
[
"BSL-1.0"
] | null | null | null |
import os
import pytest
import json
def _load_dump_file(base_file_name: str):
dmp_path = os.path.join(
os.path.dirname(__file__), "fixtures", "native", base_file_name
)
with open(dmp_path, "rb") as f:
dmp_file = f.read()
return dmp_file
@pytest.mark.parametrize("dump_file_name", ["unreal_crash", "unreal_crash_apple"])
def test_unreal_crash(mini_sentry, relay, dump_file_name):
project_id = 42
relay = relay(mini_sentry)
relay.wait_relay_healthcheck()
mini_sentry.project_configs[project_id] = mini_sentry.full_project_config()
unreal_content = _load_dump_file(dump_file_name)
response = relay.send_unreal_request(project_id, unreal_content)
event_id = response.text.replace("-", "")
envelope = mini_sentry.captured_events.get(timeout=1)
assert envelope
assert event_id == envelope.headers.get("event_id")
items = envelope.items
assert len(items) == 1
unreal_item = items[0]
assert unreal_item.headers
assert unreal_item.headers.get("type") == "unreal_report"
assert unreal_item.headers.get("content_type") == "application/octet-stream"
assert unreal_item.payload is not None
def test_unreal_minidump_with_processing(
mini_sentry, relay_with_processing, attachments_consumer, events_consumer
):
project_id = 42
options = {"processing": {"attachment_chunk_size": "1.23 GB"}}
relay = relay_with_processing(options)
relay.wait_relay_healthcheck()
attachments_consumer = attachments_consumer()
mini_sentry.project_configs[project_id] = mini_sentry.full_project_config()
unreal_content = _load_dump_file("unreal_crash")
relay.send_unreal_request(project_id, unreal_content)
attachments = {}
while True:
raw_message, message = attachments_consumer.get_message()
if message is None or message["type"] != "attachment_chunk":
event = message
break
attachments[message["id"]] = message
assert event
assert event["type"] == "event"
project_id = event["project_id"]
event_id = event["event_id"]
assert len(event["attachments"]) == 4
assert len(attachments) == 4
logs_file_found = False
mini_dump_found = False
crash_report_ini_found = False
unreal_context_found = False
for attachment_entry in event["attachments"]:
# check that the attachment is registered in the event
attachment_id = attachment_entry["id"]
# check that we didn't get the messages chunked
assert attachment_entry["chunks"] == 1
entry_name = attachment_entry["name"]
if entry_name == "UE4Minidump.dmp":
mini_dump_found = True
elif entry_name == "YetAnother.log":
logs_file_found = True
elif entry_name == "CrashContext.runtime-xml":
unreal_context_found = True
elif entry_name == "CrashReportClient.ini":
crash_report_ini_found = True
attachment = attachments.get(attachment_id)
assert attachment is not None
assert attachment["event_id"] == event_id
assert attachment["project_id"] == project_id
assert mini_dump_found
assert logs_file_found
assert unreal_context_found
assert crash_report_ini_found
# check the created event
event_data = json.loads(event["payload"])
assert event_data["event_id"] == event_id
exception = event_data.get("exception")
assert exception is not None
values = exception["values"]
assert values is not None
mini_dump_process_marker_found = False
for value in values:
if value == {
"type": "Minidump",
"value": "Invalid Minidump",
"mechanism": {"type": "minidump", "synthetic": True, "handled": False},
}:
mini_dump_process_marker_found = True
assert mini_dump_process_marker_found
def test_unreal_apple_crash_with_processing(
mini_sentry, relay_with_processing, attachments_consumer, events_consumer
):
project_id = 42
options = {"processing": {"attachment_chunk_size": "1.23 GB"}}
relay = relay_with_processing(options)
relay.wait_relay_healthcheck()
attachments_consumer = attachments_consumer()
mini_sentry.project_configs[project_id] = mini_sentry.full_project_config()
unreal_content = _load_dump_file("unreal_crash_apple")
relay.send_unreal_request(project_id, unreal_content)
attachments = {}
user_report = None
event = None
while True:
raw_message, message = attachments_consumer.get_message()
if message is None:
pytest.fail("could not get messages from attachment consumer")
if message["type"] == "attachment_chunk":
attachments[message["id"]] = message
elif message["type"] == "user_report":
user_report = message
elif message["type"] == "event":
event = message
break
assert event is not None
assert user_report is not None
project_id = event["project_id"]
event_id = event["event_id"]
assert len(event["attachments"]) == 6
assert len(attachments) == 6
mini_dump_found = False
crash_report_ini_found = False
logs_file_found = False
crash_context_found = False
info_file_found = False
diagnostics_file_found = False
for attachment_entry in event["attachments"]:
# check that the attachment is registered in the event
attachment_id = attachment_entry["id"]
# check that we didn't get the messages chunked
assert attachment_entry["chunks"] == 1
entry_name = attachment_entry["name"]
if entry_name == "minidump.dmp":
mini_dump_found = True
elif entry_name == "CrashReportClient.ini":
crash_report_ini_found = True
elif entry_name == "info.txt":
info_file_found = True
elif entry_name == "YetAnotherMac.log":
logs_file_found = True
elif entry_name == "CrashContext.runtime-xml":
crash_context_found = True
elif entry_name == "Diagnostics.txt":
diagnostics_file_found = True
attachment = attachments.get(attachment_id)
assert attachment is not None
assert attachment["event_id"] == event_id
assert attachment["project_id"] == project_id
assert mini_dump_found
assert logs_file_found
assert crash_context_found
assert crash_report_ini_found
assert info_file_found
assert diagnostics_file_found
# check the created event
event_data = json.loads(event["payload"])
assert event_data["event_id"] == event_id
exception = event_data.get("exception")
assert exception is not None
values = exception["values"]
assert values is not None
apple_crash_report_marker_found = False
for value in values:
if value == {
"type": "AppleCrashReport",
"value": "Invalid Apple Crash Report",
"mechanism": {
"type": "applecrashreport",
"synthetic": True,
"handled": False,
},
}:
apple_crash_report_marker_found = True
assert apple_crash_report_marker_found
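# A hedged sketch of the message-draining pattern both tests above repeat:
# collect "attachment_chunk" messages until a terminal message arrives. The
# consumer is assumed to expose the same get_message() -> (raw, dict-or-None)
# interface as the fixtures used above.
def collect_attachment_chunks(consumer):
    chunks = {}
    while True:
        _raw, message = consumer.get_message()
        if message is None or message["type"] != "attachment_chunk":
            return chunks, message
        chunks[message["id"]] = message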
| 31.323276
| 83
| 0.670841
|
import os
import pytest
import json
def _load_dump_file(base_file_name: str):
dmp_path = os.path.join(
os.path.dirname(__file__), "fixtures", "native", base_file_name
)
with open(dmp_path, "rb") as f:
dmp_file = f.read()
return dmp_file
@pytest.mark.parametrize("dump_file_name", ["unreal_crash", "unreal_crash_apple"])
def test_unreal_crash(mini_sentry, relay, dump_file_name):
project_id = 42
relay = relay(mini_sentry)
relay.wait_relay_healthcheck()
mini_sentry.project_configs[project_id] = mini_sentry.full_project_config()
unreal_content = _load_dump_file(dump_file_name)
response = relay.send_unreal_request(project_id, unreal_content)
event_id = response.text.replace("-", "")
envelope = mini_sentry.captured_events.get(timeout=1)
assert envelope
assert event_id == envelope.headers.get("event_id")
items = envelope.items
assert len(items) == 1
unreal_item = items[0]
assert unreal_item.headers
assert unreal_item.headers.get("type") == "unreal_report"
assert unreal_item.headers.get("content_type") == "application/octet-stream"
assert unreal_item.payload is not None
def test_unreal_minidump_with_processing(
mini_sentry, relay_with_processing, attachments_consumer, events_consumer
):
project_id = 42
options = {"processing": {"attachment_chunk_size": "1.23 GB"}}
relay = relay_with_processing(options)
relay.wait_relay_healthcheck()
attachments_consumer = attachments_consumer()
mini_sentry.project_configs[project_id] = mini_sentry.full_project_config()
unreal_content = _load_dump_file("unreal_crash")
relay.send_unreal_request(project_id, unreal_content)
attachments = {}
while True:
raw_message, message = attachments_consumer.get_message()
if message is None or message["type"] != "attachment_chunk":
event = message
break
attachments[message["id"]] = message
assert event
assert event["type"] == "event"
project_id = event["project_id"]
event_id = event["event_id"]
assert len(event["attachments"]) == 4
assert len(attachments) == 4
logs_file_found = False
mini_dump_found = False
crash_report_ini_found = False
unreal_context_found = False
for attachment_entry in event["attachments"]:
attachment_id = attachment_entry["id"]
assert attachment_entry["chunks"] == 1
entry_name = attachment_entry["name"]
if entry_name == "UE4Minidump.dmp":
mini_dump_found = True
elif entry_name == "YetAnother.log":
logs_file_found = True
elif entry_name == "CrashContext.runtime-xml":
unreal_context_found = True
elif entry_name == "CrashReportClient.ini":
crash_report_ini_found = True
attachment = attachments.get(attachment_id)
assert attachment is not None
assert attachment["event_id"] == event_id
assert attachment["project_id"] == project_id
assert mini_dump_found
assert logs_file_found
assert unreal_context_found
assert crash_report_ini_found
# check the created event
event_data = json.loads(event["payload"])
assert event_data["event_id"] == event_id
exception = event_data.get("exception")
assert exception is not None
values = exception["values"]
assert values is not None
mini_dump_process_marker_found = False
for value in values:
if value == {
"type": "Minidump",
"value": "Invalid Minidump",
"mechanism": {"type": "minidump", "synthetic": True, "handled": False},
}:
mini_dump_process_marker_found = True
assert mini_dump_process_marker_found
def test_unreal_apple_crash_with_processing(
mini_sentry, relay_with_processing, attachments_consumer, events_consumer
):
project_id = 42
options = {"processing": {"attachment_chunk_size": "1.23 GB"}}
relay = relay_with_processing(options)
relay.wait_relay_healthcheck()
attachments_consumer = attachments_consumer()
mini_sentry.project_configs[project_id] = mini_sentry.full_project_config()
unreal_content = _load_dump_file("unreal_crash_apple")
relay.send_unreal_request(project_id, unreal_content)
attachments = {}
user_report = None
event = None
while True:
raw_message, message = attachments_consumer.get_message()
if message is None:
pytest.fail("could not get messages from attachment consumer")
if message["type"] == "attachment_chunk":
attachments[message["id"]] = message
elif message["type"] == "user_report":
user_report = message
elif message["type"] == "event":
event = message
break
assert event is not None
assert user_report is not None
project_id = event["project_id"]
event_id = event["event_id"]
assert len(event["attachments"]) == 6
assert len(attachments) == 6
mini_dump_found = False
crash_report_ini_found = False
logs_file_found = False
crash_context_found = False
info_file_found = False
diagnostics_file_found = False
for attachment_entry in event["attachments"]:
# check that the attachment is registered in the event
attachment_id = attachment_entry["id"]
# check that we didn't get the messages chunked
assert attachment_entry["chunks"] == 1
entry_name = attachment_entry["name"]
if entry_name == "minidump.dmp":
mini_dump_found = True
elif entry_name == "CrashReportClient.ini":
crash_report_ini_found = True
elif entry_name == "info.txt":
info_file_found = True
elif entry_name == "YetAnotherMac.log":
logs_file_found = True
elif entry_name == "CrashContext.runtime-xml":
crash_context_found = True
elif entry_name == "Diagnostics.txt":
diagnostics_file_found = True
attachment = attachments.get(attachment_id)
assert attachment is not None
assert attachment["event_id"] == event_id
assert attachment["project_id"] == project_id
assert mini_dump_found
assert logs_file_found
assert crash_context_found
assert crash_report_ini_found
assert info_file_found
assert diagnostics_file_found
event_data = json.loads(event["payload"])
assert event_data["event_id"] == event_id
exception = event_data.get("exception")
assert exception is not None
values = exception["values"]
assert values is not None
apple_crash_report_marker_found = False
for value in values:
if value == {
"type": "AppleCrashReport",
"value": "Invalid Apple Crash Report",
"mechanism": {
"type": "applecrashreport",
"synthetic": True,
"handled": False,
},
}:
apple_crash_report_marker_found = True
assert apple_crash_report_marker_found
| true
| true
|
1c48f173f4cc5a21b4683a68476f35fc62018189
| 935
|
py
|
Python
|
Chapter01/03 Saving image using lossy and lossless compression.py
|
PCJimmmy/OpenCV-3-Computer-Vision-with-Python-Cookbook
|
08be606384e3439183599c147291901d80fc8310
|
[
"MIT"
] | 1
|
2019-08-18T03:53:01.000Z
|
2019-08-18T03:53:01.000Z
|
Chapter01/03 Saving image using lossy and lossless compression.py
|
PCJimmmy/OpenCV-3-Computer-Vision-with-Python-Cookbook
|
08be606384e3439183599c147291901d80fc8310
|
[
"MIT"
] | 1
|
2020-06-29T06:25:37.000Z
|
2020-06-29T06:25:37.000Z
|
Chapter01/03 Saving image using lossy and lossless compression.py
|
eventia/opencv_vision_train
|
3d0bedd02cd73ca40595f483bf468913dbc54f2d
|
[
"MIT"
] | 2
|
2019-08-12T01:02:07.000Z
|
2021-02-18T15:02:45.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import cv2
parser = argparse.ArgumentParser()
parser.add_argument('--path', default='../data/Lena.png', help='Image path.')
parser.add_argument('--out_png', default='../data/Lena_compressed.png',
help='Output image path for lossless result.')
parser.add_argument('--out_jpg', default='../data/Lena_compressed.jpg',
help='Output image path for lossy result.')
params = parser.parse_args()
img = cv2.imread(params.path)
# save image with the lowest PNG compression level - bigger file but faster encoding
cv2.imwrite(params.out_png, img, [cv2.IMWRITE_PNG_COMPRESSION, 0])
# check that the image, once saved and loaded back, is identical to the original
saved_img = cv2.imread(params.out_png)
assert (saved_img == img).all()
# save image with the lowest JPEG quality - much smaller file size, visible artifacts
cv2.imwrite(params.out_jpg, img, [cv2.IMWRITE_JPEG_QUALITY, 0])
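# A hedged follow-up sketch, not in the original recipe: every PNG
# compression level reproduces the pixels exactly (only size and speed
# change), while JPEG quality 0 discards information. The extra file name is
# derived from the arguments parsed above.
import os
import numpy as np

heavy_png = params.out_png.replace('.png', '_level9.png')
cv2.imwrite(heavy_png, img, [cv2.IMWRITE_PNG_COMPRESSION, 9])
assert np.array_equal(cv2.imread(heavy_png), img)  # still lossless
print('PNG level 0:', os.path.getsize(params.out_png), 'bytes')
print('PNG level 9:', os.path.getsize(heavy_png), 'bytes')
print('JPEG q=0:', os.path.getsize(params.out_jpg), 'bytes')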
| 37.4
| 77
| 0.715508
|
import argparse
import cv2
parser = argparse.ArgumentParser()
parser.add_argument('--path', default='../data/Lena.png', help='Image path.')
parser.add_argument('--out_png', default='../data/Lena_compressed.png',
help='Output image path for lossless result.')
parser.add_argument('--out_jpg', default='../data/Lena_compressed.jpg',
help='Output image path for lossy result.')
params = parser.parse_args()
img = cv2.imread(params.path)
cv2.imwrite(params.out_png, img, [cv2.IMWRITE_PNG_COMPRESSION, 0])
saved_img = cv2.imread(params.out_png)
assert (saved_img == img).all()
cv2.imwrite(params.out_jpg, img, [cv2.IMWRITE_JPEG_QUALITY, 0])
| true
| true
|
1c48f194eb2ad2ca307fb349d3027401b1f40d3e
| 164
|
py
|
Python
|
setup.py
|
BarryLiu97/kwyk
|
638edd85bfffe154180e0b861c0dc5c7ad5754fc
|
[
"Apache-2.0"
] | 16
|
2019-08-14T14:19:42.000Z
|
2021-11-21T15:21:50.000Z
|
setup.py
|
BarryLiu97/kwyk
|
638edd85bfffe154180e0b861c0dc5c7ad5754fc
|
[
"Apache-2.0"
] | 24
|
2019-08-14T19:13:21.000Z
|
2022-03-29T13:46:49.000Z
|
setup.py
|
BarryLiu97/kwyk
|
638edd85bfffe154180e0b861c0dc5c7ad5754fc
|
[
"Apache-2.0"
] | 10
|
2019-08-22T17:13:21.000Z
|
2021-11-21T15:21:51.000Z
|
from setuptools import setup
import versioneer
version = versioneer.get_version()
cmdclass = versioneer.get_cmdclass()
setup(version=version, cmdclass=cmdclass)
| 18.222222
| 41
| 0.817073
|
from setuptools import setup
import versioneer
version = versioneer.get_version()
cmdclass = versioneer.get_cmdclass()
setup(version=version, cmdclass=cmdclass)
| true
| true
|
1c48f1df284a6c58dfa75de1b7d15e3cb9fdfb70
| 388
|
py
|
Python
|
client/bt.py
|
AmarMaksumic/BlueComms
|
fe7020d0b025c61c7a5ea918b7c79cd64f98653c
|
[
"MIT"
] | null | null | null |
client/bt.py
|
AmarMaksumic/BlueComms
|
fe7020d0b025c61c7a5ea918b7c79cd64f98653c
|
[
"MIT"
] | null | null | null |
client/bt.py
|
AmarMaksumic/BlueComms
|
fe7020d0b025c61c7a5ea918b7c79cd64f98653c
|
[
"MIT"
] | null | null | null |
import socket
server_mac = server_port = s = None
def init(mac, port):
global server_mac
global server_port
server_mac = mac
server_port = port
def connect():
global s
s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)
s.connect((server_mac, server_port))
def send(message):
s.send(message.encode('utf-8'))
def disconnect():
s.close()
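# A hedged usage sketch: the MAC address and RFCOMM channel below are
# placeholders, and a BlueComms server must already be listening there.
# AF_BLUETOOTH sockets also require a Linux build of Python with Bluetooth
# support.
if __name__ == '__main__':
    init('00:11:22:33:44:55', 4)  # placeholder server MAC / channel
    connect()
    send('hello from the client')
    disconnect()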
| 19.4
| 83
| 0.726804
|
import socket
server_mac = server_port = s = None
def init(mac, port):
global server_mac
global server_port
server_mac = mac
server_port = port
def connect():
global s
s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)
s.connect((server_mac, server_port))
def send(message):
s.send(message.encode('utf-8'))
def disconnect():
s.close()
| true
| true
|
1c48f2344814242295953255855c079b009af965
| 7,241
|
py
|
Python
|
subnetting.py
|
patelnisheet/subnet
|
bff2abb0a9f7cfc00931f7c95ed8f2f426f3e0c3
|
[
"MIT"
] | 4
|
2021-06-18T12:51:23.000Z
|
2021-06-19T16:55:44.000Z
|
subnetting.py
|
patelnisheet/subnet
|
bff2abb0a9f7cfc00931f7c95ed8f2f426f3e0c3
|
[
"MIT"
] | null | null | null |
subnetting.py
|
patelnisheet/subnet
|
bff2abb0a9f7cfc00931f7c95ed8f2f426f3e0c3
|
[
"MIT"
] | null | null | null |
from math import ceil, log
#1 ip address
ipAddress = input("Enter ip Address: ")
#2 separated in 4 parts => string and binary
firstPart, secondPart, thirdPart, fourthPart = ipAddress.split(".")
ipAddressFourParts = [int(firstPart), int(secondPart), int(thirdPart), int(fourthPart)]
binaryipAddressFourParts = list(map(lambda x: format(int(x),"08b") , ipAddressFourParts))
#3 Class of IP address
if int(firstPart) <= 127:
addressRange = "A"
subnetMaskInitialPart = format(255,"b")
elif 128 <= int(firstPart) <= 191:
addressRange = "B"
subnetMaskInitialPart = format(255,"b") + format(255,"b")
elif 192 <= int(firstPart) <= 223:
    addressRange = "C"
    subnetMaskInitialPart = format(255,"b") + format(255,"b") + format(255,"b")
else:
    # Class D (multicast, 224-239) and Class E (reserved, 240-255) are not subnettable
    raise SystemExit("Class D/E addresses are not supported")
print("Address class: ",addressRange)
#4 Default subnet Mask
formation = str("0"+str(32-len(subnetMaskInitialPart))+"b")
trailingZeros = format(0,formation)
defaultSubnetMaskBinary = subnetMaskInitialPart + trailingZeros
defaultSubnetMaskWithDotsBinary = ''.join('.' * (n%8 == 0) + l for n, l in enumerate(defaultSubnetMaskBinary))
defaultSubnetMaskWithDotsBinary = defaultSubnetMaskWithDotsBinary[1:] #to remove . at start
defaultSubnetMaskWithDotsDecFourParts = list(map(lambda x: int(x,2) , defaultSubnetMaskWithDotsBinary.split(".")))
defaultSubnetMaskWithDotsDec = ".".join(str(x) for x in defaultSubnetMaskWithDotsDecFourParts)
print("Default subnet mask in Binary: ", defaultSubnetMaskWithDotsBinary)
print("Default subnet mask in Decimal: ", defaultSubnetMaskWithDotsDec)
#5 Network Address
networkAddressFourParts = list(map(lambda x: x[0] & x[1] , list(zip(ipAddressFourParts, defaultSubnetMaskWithDotsDecFourParts))))
networkAddressDotDec = ".".join(str(x) for x in networkAddressFourParts)
print("Network Address in Decimal: ", networkAddressDotDec)
binarynetworkAddressFourParts = list(map(lambda x: format(int(x),"08b") , networkAddressFourParts))
networkAddressBin = "".join(str(x) for x in binarynetworkAddressFourParts)
networkAddressDotBin = ''.join('.' * (n%8 == 0) + l for n, l in enumerate(networkAddressBin))
networkAddressDotBin = networkAddressDotBin[1:]
print("Network Address in Binary: ", networkAddressDotBin)
networkAddressInitialPart = networkAddressBin[0:len(subnetMaskInitialPart)]
#6 custom subnet mask & host
choice = input("Which information do You have?\n1. CIDR\n2. No of subnet Bits\n3. No of total subnets\n4. No of total hosts\n5. No of usable hosts\nYour choice should be 1, 2, 3, 4 or 5: ")
if choice == '1':
CIDR = input("Enter CIDR value: ")
CIDR = int(CIDR)
subnetBitsCount = CIDR - len(subnetMaskInitialPart)
hostsBitsCount = 32 - CIDR
elif choice == '2':
subnetBitsCount = input("Enter subnet *Bits* you want: ")
subnetBitsCount = int(subnetBitsCount)
hostsBitsCount = 32 - subnetBitsCount - len(subnetMaskInitialPart)
elif choice == '3':
totalSubnets = input("Enter total number of Subnets: ")
totalSubnets = int(totalSubnets)
subnetBitsCount = ceil(log(totalSubnets)/(log(2)))
hostsBitsCount = 32 - subnetBitsCount - len(subnetMaskInitialPart)
elif choice == '4':
totalHosts = input("Enter total number of Hosts: ")
totalHosts = int(totalHosts)
hostsBitsCount = ceil(log(totalHosts)/(log(2)))
subnetBitsCount = 32 - hostsBitsCount - len(subnetMaskInitialPart)
elif choice == '5':
usableHosts = input("Enter usableHosts value: ")
usableHosts = int(usableHosts)
usableHosts = usableHosts + 2
hostsBitsCount = ceil(log(usableHosts)/(log(2)))
subnetBitsCount = 32 - hostsBitsCount - len(subnetMaskInitialPart)
else:
print("Please input correct choice from 1 to 4 only...")
numberOfSubnets = (2**subnetBitsCount)
numberOfHosts = (2**hostsBitsCount)
print("Number of Subnet bits: ", subnetBitsCount)
print("Total Number of subnets: ", numberOfSubnets)
print("Number of host bits: ", hostsBitsCount)
print("Total Number of Hosts: ", numberOfHosts)
#7 CUSTOM subnet
formation = str("0"+str(subnetBitsCount+len(subnetMaskInitialPart))+"b")
customSubnet = format(2**(int(subnetBitsCount+len(subnetMaskInitialPart)))-1, formation)
formation = str("0"+str(hostsBitsCount)+"b")
customSubnetTrailingZero = format(0,formation)
customSubnetMaskBinary = customSubnet + customSubnetTrailingZero
customSubnetMaskWithDotsBinary = ''.join('.' * (n%8 == 0) + l for n, l in enumerate(customSubnetMaskBinary))
customSubnetMaskWithDotsBinary = customSubnetMaskWithDotsBinary[1:] #to remove . at start
customSubnetMaskWithDotsDecFourParts = list(map(lambda x: int(x,2) , customSubnetMaskWithDotsBinary.split(".")))
customSubnetMaskWithDotsDec = ".".join(str(x) for x in customSubnetMaskWithDotsDecFourParts)
print("Custom subnet Mask in Binary: ", customSubnetMaskWithDotsBinary)
print("Custom subnet Mask in Decimal: ", customSubnetMaskWithDotsDec)
def my_function(initialPart):
formation = str("0"+str(subnetBitsCount)+"b")
subnetAddressBits = format(subnetNumber-1, formation)
if len(subnetAddressBits) > subnetBitsCount:
print("You cannot borrow more bits than available")
formation = str("0"+str(hostsBitsCount)+"b")
networkAddressHostBits = format(0, formation)
networkAddressHostSubnet = subnetAddressBits + networkAddressHostBits
networkAddress = initialPart + networkAddressHostSubnet
networkAddressWithDotsBinary = ''.join('.' * (n%8 == 0) + l for n, l in enumerate(networkAddress))
networkAddressWithDotsBinary = networkAddressWithDotsBinary[1:] #to remove . at start
networkAddressWithDotsDecFourParts = list(map(lambda x: int(x,2) , networkAddressWithDotsBinary.split(".")))
networkAddressWithDotsDec = ".".join(str(x) for x in networkAddressWithDotsDecFourParts)
broadcastAddressHostBits = format(2**(int(hostsBitsCount))-1, formation)
broadcastAddressHostSubnet = subnetAddressBits + broadcastAddressHostBits
broadcastAddress = initialPart + broadcastAddressHostSubnet
broadcastAddressWithDotsBinary = ''.join('.' * (n%8 == 0) + l for n, l in enumerate(broadcastAddress))
broadcastAddressWithDotsBinary = broadcastAddressWithDotsBinary[1:] #to remove . at start
broadcastAddressWithDotsDecFourParts = list(map(lambda x: int(x,2) , broadcastAddressWithDotsBinary.split(".")))
broadcastAddressWithDotsDec = ".".join(str(x) for x in broadcastAddressWithDotsDecFourParts)
print(networkAddressWithDotsDec ," to ", broadcastAddressWithDotsDec)
print("In binary: ", networkAddressWithDotsBinary," to ", broadcastAddressWithDotsBinary)
#8 Need information specific subnet
while(True):
subnetNumber = int(input("Enter subnet's number you want: "))
formation = str("0"+str(subnetBitsCount)+"b")
subnetAddressBits = format(subnetNumber-1, formation)
if len(subnetAddressBits) > subnetBitsCount:
print("You cannot borrow more bits than available")
continue
print("You required of: ",subnetNumber)
print("-"*80)
print("Network Range: ", end="")
my_function(networkAddressInitialPart)
print("-"*80)
print("Subnet Range: ", end="")
my_function(subnetMaskInitialPart)
| 49.937931
| 190
| 0.729319
|
from math import ceil, log
ipAddress = input("Enter ip Address: ")
firstPart, secondPart, thirdPart, fourthPart = ipAddress.split(".")
ipAddressFourParts = [int(firstPart), int(secondPart), int(thirdPart), int(fourthPart)]
binaryipAddressFourParts = list(map(lambda x: format(int(x),"08b") , ipAddressFourParts))
if int(firstPart) <= 127:
addressRange = "A"
subnetMaskInitialPart = format(255,"b")
elif 128 <= int(firstPart) <= 191:
addressRange = "B"
subnetMaskInitialPart = format(255,"b") + format(255,"b")
elif 192 <= int(firstPart) <= 223:
    addressRange = "C"
    subnetMaskInitialPart = format(255,"b") + format(255,"b") + format(255,"b")
else:
    raise SystemExit("Class D/E addresses are not supported")
print("Address class: ",addressRange)
formation = str("0"+str(32-len(subnetMaskInitialPart))+"b")
trailingZeros = format(0,formation)
defaultSubnetMaskBinary = subnetMaskInitialPart + trailingZeros
defaultSubnetMaskWithDotsBinary = ''.join('.' * (n%8 == 0) + l for n, l in enumerate(defaultSubnetMaskBinary))
defaultSubnetMaskWithDotsBinary = defaultSubnetMaskWithDotsBinary[1:]
defaultSubnetMaskWithDotsDecFourParts = list(map(lambda x: int(x,2) , defaultSubnetMaskWithDotsBinary.split(".")))
defaultSubnetMaskWithDotsDec = ".".join(str(x) for x in defaultSubnetMaskWithDotsDecFourParts)
print("Default subnet mask in Binary: ", defaultSubnetMaskWithDotsBinary)
print("Default subnet mask in Decimal: ", defaultSubnetMaskWithDotsDec)
networkAddressFourParts = list(map(lambda x: x[0] & x[1] , list(zip(ipAddressFourParts, defaultSubnetMaskWithDotsDecFourParts))))
networkAddressDotDec = ".".join(str(x) for x in networkAddressFourParts)
print("Network Address in Decimal: ", networkAddressDotDec)
binarynetworkAddressFourParts = list(map(lambda x: format(int(x),"08b") , networkAddressFourParts))
networkAddressBin = "".join(str(x) for x in binarynetworkAddressFourParts)
networkAddressDotBin = ''.join('.' * (n%8 == 0) + l for n, l in enumerate(networkAddressBin))
networkAddressDotBin = networkAddressDotBin[1:]
print("Network Address in Binary: ", networkAddressDotBin)
networkAddressInitialPart = networkAddressBin[0:len(subnetMaskInitialPart)]
choice = input("Which information do You have?\n1. CIDR\n2. No of subnet Bits\n3. No of total subnets\n4. No of total hosts\n5. No of usable hosts\nYour choice should be 1, 2, 3, 4 or 5: ")
if choice == '1':
CIDR = input("Enter CIDR value: ")
CIDR = int(CIDR)
subnetBitsCount = CIDR - len(subnetMaskInitialPart)
hostsBitsCount = 32 - CIDR
elif choice == '2':
subnetBitsCount = input("Enter subnet *Bits* you want: ")
subnetBitsCount = int(subnetBitsCount)
hostsBitsCount = 32 - subnetBitsCount - len(subnetMaskInitialPart)
elif choice == '3':
totalSubnets = input("Enter total number of Subnets: ")
totalSubnets = int(totalSubnets)
subnetBitsCount = ceil(log(totalSubnets)/(log(2)))
hostsBitsCount = 32 - subnetBitsCount - len(subnetMaskInitialPart)
elif choice == '4':
totalHosts = input("Enter total number of Hosts: ")
totalHosts = int(totalHosts)
hostsBitsCount = ceil(log(totalHosts)/(log(2)))
subnetBitsCount = 32 - hostsBitsCount - len(subnetMaskInitialPart)
elif choice == '5':
usableHosts = input("Enter usableHosts value: ")
usableHosts = int(usableHosts)
usableHosts = usableHosts + 2
hostsBitsCount = ceil(log(usableHosts)/(log(2)))
subnetBitsCount = 32 - hostsBitsCount - len(subnetMaskInitialPart)
else:
print("Please input correct choice from 1 to 4 only...")
numberOfSubnets = (2**subnetBitsCount)
numberOfHosts = (2**hostsBitsCount)
print("Number of Subnet bits: ", subnetBitsCount)
print("Total Number of subnets: ", numberOfSubnets)
print("Number of host bits: ", hostsBitsCount)
print("Total Number of Hosts: ", numberOfHosts)
formation = str("0"+str(subnetBitsCount+len(subnetMaskInitialPart))+"b")
customSubnet = format(2**(int(subnetBitsCount+len(subnetMaskInitialPart)))-1, formation)
formation = str("0"+str(hostsBitsCount)+"b")
customSubnetTrailingZero = format(0,formation)
customSubnetMaskBinary = customSubnet + customSubnetTrailingZero
customSubnetMaskWithDotsBinary = ''.join('.' * (n%8 == 0) + l for n, l in enumerate(customSubnetMaskBinary))
customSubnetMaskWithDotsBinary = customSubnetMaskWithDotsBinary[1:]
customSubnetMaskWithDotsDecFourParts = list(map(lambda x: int(x,2) , customSubnetMaskWithDotsBinary.split(".")))
customSubnetMaskWithDotsDec = ".".join(str(x) for x in customSubnetMaskWithDotsDecFourParts)
print("Custom subnet Mask in Binary: ", customSubnetMaskWithDotsBinary)
print("Custom subnet Mask in Decimal: ", customSubnetMaskWithDotsDec)
def my_function(initialPart):
formation = str("0"+str(subnetBitsCount)+"b")
subnetAddressBits = format(subnetNumber-1, formation)
if len(subnetAddressBits) > subnetBitsCount:
print("You cannot borrow more bits than available")
formation = str("0"+str(hostsBitsCount)+"b")
networkAddressHostBits = format(0, formation)
networkAddressHostSubnet = subnetAddressBits + networkAddressHostBits
networkAddress = initialPart + networkAddressHostSubnet
networkAddressWithDotsBinary = ''.join('.' * (n%8 == 0) + l for n, l in enumerate(networkAddress))
    networkAddressWithDotsBinary = networkAddressWithDotsBinary[1:]
    networkAddressWithDotsDecFourParts = list(map(lambda x: int(x,2) , networkAddressWithDotsBinary.split(".")))
networkAddressWithDotsDec = ".".join(str(x) for x in networkAddressWithDotsDecFourParts)
broadcastAddressHostBits = format(2**(int(hostsBitsCount))-1, formation)
broadcastAddressHostSubnet = subnetAddressBits + broadcastAddressHostBits
broadcastAddress = initialPart + broadcastAddressHostSubnet
broadcastAddressWithDotsBinary = ''.join('.' * (n%8 == 0) + l for n, l in enumerate(broadcastAddress))
    broadcastAddressWithDotsBinary = broadcastAddressWithDotsBinary[1:]
    broadcastAddressWithDotsDecFourParts = list(map(lambda x: int(x,2) , broadcastAddressWithDotsBinary.split(".")))
broadcastAddressWithDotsDec = ".".join(str(x) for x in broadcastAddressWithDotsDecFourParts)
print(networkAddressWithDotsDec ," to ", broadcastAddressWithDotsDec)
print("In binary: ", networkAddressWithDotsBinary," to ", broadcastAddressWithDotsBinary)
while(True):
subnetNumber = int(input("Enter subnet's number you want: "))
formation = str("0"+str(subnetBitsCount)+"b")
subnetAddressBits = format(subnetNumber-1, formation)
if len(subnetAddressBits) > subnetBitsCount:
print("You cannot borrow more bits than available")
continue
print("You required of: ",subnetNumber)
print("-"*80)
print("Network Range: ", end="")
my_function(networkAddressInitialPart)
print("-"*80)
print("Subnet Range: ", end="")
my_function(subnetMaskInitialPart)
| true
| true
|
1c48f23a0607eb4431e38e6fa76c6a0f127f7dba
| 11,326
|
py
|
Python
|
learning/MLP_base.py
|
tblondelle/TransferLearningProject
|
1c6a9bba2480919e22dd08756f328a47a321eafa
|
[
"Apache-2.0"
] | 2
|
2018-01-12T16:54:52.000Z
|
2018-03-01T09:35:06.000Z
|
learning/MLP_base.py
|
tblondelle/TransferLearningProject
|
1c6a9bba2480919e22dd08756f328a47a321eafa
|
[
"Apache-2.0"
] | null | null | null |
learning/MLP_base.py
|
tblondelle/TransferLearningProject
|
1c6a9bba2480919e22dd08756f328a47a321eafa
|
[
"Apache-2.0"
] | 1
|
2018-06-26T12:46:33.000Z
|
2018-06-26T12:46:33.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
import os
import time
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from numpy.random import permutation
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import time
import math
import matplotlib.pyplot as plt
use_cuda = torch.cuda.is_available()
print("Utilisation de la carte graphique :",use_cuda)
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
class my_MLP(nn.Module):
def __init__(self, input_size, hidden_size,batch_size, n_layers=1):
super(my_MLP, self).__init__()
self.n_layers = n_layers
self.hidden_size = hidden_size
self.batch_size = batch_size
self.input_size = input_size
self.linear1 = nn.Linear(input_size,hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, 1)
        # the final linear layer makes the output have the right size
def forward(self, input):
        # Inputs:
        #   input (Variable(mat)): the instances
        # Output:
        #   Variable(vect): the predictions
X_int_1 = F.relu(self.linear1(input))
X_int_2 = F.relu(self.linear2(X_int_1))
return torch.tanh(self.linear3(X_int_2))
def train_once(self, input_variable, target_variable, optimizer, criterion):
        # Performs the training for a single epoch
        # Inputs:
        #  - input_variable Variable(mat): training instances
        #  - target_variable Variable(vect(+1|-1)): labels
        #  - optimizer (pytorch object): the result of optim.SGD or optim.Adam
        #  - criterion (pytorch object): the result of nn.L1Loss or nn.MSELoss
        # Outputs:
        #  none
optimizer.zero_grad()
input_length = input_variable.size()[0]
output= self(input_variable)
loss = criterion(output.view(1,-1), target_variable.view(-1))
loss.backward()
optimizer.step()
return loss.data[0]
def trainIters(self, n_epochs, training_pairs, te_pairs, learning_rate, print_every=1000, eval_every = 1000):
        # Runs the full training from the given data sets
        # Inputs:
        #  - n_epochs (int): number of times every instance of the training set is applied
        #  - training_pairs (Variable(mat), Variable(vect(+1|-1))): training instances
        #  - te_pairs (list of (Variable(vect), Variable(+1|-1))): test instances
        #  - learning_rate (float): guess ;)
        #  - print_every (int): prints the average loss every print_every epochs
        #  - eval_every (int): tests the NN on the test set and prints the confusion matrix
        # Outputs:
        #  none
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
#optimizer = optim.SGD(self.parameters(), lr=learning_rate)
        # Another possible choice:
optimizer = optim.Adam(self.parameters(), lr=learning_rate)
criterion = nn.L1Loss()
#criterion = nn.MSELoss()
for epoch in range(1, n_epochs + 1):
input_variable = training_pairs[0]
target_variable = training_pairs[1]
loss = self.train_once(input_variable, target_variable, optimizer, criterion)
print_loss_total += loss
if epoch % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print('%s (%d %d%%) %.4f' % (timeSince(start, epoch / n_epochs),
epoch, epoch / n_epochs * 100, print_loss_avg))
if epoch % eval_every == 0:
self.evaluateRandomly(te_pairs) # show global results
def evaluateRandomly(self, pairs):
# evaluate on all pairs, print the confusion matrix
n_successes = 0
n_pos = 0 # also computes the proportion of positive reviews
TP,TN,FP,FN = 0,0,0,0
for pair in pairs: # replace with pairs[:n] for testing
output = self(pair[1])
#success = (output[int(pair[1])] == max(output))
note = pair[0].data[0,0]
predicted = output.data[0]
success = (note*predicted > 0)
if success :
n_successes += 1
if note>0:
TP += 1
else:
TN += 1
            else:
                if note>0:
                    FN += 1  # actual positive, predicted negative
                else:
                    FP += 1  # actual negative, predicted positive
n_pos = n_pos+1 if note==1 else n_pos
print('')
print('')
print('Confusion matrix ')
print()
print(" \t\t Actual class")
print(" \t\t Pos \t Neg")
print("Predicted Pos \t {} \t {}".format(TP,FN))
print(" Neg \t {} \t {}".format(FP,TN))
print('')
        print('\t \t \t \t Positive reviews (%) : ',100*n_pos/len(pairs))
print('\t \t \t \t Success rate (%) : ',100*n_successes/len(pairs))
# overriding getData to only load 1 folder
def getData(folder):
"""
Input:
- folder: string of the path of a folder containing txt files.
Output:
- listdata: list of [Y, X] (e.g. Y = 'Positive', X = "very cool")
"""
listdata = []
filenames = os.listdir(folder)
for filename in filenames[:1]: # change here
with open(os.path.join(folder, filename), 'r') as f:
for line in f:
line2 = line.strip().split('\t')
if len(line2) == 2:
listdata.append(line2)
return listdata
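# Example of the tab-separated line format getData expects (the review text is
# hypothetical): "Positive\tGreat book, would read again."
# Lines that do not split into exactly two tab-separated fields are skipped.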
def folder2data(train_filename,test_filename,balanced_tr ,balanced_te, n_features):
    # Inputs:
    #   - train_filename (str) : the name of the **folder** (not the file name) holding the training instances
    #   - test_filename (str) : the name of the **folder** (not the file name) holding the test instances
    #   - balanced_tr (bool) : True to balance the training set; False to leave it as is
    #   - balanced_te (bool) : True to balance the test set; False to leave it as is
    #   - n_features (int) : number of features used to encode each instance
    # Outputs:
    #   - tuple (new_tr_pairs, new_te_pairs):
    #       new_tr_pairs : (Variable(mat), Variable(vect(+1|-1)))
    #       new_te_pairs : list of (Variable(vect), Variable(+1|-1))
tr_te_pairs = {}
pairs = getData(train_filename)
print(pairs[:2])
if balanced_tr :
        # For a 75/25 balance
pairs_using_numbers = [(-1,text) for (target,text) in pairs if (target == 'Negative' or target == 'Neutral')]
Positive_reviews = [(1,text) for (target,text) in pairs if target == 'Positive']
pairs_using_numbers += Positive_reviews[:int(len(pairs_using_numbers)*3)]
tr_pairs = pairs_using_numbers
"""
#Pour un équilibrage 50/50
pairs_using_numbers = [(-1,text) for (target,text) in pairs if target == 'Negative']
Positive_reviews = [(1,text) for (target,text) in pairs if target == 'Positive']
pairs_using_numbers += Positive_reviews[:int(len(pairs_using_numbers))]
tr_pairs = pairs_using_numbers
"""
else :
pairs_using_numbers = [(1,text) for (target,text) in pairs if target == 'Positive']
pairs_using_numbers += [(-1,text) for (target,text) in pairs if (target == 'Negative' or target == 'Neutral')]
tr_pairs = pairs_using_numbers
pairs = getData(test_filename)
print(pairs[:2])
if balanced_te :
pairs_using_numbers = [(-1,text) for (target,text) in pairs if (target == 'Negative' or target == 'Neutral')]
Positive_reviews = [(1,text) for (target,text) in pairs if target == 'Positive']
pairs_using_numbers += Positive_reviews[:int(len(pairs_using_numbers))]
te_pairs = pairs_using_numbers
else :
pairs_using_numbers = [(1,text) for (target,text) in pairs if target == 'Positive']
pairs_using_numbers += [(-1,text) for (target,text) in pairs if (target == 'Negative' or target == 'Neutral')]
te_pairs = pairs_using_numbers
print([text for (_,text) in tr_pairs[:2]])
tfidf_vectorizer = TfidfVectorizer(ngram_range=(1,2))
tfidf_vectorizer.fit([ text for (_,text) in tr_pairs+te_pairs])
# fitting
X_tr_token = tfidf_vectorizer.transform([ text for (_,text) in tr_pairs])
X_te_token = tfidf_vectorizer.transform([ text for (_,text) in te_pairs])
    truncatedsvd = TruncatedSVD(n_components=n_features)  # prepares to project the data into an n_components-dimensional space
    # Fit on the training matrix only; the original code also called fit on
    # X_te_token, which silently overwrote the training fit.
    truncatedsvd.fit(X_tr_token)
    # Dimensionality reduction
    X_tr_reduced_dim = truncatedsvd.transform(X_tr_token)
    X_te_reduced_dim = truncatedsvd.transform(X_te_token)
    print('explained variance retained :',sum(truncatedsvd.explained_variance_ratio_))
new_tr_pairs = [Variable(torch.FloatTensor(X_tr_reduced_dim)),Variable(torch.FloatTensor([[note for (note,_) in tr_pairs]]))]
new_te_pairs = []
for i in range(len(te_pairs)):
(note,_) = te_pairs[i]
note = Variable(torch.FloatTensor([[note]]))
vect = X_te_reduced_dim[i,:]
variable_vect = torch.autograd.Variable(torch.Tensor(vect))
new_te_pairs.append((note,variable_vect))
return new_tr_pairs, new_te_pairs
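# Shape sketch of the returned values, inferred from the code above:
#   new_tr_pairs[0] : Variable over a (n_train, n_features) FloatTensor
#   new_tr_pairs[1] : Variable over a (1, n_train) FloatTensor of +1/-1 labels
#   new_te_pairs    : list of (label Variable [[+1|-1]], feature Variable of length n_features)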
# ==================================================================
# ================ Using the MLP in itself =========================
# ==================================================================
training_set_folder = "../../data/data_books_training_set"
test_set_folder = "../../data/data_videos_testing_set"
#test_set_folder = "../../data/data_books_testing_set"
n_features = 200
tr_pairs,te_pairs = folder2data(training_set_folder,test_set_folder,balanced_tr = True,balanced_te = True,n_features=n_features)
hidden_size = 100
batch_size = tr_pairs[0].data.size()[0]
MLP = my_MLP(n_features, hidden_size, batch_size, n_layers = 1)
#MLP.evaluateNpairs(te_pairs,1) # show some examples
lr = 0.005
N_epochs = 20000
print("learning rate",lr)
print(batch_size,'instances')
MLP.trainIters( N_epochs,tr_pairs,te_pairs,lr,500,5000)
MLP.evaluateRandomly(te_pairs) # show global results
torch.save(MLP,'MLP')
#cours ; cd 2eme_partie_S9/Transfer_learning/TransferLearningProject/learning/ ; python MLP_base.py
print('')
print('')
print(' Done')
print('')
print('')
print('')
| 32.734104
| 130
| 0.620607
|
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
import os
import time
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from numpy.random import permutation
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import time
import math
import matplotlib.pyplot as plt
use_cuda = torch.cuda.is_available()
print("Utilisation de la carte graphique :",use_cuda)
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
class my_MLP(nn.Module):
def __init__(self, input_size, hidden_size,batch_size, n_layers=1):
super(my_MLP, self).__init__()
self.n_layers = n_layers
self.hidden_size = hidden_size
self.batch_size = batch_size
self.input_size = input_size
self.linear1 = nn.Linear(input_size,hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, 1)
def forward(self, input):
X_int_1 = F.relu(self.linear1(input))
X_int_2 = F.relu(self.linear2(X_int_1))
return torch.tanh(self.linear3(X_int_2))
def train_once(self, input_variable, target_variable, optimizer, criterion):
        # Inputs:
        #   - input_variable Variable(mat) : training instances
        #   - target_variable Variable(vect(+1|-1)) : labels
        #   - optimizer (pytorch object) : the result of optim.SGD or optim.Adam
        #   - criterion (pytorch object) : the result of nn.L1Loss or nn.MSELoss
        # Output:
        #   the loss value for this step
optimizer.zero_grad()
input_length = input_variable.size()[0]
output= self(input_variable)
loss = criterion(output.view(1,-1), target_variable.view(-1))
loss.backward()
optimizer.step()
return loss.data[0]
def trainIters(self, n_epochs, training_pairs, te_pairs, learning_rate, print_every=1000, eval_every = 1000):
        # Runs the full training loop from the training sets
        # Inputs:
        #   - n_epochs (int) : number of passes over the whole training set
        #   - te_pairs (list of (Variable(vect), Variable(+1|-1))) : test instances
        #   - learning_rate (float) : guess ;)
        #   - print_every (int) : prints the average loss every print_every epochs
start = time.time()
plot_losses = []
print_loss_total = 0
optimizer = optim.Adam(self.parameters(), lr=learning_rate)
criterion = nn.L1Loss()
for epoch in range(1, n_epochs + 1):
input_variable = training_pairs[0]
target_variable = training_pairs[1]
loss = self.train_once(input_variable, target_variable, optimizer, criterion)
print_loss_total += loss
if epoch % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print('%s (%d %d%%) %.4f' % (timeSince(start, epoch / n_epochs),
epoch, epoch / n_epochs * 100, print_loss_avg))
if epoch % eval_every == 0:
self.evaluateRandomly(te_pairs)
def evaluateRandomly(self, pairs):
n_successes = 0
n_pos = 0
TP,TN,FP,FN = 0,0,0,0
        for pair in pairs:
            output = self(pair[1])
note = pair[0].data[0,0]
predicted = output.data[0]
success = (note*predicted > 0)
if success :
n_successes += 1
if note>0:
TP += 1
else:
TN += 1
            else:
                if note>0:
                    FN += 1
                else:
                    FP += 1
n_pos = n_pos+1 if note==1 else n_pos
print('')
print('')
print('Confusion matrix ')
print()
print(" \t\t Actual class")
print(" \t\t Pos \t Neg")
print("Predicted Pos \t {} \t {}".format(TP,FN))
print(" Neg \t {} \t {}".format(FP,TN))
print('')
        print('\t \t \t \t Positive reviews (%) : ',100*n_pos/len(pairs))
print('\t \t \t \t Success rate (%) : ',100*n_successes/len(pairs))
def getData(folder):
listdata = []
filenames = os.listdir(folder)
for filename in filenames[:1]:
with open(os.path.join(folder, filename), 'r') as f:
for line in f:
line2 = line.strip().split('\t')
if len(line2) == 2:
listdata.append(line2)
return listdata
def folder2data(train_filename,test_filename,balanced_tr ,balanced_te, n_features):
    # - test_filename (str) : the name of the **folder** (not the file name) holding the test instances
    # - balanced_tr (bool) : True to balance the training set; False to leave it as is
tr_te_pairs = {}
pairs = getData(train_filename)
print(pairs[:2])
if balanced_tr :
pairs_using_numbers = [(-1,text) for (target,text) in pairs if (target == 'Negative' or target == 'Neutral')]
Positive_reviews = [(1,text) for (target,text) in pairs if target == 'Positive']
pairs_using_numbers += Positive_reviews[:int(len(pairs_using_numbers)*3)]
tr_pairs = pairs_using_numbers
else :
pairs_using_numbers = [(1,text) for (target,text) in pairs if target == 'Positive']
pairs_using_numbers += [(-1,text) for (target,text) in pairs if (target == 'Negative' or target == 'Neutral')]
tr_pairs = pairs_using_numbers
pairs = getData(test_filename)
print(pairs[:2])
if balanced_te :
pairs_using_numbers = [(-1,text) for (target,text) in pairs if (target == 'Negative' or target == 'Neutral')]
Positive_reviews = [(1,text) for (target,text) in pairs if target == 'Positive']
pairs_using_numbers += Positive_reviews[:int(len(pairs_using_numbers))]
te_pairs = pairs_using_numbers
else :
pairs_using_numbers = [(1,text) for (target,text) in pairs if target == 'Positive']
pairs_using_numbers += [(-1,text) for (target,text) in pairs if (target == 'Negative' or target == 'Neutral')]
te_pairs = pairs_using_numbers
print([text for (_,text) in tr_pairs[:2]])
tfidf_vectorizer = TfidfVectorizer(ngram_range=(1,2))
tfidf_vectorizer.fit([ text for (_,text) in tr_pairs+te_pairs])
X_tr_token = tfidf_vectorizer.transform([ text for (_,text) in tr_pairs])
X_te_token = tfidf_vectorizer.transform([ text for (_,text) in te_pairs])
    truncatedsvd = TruncatedSVD(n_components=n_features)
    # Fit on the training matrix only; the original second fit on X_te_token
    # silently overwrote the training fit.
    truncatedsvd.fit(X_tr_token)
X_tr_reduced_dim = truncatedsvd.transform(X_tr_token)
X_te_reduced_dim = truncatedsvd.transform(X_te_token)
    print('explained variance retained :',sum(truncatedsvd.explained_variance_ratio_))
new_tr_pairs = [Variable(torch.FloatTensor(X_tr_reduced_dim)),Variable(torch.FloatTensor([[note for (note,_) in tr_pairs]]))]
new_te_pairs = []
for i in range(len(te_pairs)):
(note,_) = te_pairs[i]
note = Variable(torch.FloatTensor([[note]]))
vect = X_te_reduced_dim[i,:]
variable_vect = torch.autograd.Variable(torch.Tensor(vect))
new_te_pairs.append((note,variable_vect))
return new_tr_pairs, new_te_pairs
training_set_folder = "../../data/data_books_training_set"
test_set_folder = "../../data/data_videos_testing_set"
n_features = 200
tr_pairs,te_pairs = folder2data(training_set_folder,test_set_folder,balanced_tr = True,balanced_te = True,n_features=n_features)
hidden_size = 100
batch_size = tr_pairs[0].data.size()[0]
MLP = my_MLP(n_features, hidden_size, batch_size, n_layers = 1)
lr = 0.005
N_epochs = 20000
print("learning rate",lr)
print(batch_size,'instances')
MLP.trainIters( N_epochs,tr_pairs,te_pairs,lr,500,5000)
MLP.evaluateRandomly(te_pairs)
torch.save(MLP,'MLP')
print('')
print('')
print(' Done')
print('')
print('')
print('')
| true
| true
|
1c48f24ae3c32c49052dd25f913598d1564702d8
| 154
|
py
|
Python
|
moztrap/view/users/context_processors.py
|
mbeko/moztrap
|
db75e1f8756ef2c0c39652a66302b19c8afa0256
|
[
"BSD-2-Clause"
] | null | null | null |
moztrap/view/users/context_processors.py
|
mbeko/moztrap
|
db75e1f8756ef2c0c39652a66302b19c8afa0256
|
[
"BSD-2-Clause"
] | null | null | null |
moztrap/view/users/context_processors.py
|
mbeko/moztrap
|
db75e1f8756ef2c0c39652a66302b19c8afa0256
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Auth-related context processors.
"""
from django.conf import settings
def browserid(request):
return {"USE_BROWSERID": settings.USE_BROWSERID}
| 15.4
| 52
| 0.746753
|
from django.conf import settings
def browserid(request):
return {"USE_BROWSERID": settings.USE_BROWSERID}
| true
| true
|
1c48f29a5bdce0893bb04d299aa247d12d029e89
| 5,525
|
py
|
Python
|
python/pyspark/sql/observation.py
|
kyoty/spark
|
4a4f207f4215d56f126c2474fd7a94f427937a2f
|
[
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 4
|
2020-07-30T02:37:20.000Z
|
2021-03-20T11:36:46.000Z
|
python/pyspark/sql/observation.py
|
kyoty/spark
|
4a4f207f4215d56f126c2474fd7a94f427937a2f
|
[
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 10
|
2021-04-14T10:54:00.000Z
|
2021-04-18T04:53:54.000Z
|
python/pyspark/sql/observation.py
|
kyoty/spark
|
4a4f207f4215d56f126c2474fd7a94f427937a2f
|
[
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 4
|
2015-09-11T13:27:02.000Z
|
2021-03-29T11:14:32.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Dict, Optional
from py4j.java_gateway import JavaObject, JVMView # type: ignore[import]
from pyspark.sql import column
from pyspark.sql.column import Column
from pyspark.sql.dataframe import DataFrame
__all__ = ["Observation"]
class Observation:
"""Class to observe (named) metrics on a :class:`DataFrame`.
Metrics are aggregation expressions, which are applied to the DataFrame while it is being
processed by an action.
The metrics have the following guarantees:
- It will compute the defined aggregates (metrics) on all the data that is flowing through
the Dataset during the action.
- It will report the value of the defined aggregate columns as soon as we reach the end of
the action.
The metrics columns must either contain a literal (e.g. lit(42)), or should contain one or
more aggregate functions (e.g. sum(a) or sum(a + b) + avg(c) - lit(1)). Expressions that
contain references to the input Dataset's columns must always be wrapped in an aggregate
function.
An Observation instance collects the metrics while the first action is executed. Subsequent
actions do not modify the metrics returned by `Observation.get`. Retrieval of the metric via
`Observation.get` blocks until the first action has finished and metrics become available.
.. versionadded:: 3.3.0
Notes
-----
This class does not support streaming datasets.
Examples
--------
>>> from pyspark.sql.functions import col, count, lit, max
>>> from pyspark.sql import Observation
>>> df = spark.createDataFrame([["Alice", 2], ["Bob", 5]], ["name", "age"])
>>> observation = Observation("my metrics")
>>> observed_df = df.observe(observation, count(lit(1)).alias("count"), max(col("age")))
>>> observed_df.count()
2
>>> observation.get
{'count': 2, 'max(age)': 5}
"""
def __init__(self, name: Optional[str] = None) -> None:
"""Constructs a named or unnamed Observation instance.
Parameters
----------
name : str, optional
default is a random UUID string. This is the name of the Observation and the metric.
"""
if name is not None:
if not isinstance(name, str):
raise TypeError("name should be a string")
if name == '':
raise ValueError("name should not be empty")
self._name = name
self._jvm: Optional[JVMView] = None
self._jo: Optional[JavaObject] = None
def _on(self, df: DataFrame, *exprs: Column) -> DataFrame:
"""Attaches this observation to the given :class:`DataFrame` to observe aggregations.
Parameters
----------
df : :class:`DataFrame`
the :class:`DataFrame` to be observed
exprs : list of :class:`Column`
column expressions (:class:`Column`).
Returns
-------
:class:`DataFrame`
the observed :class:`DataFrame`.
"""
assert exprs, "exprs should not be empty"
assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
assert self._jo is None, "an Observation can be used with a DataFrame only once"
self._jvm = df._sc._jvm # type: ignore[attr-defined]
cls = self._jvm.org.apache.spark.sql.Observation # type: ignore[attr-defined]
self._jo = cls(self._name) if self._name is not None else cls()
observed_df = self._jo.on(
df._jdf,
exprs[0]._jc,
column._to_seq(df._sc, [c._jc for c in exprs[1:]])
)
return DataFrame(observed_df, df.sql_ctx)
@property
def get(self) -> Dict[str, Any]:
"""Get the observed metrics.
Waits until the observed dataset finishes its first action. Only the result of the
first action is available. Subsequent actions do not modify the result.
Returns
-------
dict
the observed metrics
"""
assert self._jo is not None, 'call DataFrame.observe'
jmap = self._jo.getAsJava()
# return a pure Python dict, not jmap which is a py4j JavaMap
return {k: v for k, v in jmap.items()}
def _test() -> None:
import doctest
import sys
from pyspark.context import SparkContext
from pyspark.sql import SparkSession
import pyspark.sql.observation
globs = pyspark.sql.observation.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['spark'] = SparkSession(sc)
(failure_count, test_count) = doctest.testmod(pyspark.sql.observation, globs=globs)
sc.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| 36.833333
| 96
| 0.655928
|
from typing import Any, Dict, Optional
from py4j.java_gateway import JavaObject, JVMView
from pyspark.sql import column
from pyspark.sql.column import Column
from pyspark.sql.dataframe import DataFrame
__all__ = ["Observation"]
class Observation:
def __init__(self, name: Optional[str] = None) -> None:
if name is not None:
if not isinstance(name, str):
raise TypeError("name should be a string")
if name == '':
raise ValueError("name should not be empty")
self._name = name
self._jvm: Optional[JVMView] = None
self._jo: Optional[JavaObject] = None
def _on(self, df: DataFrame, *exprs: Column) -> DataFrame:
assert exprs, "exprs should not be empty"
assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
assert self._jo is None, "an Observation can be used with a DataFrame only once"
        self._jvm = df._sc._jvm
        cls = self._jvm.org.apache.spark.sql.Observation
        self._jo = cls(self._name) if self._name is not None else cls()
observed_df = self._jo.on(
df._jdf,
exprs[0]._jc,
column._to_seq(df._sc, [c._jc for c in exprs[1:]])
)
return DataFrame(observed_df, df.sql_ctx)
@property
def get(self) -> Dict[str, Any]:
assert self._jo is not None, 'call DataFrame.observe'
jmap = self._jo.getAsJava()
return {k: v for k, v in jmap.items()}
def _test() -> None:
import doctest
import sys
from pyspark.context import SparkContext
from pyspark.sql import SparkSession
import pyspark.sql.observation
globs = pyspark.sql.observation.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['spark'] = SparkSession(sc)
(failure_count, test_count) = doctest.testmod(pyspark.sql.observation, globs=globs)
sc.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| true
| true
|
1c48f372132c29c942954184e87f8fc352bba0c7
| 158
|
py
|
Python
|
contrib/wallettools/walletunlock.py
|
safrica/bit
|
ae9533aeb09965b324191357a6afd90f627b7c2f
|
[
"MIT"
] | null | null | null |
contrib/wallettools/walletunlock.py
|
safrica/bit
|
ae9533aeb09965b324191357a6afd90f627b7c2f
|
[
"MIT"
] | null | null | null |
contrib/wallettools/walletunlock.py
|
safrica/bit
|
ae9533aeb09965b324191357a6afd90f627b7c2f
|
[
"MIT"
] | null | null | null |
from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:8432")
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
| 31.6
| 46
| 0.765823
|
from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:8432")
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
| true
| true
|
1c48f568c63b011a7af4c9af7b69df0947e61549
| 3,139
|
py
|
Python
|
lib/dataset/dataset_factory.py
|
chencq1234/ssds.pytorch
|
340aeac3e5f15ffeee6750f40bfbd64343926fc9
|
[
"MIT"
] | null | null | null |
lib/dataset/dataset_factory.py
|
chencq1234/ssds.pytorch
|
340aeac3e5f15ffeee6750f40bfbd64343926fc9
|
[
"MIT"
] | null | null | null |
lib/dataset/dataset_factory.py
|
chencq1234/ssds.pytorch
|
340aeac3e5f15ffeee6750f40bfbd64343926fc9
|
[
"MIT"
] | null | null | null |
from lib.dataset import voc
from lib.dataset import coco
dataset_map = {
'voc': voc.VOCDetection,
'coco': coco.COCODetection,
}
def gen_dataset_fn(name):
"""Returns a dataset func.
Args:
name: The name of the dataset.
Returns:
func: dataset_fn
Raises:
ValueError: If network `name` is not recognized.
"""
if name not in dataset_map:
raise ValueError('The dataset unknown %s' % name)
func = dataset_map[name]
return func
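# Usage sketch: resolve a detection dataset class by name.
#   dataset_cls = gen_dataset_fn('voc')   # -> voc.VOCDetection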
import torch
import numpy as np
def detection_collate(batch):
"""Custom collate fn for dealing with batches of images that have a different
number of associated object annotations (bounding boxes).
Arguments:
batch: (tuple) A tuple of tensor images and lists of annotations
Return:
A tuple containing:
1) (tensor) batch of images stacked on their 0 dim
2) (list of tensors) annotations for a given image are stacked on 0 dim
"""
targets = []
imgs = []
for _, sample in enumerate(batch):
for _, tup in enumerate(sample):
if torch.is_tensor(tup):
imgs.append(tup)
elif isinstance(tup, type(np.empty(0))):
annos = torch.from_numpy(tup).float()
targets.append(annos)
return (torch.stack(imgs, 0), targets)
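# Usage sketch (mirrors load_data below; the batch size is an arbitrary example):
#   loader = data.DataLoader(dataset, batch_size=32, collate_fn=detection_collate)
#   images, targets = next(iter(loader))  # images: (32, C, H, W); targets: list of tensors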
from lib.utils.data_augment import preproc
from lib.utils.amdegroot_augmentations import SSDAugmentation
import torch.utils.data as data
def load_data(cfg, phase):
if phase == 'train':
dataset = dataset_map[cfg.DATASET](cfg.DATASET_DIR, cfg.TRAIN_SETS, preproc(cfg.IMAGE_SIZE,
cfg.PIXEL_MEANS, cfg.PROB), transform=SSDAugmentation(cfg.IMAGE_SIZE,
cfg.PIXEL_MEANS))
data_loader = data.DataLoader(dataset, cfg.TRAIN_BATCH_SIZE, num_workers=cfg.NUM_WORKERS,
shuffle=True, collate_fn=detection_collate, pin_memory=True)
if phase == 'eval':
dataset = dataset_map[cfg.DATASET](cfg.DATASET_DIR, cfg.TEST_SETS, preproc(cfg.IMAGE_SIZE, cfg.PIXEL_MEANS, -1))
data_loader = data.DataLoader(dataset, cfg.TEST_BATCH_SIZE, num_workers=cfg.NUM_WORKERS,
shuffle=False, collate_fn=detection_collate, pin_memory=True)
if phase == 'test':
dataset = dataset_map[cfg.DATASET](cfg.DATASET_DIR, cfg.TEST_SETS, preproc(cfg.IMAGE_SIZE, cfg.PIXEL_MEANS, -2))
data_loader = data.DataLoader(dataset, cfg.TEST_BATCH_SIZE, num_workers=cfg.NUM_WORKERS,
shuffle=False, collate_fn=detection_collate, pin_memory=True)
if phase == 'visualize':
dataset = dataset_map[cfg.DATASET](cfg.DATASET_DIR, cfg.TEST_SETS, preproc(cfg.IMAGE_SIZE, cfg.PIXEL_MEANS, 1))
data_loader = data.DataLoader(dataset, cfg.TEST_BATCH_SIZE, num_workers=cfg.NUM_WORKERS,
shuffle=False, collate_fn=detection_collate, pin_memory=True)
return data_loader
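# Usage sketch (the cfg fields are assumed to match the attributes referenced above):
#   train_loader = load_data(cfg, 'train')
#   images, targets = next(iter(train_loader))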
| 39.734177
| 120
| 0.631093
|
from lib.dataset import voc
from lib.dataset import coco
dataset_map = {
'voc': voc.VOCDetection,
'coco': coco.COCODetection,
}
def gen_dataset_fn(name):
if name not in dataset_map:
        raise ValueError('Unknown dataset: %s' % name)
func = dataset_map[name]
return func
import torch
import numpy as np
def detection_collate(batch):
targets = []
imgs = []
for _, sample in enumerate(batch):
for _, tup in enumerate(sample):
if torch.is_tensor(tup):
imgs.append(tup)
elif isinstance(tup, type(np.empty(0))):
annos = torch.from_numpy(tup).float()
targets.append(annos)
return (torch.stack(imgs, 0), targets)
from lib.utils.data_augment import preproc
from lib.utils.amdegroot_augmentations import SSDAugmentation
import torch.utils.data as data
def load_data(cfg, phase):
if phase == 'train':
dataset = dataset_map[cfg.DATASET](cfg.DATASET_DIR, cfg.TRAIN_SETS, preproc(cfg.IMAGE_SIZE,
cfg.PIXEL_MEANS, cfg.PROB), transform=SSDAugmentation(cfg.IMAGE_SIZE,
cfg.PIXEL_MEANS))
data_loader = data.DataLoader(dataset, cfg.TRAIN_BATCH_SIZE, num_workers=cfg.NUM_WORKERS,
shuffle=True, collate_fn=detection_collate, pin_memory=True)
if phase == 'eval':
dataset = dataset_map[cfg.DATASET](cfg.DATASET_DIR, cfg.TEST_SETS, preproc(cfg.IMAGE_SIZE, cfg.PIXEL_MEANS, -1))
data_loader = data.DataLoader(dataset, cfg.TEST_BATCH_SIZE, num_workers=cfg.NUM_WORKERS,
shuffle=False, collate_fn=detection_collate, pin_memory=True)
if phase == 'test':
dataset = dataset_map[cfg.DATASET](cfg.DATASET_DIR, cfg.TEST_SETS, preproc(cfg.IMAGE_SIZE, cfg.PIXEL_MEANS, -2))
data_loader = data.DataLoader(dataset, cfg.TEST_BATCH_SIZE, num_workers=cfg.NUM_WORKERS,
shuffle=False, collate_fn=detection_collate, pin_memory=True)
if phase == 'visualize':
dataset = dataset_map[cfg.DATASET](cfg.DATASET_DIR, cfg.TEST_SETS, preproc(cfg.IMAGE_SIZE, cfg.PIXEL_MEANS, 1))
data_loader = data.DataLoader(dataset, cfg.TEST_BATCH_SIZE, num_workers=cfg.NUM_WORKERS,
shuffle=False, collate_fn=detection_collate, pin_memory=True)
return data_loader
| true
| true
|
1c48f62675be1d81c467bb0ed31ff04385d1fb8a
| 322
|
py
|
Python
|
move/config.py
|
ninamiolane/move
|
83ab147ad1ebab6972591357f02fa29e186116f0
|
[
"MIT"
] | null | null | null |
move/config.py
|
ninamiolane/move
|
83ab147ad1ebab6972591357f02fa29e186116f0
|
[
"MIT"
] | null | null | null |
move/config.py
|
ninamiolane/move
|
83ab147ad1ebab6972591357f02fa29e186116f0
|
[
"MIT"
] | null | null | null |
import logging
import torch
# Set the configuration of the model
logging.info('Configure the run')
batch_size = 8
learning_rate= 3e-4
epochs = 10
seq_len=128
negative_slope = 0  # LeakyReLU slope (0 behaves like a standard ReLU)
logging.info('Setup device')
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
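# Tensors can then be allocated on the selected device, e.g.:
#   x = torch.zeros(batch_size, seq_len, device=device)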
| 20.125
| 36
| 0.742236
|
import logging
import torch
logging.info('Configure the run')
batch_size = 8
learning_rate= 3e-4
epochs = 10
seq_len=128
negative_slope = 0
logging.info('Setup device')
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
| true
| true
|
1c48f691945537de6b233fd87e0288531b17339d
| 944
|
py
|
Python
|
latent_experiments/discrete/Exp_hiv_test.py
|
ferjorosa/test-glfm
|
b219c650d0429ea71b953743730ae53cc122a61b
|
[
"MIT"
] | null | null | null |
latent_experiments/discrete/Exp_hiv_test.py
|
ferjorosa/test-glfm
|
b219c650d0429ea71b953743730ae53cc122a61b
|
[
"MIT"
] | null | null | null |
latent_experiments/discrete/Exp_hiv_test.py
|
ferjorosa/test-glfm
|
b219c650d0429ea71b953743730ae53cc122a61b
|
[
"MIT"
] | null | null | null |
import DiscreteExperiment
class Exp_hiv_test(DiscreteExperiment.DiscreteExperiment):
def __init__(self, _data_name):
DiscreteExperiment.DiscreteExperiment.__init__(self, _data_name)
def run(self, run, n_folds, fold_log):
print("\n------------------------------------------------------------------")
print("------------------------------------------------------------------")
print("---------------------------- HIV_TEST --------------------------")
print("------------------------------------------------------------------")
print("------------------------------------------------------------------\n")
DiscreteExperiment.DiscreteExperiment.run(self, run, n_folds, fold_log)
def main():
run = 1
n_folds = 10
data_name = "hiv_test"
fold_log = True
exp = Exp_hiv_test(data_name)
exp.run(run, n_folds, fold_log)
if __name__ == "__main__":
main()
| 31.466667
| 85
| 0.426907
|
import DiscreteExperiment
class Exp_hiv_test(DiscreteExperiment.DiscreteExperiment):
def __init__(self, _data_name):
DiscreteExperiment.DiscreteExperiment.__init__(self, _data_name)
def run(self, run, n_folds, fold_log):
print("\n------------------------------------------------------------------")
print("------------------------------------------------------------------")
print("---------------------------- HIV_TEST --------------------------")
print("------------------------------------------------------------------")
print("------------------------------------------------------------------\n")
DiscreteExperiment.DiscreteExperiment.run(self, run, n_folds, fold_log)
def main():
run = 1
n_folds = 10
data_name = "hiv_test"
fold_log = True
exp = Exp_hiv_test(data_name)
exp.run(run, n_folds, fold_log)
if __name__ == "__main__":
main()
| true
| true
|
1c48f7aabc4f2ca2adbb00c146a938965e06adb9
| 953
|
py
|
Python
|
tests/manage/rgw/conftest.py
|
MeridianExplorer/ocs-ci
|
a33d5116128b88f176f5eff68a3ef805125cdba1
|
[
"MIT"
] | null | null | null |
tests/manage/rgw/conftest.py
|
MeridianExplorer/ocs-ci
|
a33d5116128b88f176f5eff68a3ef805125cdba1
|
[
"MIT"
] | null | null | null |
tests/manage/rgw/conftest.py
|
MeridianExplorer/ocs-ci
|
a33d5116128b88f176f5eff68a3ef805125cdba1
|
[
"MIT"
] | null | null | null |
import logging
from ocs_ci.framework import config
from ocs_ci.ocs.constants import ON_PREM_PLATFORMS
from ocs_ci.utility import version
log = logging.getLogger(__name__)
def pytest_collection_modifyitems(items):
"""
A pytest hook to filter out RGW tests
when running on cloud platforms
Args:
items: list of collected tests
"""
if (
config.ENV_DATA["platform"].lower() not in ON_PREM_PLATFORMS
or version.get_semantic_ocs_version_from_config() < version.VERSION_4_5
):
for item in items.copy():
if "manage/rgw" in str(item.fspath):
log.info(
f"Test {item} is removed from the collected items"
f" due to {config.ENV_DATA['platform'].lower()} not being an on-prem platform "
f"or OCS version ({config.ENV_DATA['ocs_version']}) being lower than 4.5"
)
items.remove(item)
| 31.766667
| 99
| 0.628541
|
import logging
from ocs_ci.framework import config
from ocs_ci.ocs.constants import ON_PREM_PLATFORMS
from ocs_ci.utility import version
log = logging.getLogger(__name__)
def pytest_collection_modifyitems(items):
if (
config.ENV_DATA["platform"].lower() not in ON_PREM_PLATFORMS
or version.get_semantic_ocs_version_from_config() < version.VERSION_4_5
):
for item in items.copy():
if "manage/rgw" in str(item.fspath):
log.info(
f"Test {item} is removed from the collected items"
f" due to {config.ENV_DATA['platform'].lower()} not being an on-prem platform "
f"or OCS version ({config.ENV_DATA['ocs_version']}) being lower than 4.5"
)
items.remove(item)
| true
| true
|
1c48f7b36d79d6b90662b5de7aad42529d400ca3
| 2,465
|
py
|
Python
|
plugins/currencies/src/function.py
|
mariacarlinahernandez/code-examples
|
ebfa40c301bedfea1c9c41644a6fcd534a0dcd0f
|
[
"MIT"
] | 4
|
2020-08-16T15:05:49.000Z
|
2021-03-04T10:57:25.000Z
|
plugins/currencies/src/function.py
|
mariacarlinahernandez/code-examples
|
ebfa40c301bedfea1c9c41644a6fcd534a0dcd0f
|
[
"MIT"
] | 2
|
2019-04-30T13:50:48.000Z
|
2020-01-17T23:33:56.000Z
|
plugins/currencies/src/function.py
|
mariacarlinahernandez/code-examples
|
ebfa40c301bedfea1c9c41644a6fcd534a0dcd0f
|
[
"MIT"
] | 19
|
2019-01-08T15:42:28.000Z
|
2022-03-30T20:03:33.000Z
|
import requests
import time
def main(kwargs):
print("[INFO] Info recieved: {}".format(kwargs))
if len(kwargs) < 4:
print("[ERROR] One or more parameters are missing")
return {"result": "error"}
result = get_currency(**kwargs)
if result.get("result") == "ok":
args = result.get("data")
else:
return result
print("[INFO] Currencies obtained", args)
data = {}
for i in args.get("rates"):
data[i] = {
"value": args.get("rates").get(i),
"context": {"base": args.get("base")},
}
req = update_device(data, **kwargs)
del kwargs
return req
def get_currency(currencies, base, _plugin_env_API_URL, **kwargs):
url = "{}/latest?base={}&symbols={}".format(_plugin_env_API_URL, base, currencies)
headers = {"Content-Type": "application/json"}
try:
req = create_request(url, headers, attempts=5, request_type="get")
except:
return {
"result": "[ERROR] The currency server did not respond, please try again later"
}
return {"result": "ok", "data": req.json()}
def update_device(
payload, _plugin_env_UBIDOTS_URL, deviceLabel, ubidotsToken, **kwargs
):
"""
updates a variable with a single dot
"""
url = "{}/api/v1.6/devices/{}".format(_plugin_env_UBIDOTS_URL, deviceLabel)
headers = {"X-Auth-Token": ubidotsToken, "Content-Type": "application/json"}
req = create_request(url, headers, attempts=5, request_type="post", data=payload)
return {"result": "ok", "data": req.json()}
def create_request(url, headers, attempts, request_type, data=None):
"""
Function to make a request to the server
"""
request_func = getattr(requests, request_type)
kwargs = {"url": url, "headers": headers}
if request_type == "post" or request_type == "patch":
kwargs["json"] = data
try:
req = request_func(**kwargs)
print("[INFO] Request result: {}".format(req.text))
status_code = req.status_code
time.sleep(1)
        # Retry while the server keeps returning an error; the original
        # condition compared the caller-supplied `attempts` (already 5)
        # against 5, so the retry loop never ran.
        retries = 0
        while status_code >= 400 and retries < attempts:
            req = request_func(**kwargs)
            print("[INFO] Request result: {}".format(req.text))
            status_code = req.status_code
            retries += 1
            time.sleep(1)
return req
except Exception as e:
print("[ERROR] There was an error with the request, details:")
print(e)
return None
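# Usage sketch (URL and headers are placeholders):
#   req = create_request("https://example.com/latest", {"Content-Type": "application/json"},
#                        attempts=3, request_type="get")
#   if req is not None and req.status_code < 400:
#       data = req.json()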
| 29.345238
| 91
| 0.599594
|
import requests
import time
def main(kwargs):
print("[INFO] Info recieved: {}".format(kwargs))
if len(kwargs) < 4:
print("[ERROR] One or more parameters are missing")
return {"result": "error"}
result = get_currency(**kwargs)
if result.get("result") == "ok":
args = result.get("data")
else:
return result
print("[INFO] Currencies obtained", args)
data = {}
for i in args.get("rates"):
data[i] = {
"value": args.get("rates").get(i),
"context": {"base": args.get("base")},
}
req = update_device(data, **kwargs)
del kwargs
return req
def get_currency(currencies, base, _plugin_env_API_URL, **kwargs):
url = "{}/latest?base={}&symbols={}".format(_plugin_env_API_URL, base, currencies)
headers = {"Content-Type": "application/json"}
try:
req = create_request(url, headers, attempts=5, request_type="get")
except:
return {
"result": "[ERROR] The currency server did not respond, please try again later"
}
return {"result": "ok", "data": req.json()}
def update_device(
payload, _plugin_env_UBIDOTS_URL, deviceLabel, ubidotsToken, **kwargs
):
url = "{}/api/v1.6/devices/{}".format(_plugin_env_UBIDOTS_URL, deviceLabel)
headers = {"X-Auth-Token": ubidotsToken, "Content-Type": "application/json"}
req = create_request(url, headers, attempts=5, request_type="post", data=payload)
return {"result": "ok", "data": req.json()}
def create_request(url, headers, attempts, request_type, data=None):
request_func = getattr(requests, request_type)
kwargs = {"url": url, "headers": headers}
if request_type == "post" or request_type == "patch":
kwargs["json"] = data
try:
req = request_func(**kwargs)
print("[INFO] Request result: {}".format(req.text))
status_code = req.status_code
time.sleep(1)
        retries = 0
        while status_code >= 400 and retries < attempts:
            req = request_func(**kwargs)
            print("[INFO] Request result: {}".format(req.text))
            status_code = req.status_code
            retries += 1
            time.sleep(1)
return req
except Exception as e:
print("[ERROR] There was an error with the request, details:")
print(e)
return None
| true
| true
|
1c48f7bd6ff7ccc1fa63ca67184ef1af3ace64ce
| 4,029
|
py
|
Python
|
pymatgen/util/serialization.py
|
anjlip/pymatgen
|
62ecae1c7382a41861e3a5d9b9c8dd1207472409
|
[
"MIT"
] | 2
|
2017-10-02T03:11:47.000Z
|
2018-12-02T12:56:12.000Z
|
pymatgen/util/serialization.py
|
darnoceloc/pymatgen
|
5cc42912a12a265a603df7e34c856561f76edc1f
|
[
"MIT"
] | 3
|
2017-07-18T01:13:41.000Z
|
2019-04-29T18:17:30.000Z
|
pymatgen/util/serialization.py
|
darnoceloc/pymatgen
|
5cc42912a12a265a603df7e34c856561f76edc1f
|
[
"MIT"
] | 2
|
2016-06-15T00:12:59.000Z
|
2018-12-02T12:56:47.000Z
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import json
import functools
import pickle
from pymatgen.core.periodic_table import Element
"""
Most features of this module have been moved to monty. Please refer to
the monty.json and monty.serialization documentation.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Apr 30, 2012"
def pmg_serialize(method):
"""
Decorator for methods that add MSON serializations keys
to the dictionary. See documentation of MSON for more details
"""
@functools.wraps(method)
def wrapper(*args, **kwargs):
self = args[0]
d = method(*args, **kwargs)
# Add @module and @class
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
return wrapper
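# Minimal usage sketch (the class below is hypothetical, not part of pymatgen):
#   class Thing:
#       @pmg_serialize
#       def as_dict(self):
#           return {"value": 1}
#   Thing().as_dict()  # -> {'value': 1, '@module': '__main__', '@class': 'Thing'}
#   (the '@module' value depends on where the class is defined)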
def json_pretty_dump(obj, filename):
"""
Serialize obj as a JSON formatted stream to the given filename (
pretty printing version)
"""
with open(filename, "wt") as fh:
        json.dump(obj, fh, indent=4, sort_keys=True)
class PmgPickler(pickle.Pickler):
"""
Persistence of External Objects as described in section 12.1.5.1 of
https://docs.python.org/3/library/pickle.html
"""
def persistent_id(self, obj):
"""Instead of pickling as a regular class instance, we emit a
persistent ID."""
if isinstance(obj, Element):
# Here, our persistent ID is simply a tuple, containing a tag and
# a key
return obj.__class__.__name__, obj.symbol
else:
# If obj does not have a persistent ID, return None. This means obj
# needs to be pickled as usual.
return None
class PmgUnpickler(pickle.Unpickler):
"""
Persistence of External Objects as described in section 12.1.5.1 of
https://docs.python.org/3/library/pickle.html
"""
def persistent_load(self, pid):
"""
This method is invoked whenever a persistent ID is encountered.
Here, pid is the tuple returned by PmgPickler.
"""
try:
type_tag, key_id = pid
except Exception as exc:
# Sometimes we get a string such as ('Element', u'C') instead
            # of a real tuple. Use ast to evaluate the expression (much safer
# than eval).
import ast
type_tag, key_id = ast.literal_eval(pid)
if type_tag == "Element":
return Element(key_id)
else:
# Always raises an error if you cannot return the correct object.
# Otherwise, the unpickler will think None is the object referenced
# by the persistent ID.
raise pickle.UnpicklingError(
"unsupported persistent object with pid %s" % pid)
def pmg_pickle_load(filobj, **kwargs):
"""
Loads a pickle file and deserialize it with PmgUnpickler.
Args:
filobj: File-like object
\\*\\*kwargs: Any of the keyword arguments supported by PmgUnpickler
Returns:
Deserialized object.
"""
return PmgUnpickler(filobj, **kwargs).load()
def pmg_pickle_dump(obj, filobj, **kwargs):
"""
Dump an object to a pickle file using PmgPickler.
Args:
obj : Object to dump.
fileobj: File-like object
\\*\\*kwargs: Any of the keyword arguments supported by PmgPickler
"""
return PmgPickler(filobj, **kwargs).dump(obj)
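# Round-trip sketch for the Element persistence above (the file name is arbitrary):
#   with open("element.pkl", "wb") as f:
#       pmg_pickle_dump(Element("Fe"), f)
#   with open("element.pkl", "rb") as f:
#       fe = pmg_pickle_load(f)  # rebuilt via PmgUnpickler.persistent_load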
class SlotPickleMixin:
"""
This mixin makes it possible to pickle/unpickle objects with __slots__
defined.
"""
def __getstate__(self):
return dict(
(slot, getattr(self, slot))
for slot in self.__slots__ if hasattr(self, slot)
)
def __setstate__(self, state):
for slot, value in state.items():
setattr(self, slot, value)
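# Sketch of a slotted class made picklable by the mixin (hypothetical example):
#   class Point(SlotPickleMixin):
#       __slots__ = ("x", "y")
#       def __init__(self, x, y):
#           self.x, self.y = x, y
#   pickle.loads(pickle.dumps(Point(1, 2))).x  # -> 1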
| 28.373239
| 79
| 0.63167
|
import json
import functools
import pickle
from pymatgen.core.periodic_table import Element
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Apr 30, 2012"
def pmg_serialize(method):
@functools.wraps(method)
def wrapper(*args, **kwargs):
self = args[0]
d = method(*args, **kwargs)
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
return wrapper
def json_pretty_dump(obj, filename):
with open(filename, "wt") as fh:
        json.dump(obj, fh, indent=4, sort_keys=True)
class PmgPickler(pickle.Pickler):
def persistent_id(self, obj):
if isinstance(obj, Element):
return obj.__class__.__name__, obj.symbol
else:
return None
class PmgUnpickler(pickle.Unpickler):
def persistent_load(self, pid):
try:
type_tag, key_id = pid
except Exception as exc:
import ast
type_tag, key_id = ast.literal_eval(pid)
if type_tag == "Element":
return Element(key_id)
else:
raise pickle.UnpicklingError(
"unsupported persistent object with pid %s" % pid)
def pmg_pickle_load(filobj, **kwargs):
return PmgUnpickler(filobj, **kwargs).load()
def pmg_pickle_dump(obj, filobj, **kwargs):
return PmgPickler(filobj, **kwargs).dump(obj)
class SlotPickleMixin:
def __getstate__(self):
return dict(
(slot, getattr(self, slot))
for slot in self.__slots__ if hasattr(self, slot)
)
def __setstate__(self, state):
for slot, value in state.items():
setattr(self, slot, value)
| true
| true
|
1c48f96d65f6ea07d262df3ba991c8effd958e17
| 5,364
|
py
|
Python
|
ghost/core/shell.py
|
Bcoderx6/Ghost
|
2d518b838315d257bfdd5655eaf688c3796267c5
|
[
"MIT"
] | 2
|
2022-01-21T11:34:03.000Z
|
2022-03-11T22:08:25.000Z
|
ghost/core/shell.py
|
Bcoderx6/Ghost
|
2d518b838315d257bfdd5655eaf688c3796267c5
|
[
"MIT"
] | null | null | null |
ghost/core/shell.py
|
Bcoderx6/Ghost
|
2d518b838315d257bfdd5655eaf688c3796267c5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from os import system, chdir
from subprocess import CalledProcessError, check_output
from ghost.core.badges import Badges
from ghost.core.helper import Helper
from ghost.core.loader import Loader
class Shell:
def __init__(self, ghost):
self.ghost = ghost
self.badges = Badges()
self.helper = Helper()
self.loader = Loader(ghost)
def check_root(self):
try:
output = check_output(["adb", "shell", "which", "su"])
return_code = 0
except CalledProcessError as e:
return_code = e.returncode
        # `which su` exits 0 when su is present, i.e. the device is rooted;
        # the original return values were inverted.
        if not return_code:
            return True
        return False
def shell(self, target_addr):
target_commands = self.loader.load_modules()
while True:
try:
command = str(input('\033[4mghost\033[0m(\033[1;31m' + target_addr + '\033[0m)> '))
while not command.strip():
command = str(input('\033[4mghost\033[0m(\033[1;31m' + target_addr + '\033[0m)> '))
command = command.strip()
arguments = "".join(command.split(command.split()[0])).strip()
command = command.split()
if command[0] == "help":
self.helper.show_commands(target_commands)
elif command[0] == "exit":
print(self.badges.G + "Cleaning up...")
self.ghost.disconnect(target_addr)
self.ghost.stop_server()
break
elif command[0] == "details":
if len(command) < 2:
print("Usage: details <command>")
else:
if command[1] in target_commands.keys():
print(self.badges.I + "Module Name: " + target_commands[command[1]].details['name'])
authors = ""
for author in target_commands[command[1]].details['authors']:
authors += author + " "
print(self.badges.I + "Module Authors: " + authors.strip())
print(self.badges.I + "Module Description: " + target_commands[command[1]].details[
'description'])
print(self.badges.I + "Module Usage: " + target_commands[command[1]].details['usage'])
else:
print(self.badges.E + "No such module command!")
elif command[0] == "clear":
system("clear")
else:
if command[0] in target_commands.keys():
if target_commands[command[0]].details['needs_args']:
if (len(command) - 1) < int(target_commands[command[0]].details['args']):
print("Usage: " + target_commands[command[0]].details['usage'])
else:
if target_commands[command[0]].details['needs_root']:
if self.check_root():
target_commands[command[0]].run(arguments)
else:
print(self.badges.E + "Target device is not rooted!")
else:
target_commands[command[0]].run(arguments)
else:
if target_commands[command[0]].details['needs_root']:
if self.check_root():
target_commands[command[0]].run()
else:
print(self.badges.E + "Target device is not rooted!")
else:
target_commands[command[0]].run()
else:
print(self.badges.E + "Unrecognized command!")
except (KeyboardInterrupt, EOFError):
print("")
except Exception as e:
print(self.badges.E + "An error occured: " + str(e) + "!")
| 45.846154
| 114
| 0.525168
|
from os import system, chdir
from subprocess import CalledProcessError, check_output
from ghost.core.badges import Badges
from ghost.core.helper import Helper
from ghost.core.loader import Loader
class Shell:
def __init__(self, ghost):
self.ghost = ghost
self.badges = Badges()
self.helper = Helper()
self.loader = Loader(ghost)
def check_root(self):
try:
output = check_output(["adb", "shell", "which", "su"])
return_code = 0
except CalledProcessError as e:
return_code = e.returncode
        if not return_code:
            return True
        return False
def shell(self, target_addr):
target_commands = self.loader.load_modules()
while True:
try:
command = str(input('\033[4mghost\033[0m(\033[1;31m' + target_addr + '\033[0m)> '))
while not command.strip():
command = str(input('\033[4mghost\033[0m(\033[1;31m' + target_addr + '\033[0m)> '))
command = command.strip()
arguments = "".join(command.split(command.split()[0])).strip()
command = command.split()
if command[0] == "help":
self.helper.show_commands(target_commands)
elif command[0] == "exit":
print(self.badges.G + "Cleaning up...")
self.ghost.disconnect(target_addr)
self.ghost.stop_server()
break
elif command[0] == "details":
if len(command) < 2:
print("Usage: details <command>")
else:
if command[1] in target_commands.keys():
print(self.badges.I + "Module Name: " + target_commands[command[1]].details['name'])
authors = ""
for author in target_commands[command[1]].details['authors']:
authors += author + " "
print(self.badges.I + "Module Authors: " + authors.strip())
print(self.badges.I + "Module Description: " + target_commands[command[1]].details[
'description'])
print(self.badges.I + "Module Usage: " + target_commands[command[1]].details['usage'])
else:
print(self.badges.E + "No such module command!")
elif command[0] == "clear":
system("clear")
else:
if command[0] in target_commands.keys():
if target_commands[command[0]].details['needs_args']:
if (len(command) - 1) < int(target_commands[command[0]].details['args']):
print("Usage: " + target_commands[command[0]].details['usage'])
else:
if target_commands[command[0]].details['needs_root']:
if self.check_root():
target_commands[command[0]].run(arguments)
else:
print(self.badges.E + "Target device is not rooted!")
else:
target_commands[command[0]].run(arguments)
else:
if target_commands[command[0]].details['needs_root']:
if self.check_root():
target_commands[command[0]].run()
else:
print(self.badges.E + "Target device is not rooted!")
else:
target_commands[command[0]].run()
else:
print(self.badges.E + "Unrecognized command!")
except (KeyboardInterrupt, EOFError):
print("")
except Exception as e:
print(self.badges.E + "An error occured: " + str(e) + "!")
| true
| true
|
1c48f976fec8b1acb5931b64a52c232b58b01820
| 726
|
py
|
Python
|
Python/seven_kyu/greet.py
|
Brokenshire/codewars-projects
|
db9cd09618b8a7085b0d53ad76f73f9e249b9396
|
[
"Apache-2.0"
] | 1
|
2019-12-20T04:09:56.000Z
|
2019-12-20T04:09:56.000Z
|
Python/seven_kyu/greet.py
|
Brokenshire/codewars-projects
|
db9cd09618b8a7085b0d53ad76f73f9e249b9396
|
[
"Apache-2.0"
] | null | null | null |
Python/seven_kyu/greet.py
|
Brokenshire/codewars-projects
|
db9cd09618b8a7085b0d53ad76f73f9e249b9396
|
[
"Apache-2.0"
] | null | null | null |
# Python solution for 'Greet Me' codewars question.
# Level: 7 kyu
# Tags: FUNDAMENTALS and STRINGS.
# Author: Jack Brokenshire
# Date: 20/05/2020
import unittest
def greet(name):
"""
Greets a person with their name capitalized.
:param name: a string.
:return: greets that name, capitalized and ends with an exclamation point.
"""
return "Hello " + name.capitalize() + "!"
class TestGreet(unittest.TestCase):
"""Class to test 'greet' function"""
def test_greet(self):
self.assertEqual(greet('riley'), 'Hello Riley!')
self.assertEqual(greet('molly'), "Hello Molly!")
self.assertEqual(greet('BILLY'), "Hello Billy!")
if __name__ == '__main__':
unittest.main()
| 24.2
| 78
| 0.658402
|
import unittest
def greet(name):
return "Hello " + name.capitalize() + "!"
class TestGreet(unittest.TestCase):
def test_greet(self):
self.assertEqual(greet('riley'), 'Hello Riley!')
self.assertEqual(greet('molly'), "Hello Molly!")
self.assertEqual(greet('BILLY'), "Hello Billy!")
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c48f9b4512865b19372db903249405f3f8c7a76
| 9,521
|
py
|
Python
|
graph_objs/carpet/baxis/_tickformatstop.py
|
wwwidonja/changed_plotly
|
1bda35a438539a97c84a3ab3952e95e8848467bd
|
[
"MIT"
] | null | null | null |
graph_objs/carpet/baxis/_tickformatstop.py
|
wwwidonja/changed_plotly
|
1bda35a438539a97c84a3ab3952e95e8848467bd
|
[
"MIT"
] | null | null | null |
graph_objs/carpet/baxis/_tickformatstop.py
|
wwwidonja/changed_plotly
|
1bda35a438539a97c84a3ab3952e95e8848467bd
|
[
"MIT"
] | null | null | null |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickformatstop(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "carpet.baxis"
_path_str = "carpet.baxis.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
# dtickrange
# ----------
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
# enabled
# -------
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
# name
# ----
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
# value
# -----
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`new_plotly.graph_objs.carpet.baxis.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super(Tickformatstop, self).__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the new_plotly.graph_objs.carpet.baxis.Tickformatstop
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.carpet.baxis.Tickformatstop`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("dtickrange", None)
_v = dtickrange if dtickrange is not None else _v
if _v is not None:
self["dtickrange"] = _v
_v = arg.pop("enabled", None)
_v = enabled if enabled is not None else _v
if _v is not None:
self["enabled"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("templateitemname", None)
_v = templateitemname if templateitemname is not None else _v
if _v is not None:
self["templateitemname"] = _v
_v = arg.pop("value", None)
_v = value if value is not None else _v
if _v is not None:
self["value"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
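# --- Illustrative usage (not part of the generated class above) ---
# A minimal sketch of constructing and serializing a Tickformatstop. It assumes
# the standard `plotly` package layout; this repository names the package
# `new_plotly`, so the import path may need adjusting.
import plotly.graph_objects as go
stop = go.carpet.baxis.Tickformatstop(
    dtickrange=[None, 1000],  # applies while the axis dtick is at most 1000
    value=".2f",              # tick format used within that zoom range
    enabled=True,
)
print(stop.to_plotly_json())  # roughly {'dtickrange': [None, 1000], 'value': '.2f', 'enabled': True}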
| 33.524648
| 82
| 0.569688
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickformatstop(_BaseTraceHierarchyType):
_parent_path_str = "carpet.baxis"
_path_str = "carpet.baxis.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs
):
super(Tickformatstop, self).__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the new_plotly.graph_objs.carpet.baxis.Tickformatstop
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.carpet.baxis.Tickformatstop`"""
)
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
_v = arg.pop("dtickrange", None)
_v = dtickrange if dtickrange is not None else _v
if _v is not None:
self["dtickrange"] = _v
_v = arg.pop("enabled", None)
_v = enabled if enabled is not None else _v
if _v is not None:
self["enabled"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("templateitemname", None)
_v = templateitemname if templateitemname is not None else _v
if _v is not None:
self["templateitemname"] = _v
_v = arg.pop("value", None)
_v = value if value is not None else _v
if _v is not None:
self["value"] = _v
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| true
| true
|
1c48fa436ae520503532022d81b249752c5f81ce
| 1,222
|
py
|
Python
|
Searchlll.py
|
sangeetasingh17/python
|
02fe83d5188a643a1d95b1a2b5592ae6444e260f
|
[
"MIT"
] | 1
|
2020-11-11T14:42:48.000Z
|
2020-11-11T14:42:48.000Z
|
Searchlll.py
|
sangeetasingh17/python
|
02fe83d5188a643a1d95b1a2b5592ae6444e260f
|
[
"MIT"
] | null | null | null |
Searchlll.py
|
sangeetasingh17/python
|
02fe83d5188a643a1d95b1a2b5592ae6444e260f
|
[
"MIT"
] | 3
|
2021-08-04T20:26:06.000Z
|
2021-10-18T10:24:43.000Z
|
class Node:
    """A single linked-list cell holding one data item."""
    def __init__(self, data):
        self.data = data
        self.next = None
class LinkedList:
    """Singly linked list with an O(1) append via a tail pointer."""
    def __init__(self):
        self.head = None
        self.last_node = None
    def append(self, data):
        # The first element becomes both head and tail; later ones attach at the tail.
        if self.last_node is None:
            self.head = Node(data)
            self.last_node = self.head
        else:
            self.last_node.next = Node(data)
            self.last_node = self.last_node.next
    def display(self):
        # Print the items on one line, separated by spaces.
        current = self.head
        while current is not None:
            print(current.data, end=' ')
            current = current.next
    def find_index(self, key):
        # Linear search: return the zero-based index of key, or -1 if absent.
        current = self.head
        index = 0
        while current:
            if current.data == key:
                return index
            current = current.next
            index = index + 1
        return -1
a_llist = LinkedList()
for data in [4, -3, 1, 0, 9, 11]:
    a_llist.append(data)
print('The linked list: ', end='')
a_llist.display()
print()
key = int(input('What data item would you like to search for? '))
index = a_llist.find_index(key)
if index == -1:
    print(str(key) + ' was not found.')
else:
    print(str(key) + ' is at index ' + str(index) + '.')
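# --- Optional extension (not part of the original script) ---
# A hedged sketch: the same linear search driven by a generator, so traversal
# logic lives in one place. `iter_data` and `find_index_v2` are illustrative
# names, not part of the original code.
def iter_data(llist):
    current = llist.head
    while current is not None:
        yield current.data
        current = current.next
def find_index_v2(llist, key):
    for index, data in enumerate(iter_data(llist)):
        if data == key:
            return index
    return -1  # key not present
assert find_index_v2(a_llist, 9) == a_llist.find_index(9)  # both report 4
assert find_index_v2(a_llist, 7) == a_llist.find_index(7)  # both report -1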
| 24.44
| 65
| 0.5491
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
self.last_node = None
def append(self, data):
if self.last_node is None:
self.head = Node(data)
self.last_node = self.head
else:
self.last_node.next = Node(data)
self.last_node = self.last_node.next
def display(self):
current = self.head
while current is not None:
            print(current.data, end=' ')
current = current.next
def find_index(self, key):
current = self.head
index = 0
while current:
if current.data == key:
return index
current = current.next
index = index + 1
return -1
a_llist = LinkedList()
for data in [4, -3, 1, 0, 9, 11]:
a_llist.append(data)
print('The linked list: ', end='')
a_llist.display()
print()
key = int(input('What data item would you like to search for? '))
index = a_llist.find_index(key)
if index == -1:
print(str(key) + ' was not found.')
else:
print(str(key) + ' is at index ' + str(index) + '.')
| true
| true
|
1c48fb37aaf58efdadc2f66c9ca291f61705d507
| 1,133
|
py
|
Python
|
examples/sample_puma.py
|
Gigahawk/dh2vrml
|
65a610332fe5f3f1b0ba14ca9ba57193139e18bf
|
[
"MIT"
] | null | null | null |
examples/sample_puma.py
|
Gigahawk/dh2vrml
|
65a610332fe5f3f1b0ba14ca9ba57193139e18bf
|
[
"MIT"
] | 3
|
2022-02-09T12:07:41.000Z
|
2022-03-08T07:52:14.000Z
|
examples/sample_puma.py
|
Gigahawk/dh2vrml
|
65a610332fe5f3f1b0ba14ca9ba57193139e18bf
|
[
"MIT"
] | null | null | null |
from math import pi
params = [
{
"type": "revolute",
"theta": 0,
"d": 0,
"r": 0,
"alpha": -pi/2,
"offset": (0, 0, -300),
"color": (1, 0, 0),
"scale": 50,
},
{
"type": "revolute",
"theta": 0,
"d": 0,
"r": 430,
"alpha": pi,
"offset": (0, 0, 0),
"color": (0, 0, 1)
},
{
"type": "revolute",
"theta": pi/2,
"d": -149.1,
"r": 20.3,
"alpha": pi/2,
"offset": (0, 0, -75),
"color": (0, 1, 0)
},
{
"type": "revolute",
"theta": 0,
"d": 435,
"r": 0,
"alpha": pi/2,
"offset": (0, 0, 225),
"color": (0.7, 0, 1),
"scale": 15
},
{
"type": "revolute",
"theta": 0,
"d": 0,
"r": 0,
"alpha": -pi/2,
"offset": (0, 0, 0),
"color": (1, 0.4, 0)
},
{
"type": "revolute",
"theta": 0,
"d": 60,
"r": 0,
"alpha": 0,
"offset": (0, 0, 30),
"color": (0, 1, 1)
},
]
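# --- Illustrative forward kinematics (not part of the example file) ---
# A hedged sketch: assuming the classic Denavit-Hartenberg convention, each
# entry above maps to one 4x4 homogeneous transform. How dh2vrml itself applies
# `offset` and `scale` is not reproduced here, so treat this only as a way to
# inspect the zero-configuration pose.
import numpy as np
def dh_transform(theta, d, r, alpha):
    ct, st = np.cos(theta), np.sin(theta)
    ca, sa = np.cos(alpha), np.sin(alpha)
    return np.array([
        [ct, -st * ca, st * sa, r * ct],
        [st, ct * ca, -ct * sa, r * st],
        [0.0, sa, ca, d],
        [0.0, 0.0, 0.0, 1.0],
    ])
pose = np.eye(4)
for link in params:
    pose = pose @ dh_transform(link["theta"], link["d"], link["r"], link["alpha"])
print(pose[:3, 3])  # end-effector position in the base frame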
| 18.883333
| 31
| 0.295675
|
from math import pi
params = [
{
"type": "revolute",
"theta": 0,
"d": 0,
"r": 0,
"alpha": -pi/2,
"offset": (0, 0, -300),
"color": (1, 0, 0),
"scale": 50,
},
{
"type": "revolute",
"theta": 0,
"d": 0,
"r": 430,
"alpha": pi,
"offset": (0, 0, 0),
"color": (0, 0, 1)
},
{
"type": "revolute",
"theta": pi/2,
"d": -149.1,
"r": 20.3,
"alpha": pi/2,
"offset": (0, 0, -75),
"color": (0, 1, 0)
},
{
"type": "revolute",
"theta": 0,
"d": 435,
"r": 0,
"alpha": pi/2,
"offset": (0, 0, 225),
"color": (0.7, 0, 1),
"scale": 15
},
{
"type": "revolute",
"theta": 0,
"d": 0,
"r": 0,
"alpha": -pi/2,
"offset": (0, 0, 0),
"color": (1, 0.4, 0)
},
{
"type": "revolute",
"theta": 0,
"d": 60,
"r": 0,
"alpha": 0,
"offset": (0, 0, 30),
"color": (0, 1, 1)
},
]
| true
| true
|
1c48fbb34627d1bebd1ced1abced3024490050b7
| 4,516
|
py
|
Python
|
dataset/compute_metrics.py
|
sc0ttms/SE-TFCN
|
466a2d641c6ff4184c768c1e7aaf2b8a8158ce51
|
[
"BSD-3-Clause"
] | 9
|
2022-01-18T05:30:33.000Z
|
2022-03-09T02:25:11.000Z
|
dataset/compute_metrics.py
|
sc0ttms/SE-TFCN
|
466a2d641c6ff4184c768c1e7aaf2b8a8158ce51
|
[
"BSD-3-Clause"
] | 1
|
2022-01-22T01:52:08.000Z
|
2022-01-28T03:01:33.000Z
|
dataset/compute_metrics.py
|
sc0ttms/SE-TFCN
|
466a2d641c6ff4184c768c1e7aaf2b8a8158ce51
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import os
import argparse
import toml
import librosa
import pandas as pd
import numpy as np
from tqdm import tqdm
from joblib import Parallel, delayed
sys.path.append(os.getcwd())
from audio.metrics import SI_SDR, STOI, WB_PESQ, NB_PESQ, REGISTERED_METRICS
def calculate_metric(noisy_file, clean_file, sr=16000, metric_type="STOI", pre_load=False):
# get noisy, clean
    if not pre_load:
noisy, _ = librosa.load(noisy_file, sr=sr)
clean, _ = librosa.load(clean_file, sr=sr)
else:
noisy = noisy_file
clean = clean_file
assert len(noisy) == len(clean)
# get metric score
    if metric_type in ["SI_SDR"]:
        return SI_SDR(noisy, clean)
    elif metric_type in ["STOI"]:
        return STOI(noisy, clean, sr=sr)
    elif metric_type in ["WB_PESQ"]:
        return WB_PESQ(noisy, clean)
    elif metric_type in ["NB_PESQ"]:
        return NB_PESQ(noisy, clean)
    else:
        # fail loudly instead of silently returning None for unknown metrics
        raise ValueError("Unsupported metric_type: {}".format(metric_type))
def compute_metric(noisy_files, clean_files, metrics, n_folds=1, n_jobs=8, pre_load=False):
for metric_type, _ in metrics.items():
assert metric_type in REGISTERED_METRICS
split_num = len(noisy_files) // n_folds
score = []
for n in range(n_folds):
metric_score = Parallel(n_jobs=n_jobs)(
delayed(calculate_metric)(
noisy_file,
clean_file,
sr=8000 if metric_type in ["NB_PESQ"] else 16000,
metric_type=metric_type,
pre_load=pre_load,
)
for noisy_file, clean_file in tqdm(
zip(
noisy_files[n * split_num : (n + 1) * split_num],
clean_files[n * split_num : (n + 1) * split_num],
)
)
)
score.append(np.mean(metric_score))
metrics[metric_type] = np.mean(score)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="compute_metrics")
parser.add_argument("-c", "--config", required=True, type=str, help="Config (*.toml).")
args = parser.parse_args()
# get dataset path
dataset_path = os.path.join(os.getcwd(), "dataset_csv")
# get set path
train_path = os.path.join(dataset_path, "train.csv")
valid_path = os.path.join(dataset_path, "valid.csv")
test_path = os.path.join(dataset_path, "test.csv")
# get train files
train_files = pd.read_csv(train_path).values
train_noisy_files = train_files[:, 0].reshape(1, len(train_files))[0]
train_clean_files = train_files[:, 1].reshape(1, len(train_files))[0]
# get valid files
valid_files = pd.read_csv(valid_path).values
valid_noisy_files = valid_files[:, 0].reshape(1, len(valid_files))[0]
valid_clean_files = valid_files[:, 1].reshape(1, len(valid_files))[0]
# get test files
test_files = pd.read_csv(test_path).values
test_noisy_files = test_files[:, 0].reshape(1, len(test_files))[0]
test_clean_files = test_files[:, 1].reshape(1, len(test_files))[0]
# get compute metrics config
config = toml.load(args.config)
    # get n_folds and n_jobs
n_folds = config["ppl"]["n_folds"]
n_jobs = config["ppl"]["n_jobs"]
# get metrics
metrics = {
"SI_SDR": [],
"STOI": [],
"WB_PESQ": [],
"NB_PESQ": [],
}
# compute train metrics
compute_metric(
train_noisy_files, train_clean_files, metrics, n_folds=n_folds, n_jobs=n_jobs, pre_load=False,
)
# save train metrics
df = pd.DataFrame(metrics, index=["train"])
df.to_csv(os.path.join(dataset_path, "train_metrics.csv"))
# get metrics
metrics = {
"SI_SDR": [],
"STOI": [],
"WB_PESQ": [],
"NB_PESQ": [],
}
# compute valid metrics
compute_metric(
valid_noisy_files, valid_clean_files, metrics, n_folds=n_folds, n_jobs=n_jobs, pre_load=False,
)
    # save valid metrics
df = pd.DataFrame(metrics, index=["valid"])
df.to_csv(os.path.join(dataset_path, "valid_metrics.csv"))
# get metrics
metrics = {
"SI_SDR": [],
"STOI": [],
"WB_PESQ": [],
"NB_PESQ": [],
}
# compute test metrics
compute_metric(
test_noisy_files, test_clean_files, metrics, n_folds=n_folds, n_jobs=n_jobs, pre_load=False,
)
    # save test metrics
df = pd.DataFrame(metrics, index=["test"])
df.to_csv(os.path.join(dataset_path, "test_metrics.csv"))
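    # --- Illustrative direct call (a hedged sketch, not part of the original script) ---
    # compute_metric fills the `metrics` dict in place, and with pre_load=True it
    # accepts in-memory signals instead of file paths. The random signals below are
    # placeholders purely to show the calling convention.
    rng = np.random.default_rng(0)
    demo_clean = [rng.standard_normal(16000) for _ in range(4)]  # four 1 s clips at 16 kHz
    demo_noisy = [c + 0.1 * rng.standard_normal(16000) for c in demo_clean]
    demo_metrics = {"SI_SDR": []}
    compute_metric(demo_noisy, demo_clean, demo_metrics, n_folds=2, n_jobs=1, pre_load=True)
    print(demo_metrics["SI_SDR"])  # mean SI-SDR across the two folds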
| 31.58042
| 102
| 0.611382
|
import sys
import os
import argparse
import toml
import librosa
import pandas as pd
import numpy as np
from tqdm import tqdm
from joblib import Parallel, delayed
sys.path.append(os.getcwd())
from audio.metrics import SI_SDR, STOI, WB_PESQ, NB_PESQ, REGISTERED_METRICS
def calculate_metric(noisy_file, clean_file, sr=16000, metric_type="STOI", pre_load=False):
    if not pre_load:
noisy, _ = librosa.load(noisy_file, sr=sr)
clean, _ = librosa.load(clean_file, sr=sr)
else:
noisy = noisy_file
clean = clean_file
assert len(noisy) == len(clean)
    if metric_type in ["SI_SDR"]:
        return SI_SDR(noisy, clean)
    elif metric_type in ["STOI"]:
        return STOI(noisy, clean, sr=sr)
    elif metric_type in ["WB_PESQ"]:
        return WB_PESQ(noisy, clean)
    elif metric_type in ["NB_PESQ"]:
        return NB_PESQ(noisy, clean)
    else:
        raise ValueError("Unsupported metric_type: {}".format(metric_type))
def compute_metric(noisy_files, clean_files, metrics, n_folds=1, n_jobs=8, pre_load=False):
for metric_type, _ in metrics.items():
assert metric_type in REGISTERED_METRICS
split_num = len(noisy_files) // n_folds
score = []
for n in range(n_folds):
metric_score = Parallel(n_jobs=n_jobs)(
delayed(calculate_metric)(
noisy_file,
clean_file,
sr=8000 if metric_type in ["NB_PESQ"] else 16000,
metric_type=metric_type,
pre_load=pre_load,
)
for noisy_file, clean_file in tqdm(
zip(
noisy_files[n * split_num : (n + 1) * split_num],
clean_files[n * split_num : (n + 1) * split_num],
)
)
)
score.append(np.mean(metric_score))
metrics[metric_type] = np.mean(score)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="compute_metrics")
parser.add_argument("-c", "--config", required=True, type=str, help="Config (*.toml).")
args = parser.parse_args()
dataset_path = os.path.join(os.getcwd(), "dataset_csv")
train_path = os.path.join(dataset_path, "train.csv")
valid_path = os.path.join(dataset_path, "valid.csv")
test_path = os.path.join(dataset_path, "test.csv")
train_files = pd.read_csv(train_path).values
train_noisy_files = train_files[:, 0].reshape(1, len(train_files))[0]
train_clean_files = train_files[:, 1].reshape(1, len(train_files))[0]
valid_files = pd.read_csv(valid_path).values
valid_noisy_files = valid_files[:, 0].reshape(1, len(valid_files))[0]
valid_clean_files = valid_files[:, 1].reshape(1, len(valid_files))[0]
test_files = pd.read_csv(test_path).values
test_noisy_files = test_files[:, 0].reshape(1, len(test_files))[0]
test_clean_files = test_files[:, 1].reshape(1, len(test_files))[0]
config = toml.load(args.config)
n_folds = config["ppl"]["n_folds"]
n_jobs = config["ppl"]["n_jobs"]
metrics = {
"SI_SDR": [],
"STOI": [],
"WB_PESQ": [],
"NB_PESQ": [],
}
compute_metric(
train_noisy_files, train_clean_files, metrics, n_folds=n_folds, n_jobs=n_jobs, pre_load=False,
)
df = pd.DataFrame(metrics, index=["train"])
df.to_csv(os.path.join(dataset_path, "train_metrics.csv"))
metrics = {
"SI_SDR": [],
"STOI": [],
"WB_PESQ": [],
"NB_PESQ": [],
}
compute_metric(
valid_noisy_files, valid_clean_files, metrics, n_folds=n_folds, n_jobs=n_jobs, pre_load=False,
)
df = pd.DataFrame(metrics, index=["valid"])
df.to_csv(os.path.join(dataset_path, "valid_metrics.csv"))
metrics = {
"SI_SDR": [],
"STOI": [],
"WB_PESQ": [],
"NB_PESQ": [],
}
compute_metric(
test_noisy_files, test_clean_files, metrics, n_folds=n_folds, n_jobs=n_jobs, pre_load=False,
)
df = pd.DataFrame(metrics, index=["test"])
df.to_csv(os.path.join(dataset_path, "test_metrics.csv"))
| true
| true
|