hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dbbf7a3d1ea6a059d79ae71533a8dda4aef3f9db | 694 | py | Python | python_base/HTML/html_css/app.py | sven820/python | ddb13ffdab45bdb2c8ca8038cfa0c47f2502e554 | [
"Apache-2.0"
] | null | null | null | python_base/HTML/html_css/app.py | sven820/python | ddb13ffdab45bdb2c8ca8038cfa0c47f2502e554 | [
"Apache-2.0"
] | null | null | null | python_base/HTML/html_css/app.py | sven820/python | ddb13ffdab45bdb2c8ca8038cfa0c47f2502e554 | [
"Apache-2.0"
] | null | null | null | __author__ = "JJ.sven"
import tornado.web
from tornado import ioloop
class MainHandle(tornado.web.RequestHandler):
    """Request handler validating a user/password pair posted to /index.

    GET and POST both delegate to the same private login check, so the
    form can be submitted with either method.
    """

    def get(self):
        self.__login()

    def post(self, *args, **kwargs):
        self.__login()

    '''==========private=========='''
    def __login(self):
        # Read credentials from the request arguments.
        name = self.get_argument('user')
        pwd = self.get_argument('pwd')
        print(name, pwd)  # NOTE(review): debug print of credentials — remove for production
        # Hard-coded demo credentials; replies with a plain-text verdict.
        if name=='jxf' and pwd=='123':
            self.write('success')
        else:
            self.write('fail')
# Route table: map /index to the login handler.
application = tornado.web.Application([
    (r'/index', MainHandle),
])

if __name__ == '__main__':
    # Bind to localhost only, port 9090.
    application.listen(9090, address='localhost')
    # IOLoop.current() is the supported spelling; IOLoop.instance() is a
    # deprecated alias since Tornado 5.
    ioloop.IOLoop.current().start()
bbbbc39acdaf7c9ad88c4bdf4cdf86cbcb7807db | 716 | py | Python | data.py | wzwietering/mnist-fun | c14334bc5504f9d0d3dca5154986d49ae4532482 | [
"MIT"
] | null | null | null | data.py | wzwietering/mnist-fun | c14334bc5504f9d0d3dca5154986d49ae4532482 | [
"MIT"
] | null | null | null | data.py | wzwietering/mnist-fun | c14334bc5504f9d0d3dca5154986d49ae4532482 | [
"MIT"
] | null | null | null | from keras.datasets import mnist
def prepare_data(data_set, zero_center=False, flatten=False):
    """Normalise an image stack and add a trailing channel axis.

    :param data_set: array of shape (N, H, W) of integer pixel values
    :param zero_center: if True scale pixels into [-1, 1], else into [0, 1]
    :param flatten: if True return shape (N, H*W) instead of (N, H, W, 1)
    :return: float32 array of the preprocessed images
    """
    n, height, width = data_set.shape[0], data_set.shape[1], data_set.shape[2]
    # Add the single channel axis expected by conv layers, then go to float.
    data_set = data_set.reshape(n, height, width, 1).astype("float32")
    if zero_center:
        data_set = (data_set - 127.5) / 127.5
    else:
        data_set = data_set / 255
    if flatten:
        # Collapse the spatial (and unit channel) axes into one vector.
        data_set = data_set.reshape(n, height * width)
    return data_set
def get_data(zero_center=False, flatten=False):
    """Load MNIST and return preprocessed (trainX, trainY, testX, testY)."""
    (train_images, train_labels), (test_images, test_labels) = mnist.load_data()
    # Labels are returned as-is; only the image arrays are preprocessed.
    train_images = prepare_data(train_images, zero_center, flatten)
    test_images = prepare_data(test_images, zero_center, flatten)
    return train_images, train_labels, test_images, test_labels
| 37.684211 | 93 | 0.696927 |
6b36ead2acac4212c83e2700570ba2ef9eef98cf | 65 | py | Python | paraguay/telegram.py | PythonParaguay/paraguay | 201ffed271a1f3c83ac6e4bd29b0688946968a2a | [
"MIT"
] | 4 | 2018-05-31T01:55:13.000Z | 2018-06-09T12:54:46.000Z | paraguay/telegram.py | PythonParaguay/paraguay | 201ffed271a1f3c83ac6e4bd29b0688946968a2a | [
"MIT"
] | null | null | null | paraguay/telegram.py | PythonParaguay/paraguay | 201ffed271a1f3c83ac6e4bd29b0688946968a2a | [
"MIT"
] | 1 | 2018-06-08T21:35:13.000Z | 2018-06-08T21:35:13.000Z | import webbrowser
# Open the Python Paraguay Telegram group in the default web browser.
# (Original line was fused with dataset-statistics columns; restored here.)
webbrowser.open("https://t.me/pythonparaguay")
f64acb77f48984def6dcb78f91ec5a60b47fc193 | 1,384 | py | Python | resources/renderers.py | erlendr/swapi | a7d69bb88bf0e0f25f00dde1dbb196084f43f6d6 | [
"BSD-3-Clause"
] | null | null | null | resources/renderers.py | erlendr/swapi | a7d69bb88bf0e0f25f00dde1dbb196084f43f6d6 | [
"BSD-3-Clause"
] | null | null | null | resources/renderers.py | erlendr/swapi | a7d69bb88bf0e0f25f00dde1dbb196084f43f6d6 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
from rest_framework import renderers
class WookieeRenderer(renderers.JSONRenderer):
    """JSON renderer that transliterates its output into Wookiee-speak.

    Each lowercase ASCII letter of the JSON-encoded payload is replaced by
    its Shyriiwook digraph; every other character (digits, punctuation,
    uppercase letters, whitespace) passes through unchanged.
    """

    media_type = "application/json"
    charset = 'utf-8'
    format = "wookiee"
    # Lowercase letter -> Shyriiwook digraph.
    lookup = {
        "a": "ra",
        "b": "rh",
        "c": "oa",
        "d": "wa",
        "e": "wo",
        "f": "ww",
        "g": "rr",
        "h": "ac",
        "i": "ah",
        "j": "sh",
        "k": "or",
        "l": "an",
        "m": "sc",
        "n": "wh",
        "o": "oo",
        "p": "ak",
        "q": "rq",
        "r": "rc",
        "s": "c",
        "t": "ao",
        "u": "hu",
        "v": "ho",
        "w": "oh",
        "x": "k",
        "y": "ro",
        "z": "uf",
    }
    # Precomputed translation table: str.translate does the substitution in
    # one C-level pass instead of a quadratic Python-level "+=" string build.
    _translation = str.maketrans(lookup)

    def render(self, data, media_type=None, renderer_context=None):
        """Serialize ``data`` as JSON, then return the Wookiee bytes."""
        encoded_data = super(WookieeRenderer, self).render(
            data, media_type, renderer_context
        )
        return bytes(self.translate_to_wookie(encoded_data), encoding='utf8')

    def translate_to_wookie(self, data):
        """Return ``data`` (str, or utf-8 bytes) transliterated to Wookiee."""
        try:
            data = data.decode("utf-8")
        except (UnicodeDecodeError, AttributeError):
            pass
        return data.translate(self._translation)
| 23.862069 | 77 | 0.458092 |
eff625b92dc4c57f7f9a20a2b61ae9045e8f8dad | 24,220 | py | Python | venv/Lib/site-packages/pylint/extensions/docparams.py | AnxhelaMehmetaj/is219_flask | 1e88579f14a96c9826e9452b3c7f8e6477577ef7 | [
"BSD-3-Clause"
] | null | null | null | venv/Lib/site-packages/pylint/extensions/docparams.py | AnxhelaMehmetaj/is219_flask | 1e88579f14a96c9826e9452b3c7f8e6477577ef7 | [
"BSD-3-Clause"
] | null | null | null | venv/Lib/site-packages/pylint/extensions/docparams.py | AnxhelaMehmetaj/is219_flask | 1e88579f14a96c9826e9452b3c7f8e6477577ef7 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
# Copyright (c) https://github.com/PyCQA/pylint/blob/main/CONTRIBUTORS.txt
"""Pylint plugin for checking in Sphinx, Google, or Numpy style docstrings."""
import re
from typing import TYPE_CHECKING, Optional
import astroid
from astroid import nodes
from pylint.checkers import BaseChecker
from pylint.checkers import utils as checker_utils
from pylint.extensions import _check_docs_utils as utils
from pylint.extensions._check_docs_utils import Docstring
from pylint.interfaces import IAstroidChecker
from pylint.utils import get_global_option
if TYPE_CHECKING:
from pylint.lint import PyLinter
class DocstringParameterChecker(BaseChecker):
    """Checker for Sphinx, Google, or Numpy style docstrings.

    * Check that all function, method and constructor parameters are mentioned
      in the params and types part of the docstring. Constructor parameters
      can be documented in either the class docstring or ``__init__`` docstring,
      but not both.
    * Check that there are no naming inconsistencies between the signature and
      the documentation, i.e. also report documented parameters that are missing
      in the signature. This is important to find cases where parameters are
      renamed only in the code, not in the documentation.
    * Check that all explicitly raised exceptions in a function are documented
      in the function docstring. Caught exceptions are ignored.

    Activate this checker by adding the line::

        load-plugins=pylint.extensions.docparams

    to the ``MASTER`` section of your ``.pylintrc``.
    """

    __implements__ = IAstroidChecker

    name = "parameter_documentation"
    msgs = {
        "W9005": (
            '"%s" has constructor parameters documented in class and __init__',
            "multiple-constructor-doc",
            "Please remove parameter declarations in the class or constructor.",
        ),
        "W9006": (
            '"%s" not documented as being raised',
            "missing-raises-doc",
            "Please document exceptions for all raised exception types.",
        ),
        "W9008": (
            "Redundant returns documentation",
            "redundant-returns-doc",
            "Please remove the return/rtype documentation from this method.",
        ),
        "W9010": (
            "Redundant yields documentation",
            "redundant-yields-doc",
            "Please remove the yields documentation from this method.",
        ),
        "W9011": (
            "Missing return documentation",
            "missing-return-doc",
            "Please add documentation about what this method returns.",
            {"old_names": [("W9007", "old-missing-returns-doc")]},
        ),
        "W9012": (
            "Missing return type documentation",
            "missing-return-type-doc",
            "Please document the type returned by this method.",
            # we can't use the same old_name for two different warnings
            # {'old_names': [('W9007', 'missing-returns-doc')]},
        ),
        "W9013": (
            "Missing yield documentation",
            "missing-yield-doc",
            "Please add documentation about what this generator yields.",
            {"old_names": [("W9009", "old-missing-yields-doc")]},
        ),
        "W9014": (
            "Missing yield type documentation",
            "missing-yield-type-doc",
            "Please document the type yielded by this method.",
            # we can't use the same old_name for two different warnings
            # {'old_names': [('W9009', 'missing-yields-doc')]},
        ),
        "W9015": (
            '"%s" missing in parameter documentation',
            "missing-param-doc",
            "Please add parameter declarations for all parameters.",
            {"old_names": [("W9003", "old-missing-param-doc")]},
        ),
        "W9016": (
            '"%s" missing in parameter type documentation',
            "missing-type-doc",
            "Please add parameter type declarations for all parameters.",
            {"old_names": [("W9004", "old-missing-type-doc")]},
        ),
        "W9017": (
            '"%s" differing in parameter documentation',
            "differing-param-doc",
            "Please check parameter names in declarations.",
        ),
        "W9018": (
            '"%s" differing in parameter type documentation',
            "differing-type-doc",
            "Please check parameter names in type declarations.",
        ),
        "W9019": (
            '"%s" useless ignored parameter documentation',
            "useless-param-doc",
            "Please remove the ignored parameter documentation.",
        ),
        "W9020": (
            '"%s" useless ignored parameter type documentation',
            "useless-type-doc",
            "Please remove the ignored parameter type documentation.",
        ),
        "W9021": (
            'Missing any documentation in "%s"',
            "missing-any-param-doc",
            "Please add parameter and/or type documentation.",
        ),
    }

    options = (
        (
            "accept-no-param-doc",
            {
                "default": True,
                "type": "yn",
                "metavar": "<y or n>",
                "help": "Whether to accept totally missing parameter "
                "documentation in the docstring of a function that has "
                "parameters.",
            },
        ),
        (
            "accept-no-raise-doc",
            {
                "default": True,
                "type": "yn",
                "metavar": "<y or n>",
                "help": "Whether to accept totally missing raises "
                "documentation in the docstring of a function that "
                "raises an exception.",
            },
        ),
        (
            "accept-no-return-doc",
            {
                "default": True,
                "type": "yn",
                "metavar": "<y or n>",
                "help": "Whether to accept totally missing return "
                "documentation in the docstring of a function that "
                "returns a statement.",
            },
        ),
        (
            "accept-no-yields-doc",
            {
                "default": True,
                "type": "yn",
                "metavar": "<y or n>",
                "help": "Whether to accept totally missing yields "
                "documentation in the docstring of a generator.",
            },
        ),
        (
            "default-docstring-type",
            {
                "type": "choice",
                "default": "default",
                "choices": list(utils.DOCSTRING_TYPES),
                "help": "If the docstring type cannot be guessed "
                "the specified docstring type will be used.",
            },
        ),
    )

    priority = -2

    # Method names whose parameters may instead be documented on the class.
    constructor_names = {"__init__", "__new__"}
    # Parameter names that never require docstring entries.
    not_needed_param_in_docstring = {"self", "cls"}

    def visit_functiondef(self, node: nodes.FunctionDef) -> None:
        """Called for function and method definitions (def).

        :param node: Node for a function or method definition in the AST
        :type node: :class:`astroid.scoped_nodes.Function`
        """
        node_doc = utils.docstringify(node.doc_node, self.config.default_docstring_type)
        # skip functions that match the 'no-docstring-rgx' config option
        no_docstring_rgx = get_global_option(self, "no-docstring-rgx")
        if no_docstring_rgx and re.match(no_docstring_rgx, node.name):
            return
        # skip functions smaller than 'docstring-min-length'
        lines = checker_utils.get_node_last_lineno(node) - node.lineno
        max_lines = get_global_option(self, "docstring-min-length")
        if max_lines > -1 and lines < max_lines:
            return
        self.check_functiondef_params(node, node_doc)
        self.check_functiondef_returns(node, node_doc)
        self.check_functiondef_yields(node, node_doc)

    visit_asyncfunctiondef = visit_functiondef

    def check_functiondef_params(self, node, node_doc):
        """Check the parameter documentation of a function/method node."""
        node_allow_no_param = None
        if node.name in self.constructor_names:
            class_node = checker_utils.node_frame_class(node)
            if class_node is not None:
                class_doc = utils.docstringify(
                    class_node.doc_node, self.config.default_docstring_type
                )
                self.check_single_constructor_params(class_doc, node_doc, class_node)
                # __init__ or class docstrings can have no parameters documented
                # as long as the other documents them.
                node_allow_no_param = (
                    class_doc.has_params()
                    or class_doc.params_documented_elsewhere()
                    or None
                )
                class_allow_no_param = (
                    node_doc.has_params()
                    or node_doc.params_documented_elsewhere()
                    or None
                )
                self.check_arguments_in_docstring(
                    class_doc, node.args, class_node, class_allow_no_param
                )
        self.check_arguments_in_docstring(
            node_doc, node.args, node, node_allow_no_param
        )

    def check_functiondef_returns(self, node, node_doc):
        """Flag return documentation on functions that return nothing."""
        if (not node_doc.supports_yields and node.is_generator()) or node.is_abstract():
            return
        return_nodes = node.nodes_of_class(astroid.Return)
        if (node_doc.has_returns() or node_doc.has_rtype()) and not any(
            utils.returns_something(ret_node) for ret_node in return_nodes
        ):
            self.add_message("redundant-returns-doc", node=node)

    def check_functiondef_yields(self, node, node_doc):
        """Flag yields documentation on functions that are not generators."""
        if not node_doc.supports_yields or node.is_abstract():
            return
        if (
            node_doc.has_yields() or node_doc.has_yields_type()
        ) and not node.is_generator():
            self.add_message("redundant-yields-doc", node=node)

    def visit_raise(self, node: nodes.Raise) -> None:
        """Check that exceptions raised by the enclosing function are documented."""
        func_node = node.frame(future=True)
        if not isinstance(func_node, astroid.FunctionDef):
            return
        expected_excs = utils.possible_exc_types(node)
        if not expected_excs:
            return
        if not func_node.doc_node:
            # If this is a property setter,
            # the property should have the docstring instead.
            property_ = utils.get_setters_property(func_node)
            if property_:
                func_node = property_
        doc = utils.docstringify(func_node.doc_node, self.config.default_docstring_type)
        if not doc.matching_sections():
            if doc.doc:
                missing = {exc.name for exc in expected_excs}
                self._handle_no_raise_doc(missing, func_node)
            return
        found_excs_full_names = doc.exceptions()
        # Extract just the class name, e.g. "error" from "re.error"
        found_excs_class_names = {exc.split(".")[-1] for exc in found_excs_full_names}
        missing_excs = set()
        for expected in expected_excs:
            for found_exc in found_excs_class_names:
                if found_exc == expected.name:
                    break
                if any(found_exc == ancestor.name for ancestor in expected.ancestors()):
                    break
            else:
                missing_excs.add(expected.name)
        self._add_raise_message(missing_excs, func_node)

    def visit_return(self, node: nodes.Return) -> None:
        """Check that a value-returning function documents its return."""
        if not utils.returns_something(node):
            return
        if self.config.accept_no_return_doc:
            return
        func_node = node.frame(future=True)
        if not isinstance(func_node, astroid.FunctionDef):
            return
        doc = utils.docstringify(func_node.doc_node, self.config.default_docstring_type)
        is_property = checker_utils.decorated_with_property(func_node)
        if not (doc.has_returns() or (doc.has_property_returns() and is_property)):
            self.add_message("missing-return-doc", node=func_node)
        if func_node.returns:
            # A return type annotation counts as return-type documentation.
            return
        if not (doc.has_rtype() or (doc.has_property_type() and is_property)):
            self.add_message("missing-return-type-doc", node=func_node)

    def visit_yield(self, node: nodes.Yield) -> None:
        """Check that a generator documents what (and which type) it yields."""
        if self.config.accept_no_yields_doc:
            return
        func_node = node.frame(future=True)
        if not isinstance(func_node, astroid.FunctionDef):
            return
        doc = utils.docstringify(func_node.doc_node, self.config.default_docstring_type)
        if doc.supports_yields:
            doc_has_yields = doc.has_yields()
            doc_has_yields_type = doc.has_yields_type()
        else:
            # Docstring styles without a yields section document the yield
            # via the returns section instead.
            doc_has_yields = doc.has_returns()
            doc_has_yields_type = doc.has_rtype()
        if not doc_has_yields:
            self.add_message("missing-yield-doc", node=func_node)
        if not (doc_has_yields_type or func_node.returns):
            self.add_message("missing-yield-type-doc", node=func_node)

    def visit_yieldfrom(self, node: nodes.YieldFrom) -> None:
        """``yield from`` is checked exactly like a plain ``yield``."""
        self.visit_yield(node)

    def _compare_missing_args(
        self,
        found_argument_names,
        message_id,
        not_needed_names,
        expected_argument_names,
        warning_node,
    ):
        """Compare the found argument names with the expected ones and
        generate a message if there are arguments missing.

        :param found_argument_names: argument names found in the docstring
        :type found_argument_names: set

        :param message_id: pylint message id
        :type message_id: str

        :param not_needed_names: names that may be omitted
        :type not_needed_names: set

        :param expected_argument_names: Expected argument names
        :type expected_argument_names: set

        :param warning_node: The node to be analyzed
        :type warning_node: :class:`astroid.scoped_nodes.Node`
        """
        missing_argument_names = (
            expected_argument_names - found_argument_names
        ) - not_needed_names
        if missing_argument_names:
            self.add_message(
                message_id,
                args=(", ".join(sorted(missing_argument_names)),),
                node=warning_node,
            )

    def _compare_different_args(
        self,
        found_argument_names,
        message_id,
        not_needed_names,
        expected_argument_names,
        warning_node,
    ):
        """Compare the found argument names with the expected ones and
        generate a message if there are extra arguments found.

        :param found_argument_names: argument names found in the docstring
        :type found_argument_names: set

        :param message_id: pylint message id
        :type message_id: str

        :param not_needed_names: names that may be omitted
        :type not_needed_names: set

        :param expected_argument_names: Expected argument names
        :type expected_argument_names: set

        :param warning_node: The node to be analyzed
        :type warning_node: :class:`astroid.scoped_nodes.Node`
        """
        differing_argument_names = (
            (expected_argument_names ^ found_argument_names)
            - not_needed_names
            - expected_argument_names
        )
        if differing_argument_names:
            self.add_message(
                message_id,
                args=(", ".join(sorted(differing_argument_names)),),
                node=warning_node,
            )

    def _compare_ignored_args(
        self,
        found_argument_names,
        message_id,
        ignored_argument_names,
        warning_node,
    ):
        """Compare the found argument names with the ignored ones and
        generate a message if there are ignored arguments found.

        :param found_argument_names: argument names found in the docstring
        :type found_argument_names: set

        :param message_id: pylint message id
        :type message_id: str

        :param ignored_argument_names: Expected argument names
        :type ignored_argument_names: set

        :param warning_node: The node to be analyzed
        :type warning_node: :class:`astroid.scoped_nodes.Node`
        """
        existing_ignored_argument_names = ignored_argument_names & found_argument_names
        if existing_ignored_argument_names:
            self.add_message(
                message_id,
                args=(", ".join(sorted(existing_ignored_argument_names)),),
                node=warning_node,
            )

    def check_arguments_in_docstring(
        self,
        doc: Docstring,
        arguments_node: astroid.Arguments,
        warning_node: astroid.NodeNG,
        accept_no_param_doc: Optional[bool] = None,
    ):
        """Check that all parameters are consistent with the parameters mentioned
        in the parameter documentation (e.g. the Sphinx tags 'param' and 'type').

        * Undocumented parameters except 'self' are noticed.
        * Undocumented parameter types except for 'self' and the ``*<args>``
          and ``**<kwargs>`` parameters are noticed.
        * Parameters mentioned in the parameter documentation that don't or no
          longer exist in the function parameter list are noticed.
        * If the text "For the parameters, see" or "For the other parameters,
          see" (ignoring additional whitespace) is mentioned in the docstring,
          missing parameter documentation is tolerated.
        * If there's no Sphinx style, Google style or NumPy style parameter
          documentation at all, i.e. ``:param`` is never mentioned etc., the
          checker assumes that the parameters are documented in another format
          and the absence is tolerated.

        :param doc: Docstring for the function, method or class.
        :type doc: :class:`Docstring`

        :param arguments_node: Arguments node for the function, method or
            class constructor.
        :type arguments_node: :class:`astroid.scoped_nodes.Arguments`

        :param warning_node: The node to assign the warnings to
        :type warning_node: :class:`astroid.scoped_nodes.Node`

        :param accept_no_param_doc: Whether to allow no parameters to be
            documented. If None then this value is read from the configuration.
        :type accept_no_param_doc: bool or None
        """
        # Tolerate missing param or type declarations if there is a link to
        # another method carrying the same name.
        if not doc.doc:
            return
        if accept_no_param_doc is None:
            accept_no_param_doc = self.config.accept_no_param_doc
        tolerate_missing_params = doc.params_documented_elsewhere()
        # Collect the function arguments.
        expected_argument_names = {arg.name for arg in arguments_node.args}
        expected_argument_names.update(arg.name for arg in arguments_node.kwonlyargs)
        not_needed_type_in_docstring = self.not_needed_param_in_docstring.copy()
        expected_but_ignored_argument_names = set()
        ignored_argument_names = get_global_option(self, "ignored-argument-names")
        if ignored_argument_names:
            expected_but_ignored_argument_names = {
                arg
                for arg in expected_argument_names
                if ignored_argument_names.match(arg)
            }
        if arguments_node.vararg is not None:
            expected_argument_names.add(f"*{arguments_node.vararg}")
            not_needed_type_in_docstring.add(f"*{arguments_node.vararg}")
        if arguments_node.kwarg is not None:
            expected_argument_names.add(f"**{arguments_node.kwarg}")
            not_needed_type_in_docstring.add(f"**{arguments_node.kwarg}")
        params_with_doc, params_with_type = doc.match_param_docs()
        # Tolerate no parameter documentation at all.
        if not params_with_doc and not params_with_type and accept_no_param_doc:
            tolerate_missing_params = True
        # This is before the update of param_with_type because this must check only
        # the type documented in a docstring, not the one using pep484
        # See #4117 and #4593
        self._compare_ignored_args(
            params_with_type,
            "useless-type-doc",
            expected_but_ignored_argument_names,
            warning_node,
        )
        for index, arg_name in enumerate(arguments_node.args):
            if arguments_node.annotations[index]:
                params_with_type.add(arg_name.name)
        for index, arg_name in enumerate(arguments_node.kwonlyargs):
            if arguments_node.kwonlyargs_annotations[index]:
                params_with_type.add(arg_name.name)
        if not tolerate_missing_params:
            missing_param_doc = (expected_argument_names - params_with_doc) - (
                self.not_needed_param_in_docstring | expected_but_ignored_argument_names
            )
            missing_type_doc = (expected_argument_names - params_with_type) - (
                not_needed_type_in_docstring | expected_but_ignored_argument_names
            )
            if (
                missing_param_doc == expected_argument_names == missing_type_doc
                and len(expected_argument_names) != 0
            ):
                self.add_message(
                    "missing-any-param-doc",
                    args=(warning_node.name,),
                    node=warning_node,
                )
            else:
                self._compare_missing_args(
                    params_with_doc,
                    "missing-param-doc",
                    self.not_needed_param_in_docstring
                    | expected_but_ignored_argument_names,
                    expected_argument_names,
                    warning_node,
                )
                self._compare_missing_args(
                    params_with_type,
                    "missing-type-doc",
                    not_needed_type_in_docstring | expected_but_ignored_argument_names,
                    expected_argument_names,
                    warning_node,
                )
        self._compare_different_args(
            params_with_doc,
            "differing-param-doc",
            self.not_needed_param_in_docstring,
            expected_argument_names,
            warning_node,
        )
        self._compare_different_args(
            params_with_type,
            "differing-type-doc",
            not_needed_type_in_docstring,
            expected_argument_names,
            warning_node,
        )
        self._compare_ignored_args(
            params_with_doc,
            "useless-param-doc",
            expected_but_ignored_argument_names,
            warning_node,
        )

    def check_single_constructor_params(self, class_doc, init_doc, class_node):
        """Warn when parameters are documented on both class and __init__."""
        if class_doc.has_params() and init_doc.has_params():
            self.add_message(
                "multiple-constructor-doc", args=(class_node.name,), node=class_node
            )

    def _handle_no_raise_doc(self, excs, node):
        """Report missing raises docs unless the configuration tolerates it."""
        if self.config.accept_no_raise_doc:
            return
        self._add_raise_message(excs, node)

    def _add_raise_message(self, missing_excs, node):
        """Adds a message on :param:`node` for the missing exception type.

        :param missing_excs: A list of missing exception types.
        :type missing_excs: set(str)

        :param node: The node show the message on.
        :type node: nodes.NodeNG
        """
        if node.is_abstract():
            try:
                missing_excs.remove("NotImplementedError")
            except KeyError:
                pass
        if not missing_excs:
            return
        self.add_message(
            "missing-raises-doc", args=(", ".join(sorted(missing_excs)),), node=node
        )
def register(linter: "PyLinter") -> None:
    """Plugin entry point: register this checker with *linter*."""
    checker = DocstringParameterChecker(linter)
    linter.register_checker(checker)
| 37.608696 | 88 | 0.607473 |
f8ea783e3240a057ea661e49b47d664b9ffe5b78 | 475 | py | Python | 346/solution.py | wizh/euler | 604e8776b984ddf00669d9c29e232b6ef164d28e | [
"MIT"
] | null | null | null | 346/solution.py | wizh/euler | 604e8776b984ddf00669d9c29e232b6ef164d28e | [
"MIT"
] | null | null | null | 346/solution.py | wizh/euler | 604e8776b984ddf00669d9c29e232b6ef164d28e | [
"MIT"
] | null | null | null | def to_base(n, base):
digits = []
while n > 0:
digits.insert(0, n % base)
n = n // base
return digits
def repunit(digits):
    """Return True iff every digit is 1, i.e. *digits* spell a repunit.

    An empty digit list is vacuously a repunit (matches the original
    behaviour, since the loop found no counterexample).
    """
    return all(d == 1 for d in digits)
def main(n):
    """Sum all "strong repunits" below n (repunit in more than one base)."""
    total = 1  # 1 counts as a repunit in every base
    for candidate in range(2, n):
        hits = 0
        for base in range(2, candidate):
            if repunit(to_base(candidate, base)):
                hits += 1
            if hits > 1:
                # Repunit in at least two bases: add it once and move on.
                total += candidate
                break
    return total
main(10**12) | 19 | 42 | 0.448421 |
16356123c475b171cfc258b6a72c99a35aff4ced | 1,776 | py | Python | tl5.py | YmerejRedienhcs/ws2811 | d4299cfc8577eebcd23a73bacbfbafbde439711a | [
"Unlicense"
] | 1 | 2021-06-22T16:42:59.000Z | 2021-06-22T16:42:59.000Z | tl5.py | YmerejRedienhcs/ws2811 | d4299cfc8577eebcd23a73bacbfbafbde439711a | [
"Unlicense"
] | null | null | null | tl5.py | YmerejRedienhcs/ws2811 | d4299cfc8577eebcd23a73bacbfbafbde439711a | [
"Unlicense"
] | null | null | null | #!/usr/bin/python3
import sys
import board
import neopixel
import time
import random
# Strip configuration: total pixels and the length of the moving lit segment.
num_lights = 200
#num_lights = int(sys.argv[1])
seg_length = 100
black = (0, 0, 0)
white = (255, 255, 255)
# program num_lights pixels with the default brightness 1.0, and autoWrite true
pixels = neopixel.NeoPixel(board.D18, num_lights)
# light 20 bright green
#pixels[19] = (0,255,0)
# light all pixels red
#pixels.fill((255,0,0))
# turn off neopixels
pixels.fill(black)
# Palette: index 0 is black, 1-3 primaries, 4-6 cyan/magenta/yellow, 7 white.
colors = [
    (000, 000, 000),
    (255, 000, 000),
    (000, 255, 000),
    (000, 000, 255),
    (000, 255, 255),
    (255, 000, 255),
    (255, 255, 000),
    (255, 255, 255)]
def randomColor(x):
    """Choose a colour for pixel index x.

    Even indices draw from the bright cyan/magenta/yellow palette entries;
    odd indices get a fully random RGB triple.
    """
    if x % 2 == 0:
        return colors[random.randint(4, 6)]
    return tuple(random.randint(0, 255) for _ in range(3))
def slowOn(x):
    """Fade pixel x up to a random colour over 256 brightness steps.

    Currently unused (the call site below is commented out).
    """
    c = randomColor(x)
    delay = .020
    for y in range(256):
        #print(f'y is {y}')
        # Fraction of full brightness for this step, in (0, 1].
        pc = float(y+1) / 256.0
        print(f'pc is {pc}')  # NOTE(review): debug print on every step
        #c2 = (int(pc * c[0]), int(pc * c[1]), int(pc * c[2]))
        c2 = (pc * c[0], pc * c[1], pc * c[2])
        #print(f'c2 is {c2}')
        # Sleep shrinks as brightness grows, so the fade accelerates.
        time.sleep(delay * (1-pc))
        pixels[x] = c2
#slowOn(2)
#exit()

delay = 0.035
# Endless animation: a seg_length-wide band of random colours sweeps along
# the strip; pixels behind the band are turned off again.
while True:
    for x in range(num_lights+seg_length):
        # print(f'setting light {x} to a color')
        if (x < num_lights):
            pixels[x] = randomColor(x)
        # print(f'x is {x}')
        if ((x >= num_lights) or (x >= seg_length)):
            # print(f'setting light {x-seg_length} to black')
            pixels[x-seg_length] = black
        time.sleep(delay)
    time.sleep(1)
    pixels.fill((0,0,0))
| 25.371429 | 80 | 0.569257 |
1acc90db4db8b6995d465ea563a66688e3329156 | 629 | py | Python | RPESystem/manage.py | YanTszyafen/RPESystem | 8ddf8eb5d7a159c0146cc5a7215ff4b8e91ae62d | [
"MIT"
] | null | null | null | RPESystem/manage.py | YanTszyafen/RPESystem | 8ddf8eb5d7a159c0146cc5a7215ff4b8e91ae62d | [
"MIT"
] | null | null | null | RPESystem/manage.py | YanTszyafen/RPESystem | 8ddf8eb5d7a159c0146cc5a7215ff4b8e91ae62d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'RPESystem.settings')
    try:
        # Imported lazily so the ImportError below can explain a missing
        # or inactive environment.
        from django.core.management import execute_from_command_line
    except ImportError as import_error:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from import_error
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| 28.590909 | 73 | 0.683625 |
26a192e500016acc0de1b4694c5632d2f4c8513a | 5,850 | py | Python | models/datastructures.py | dtu-act/pinn-acoustic-wave-prop | 2fbc7e39499e28264396669999b8be8f86f01082 | [
"MIT"
] | 7 | 2021-11-05T21:48:44.000Z | 2022-01-28T14:52:14.000Z | models/datastructures.py | dtu-act/pinn-acoustic-wave-prop | 2fbc7e39499e28264396669999b8be8f86f01082 | [
"MIT"
] | null | null | null | models/datastructures.py | dtu-act/pinn-acoustic-wave-prop | 2fbc7e39499e28264396669999b8be8f86f01082 | [
"MIT"
] | 1 | 2021-11-21T01:43:17.000Z | 2021-11-21T01:43:17.000Z | # ==============================================================================
# Copyright 2021 Technical University of Denmark
# Author: Nikolas Borrel-Jensen
#
# All Rights Reserved.
#
# Licensed under the MIT License.
# ==============================================================================
from collections import namedtuple
from dataclasses import dataclass
from enum import Enum
import os
from pathlib import Path
import shutil
from typing import Callable, List
import models.sources as sources
import numpy as np
# Bundles of SciANN functionals and loss accumulators. The typename passed to
# namedtuple now matches the bound name (the originals were 'target_indexes'
# and 'accumulators'), so repr() and pickling identify the right type.
SciannFunctionals = namedtuple('SciannFunctionals', ['x', 'y', 't', 'x0', 'p', 'v'])
Accumulators = namedtuple('Accumulators', ['phi', 'psi0', 'psi1'])
class BoundaryType(Enum):
    """Supported acoustic boundary-condition families."""
    DIRICHLET = 1             # prescribed pressure at the boundary
    NEUMANN = 2               # prescribed velocity at the boundary
    IMPEDANCE_FREQ_DEP = 3    # frequency-dependent impedance
    IMPEDANCE_FREQ_INDEP = 4  # frequency-independent impedance
class SourceType(Enum):
    """How the acoustic source enters the problem."""
    IC = 1         # source modelled as an initial condition
    INJECTION = 2  # source injected during the simulation
class LossType(Enum):
    """Training regimes; values double as identifiers in file paths."""
    DATA = 'data_loss'
    PINN = 'pinn_loss'
    PINN_DATA = 'pinn_data_loss'
@dataclass
class LossPenalties:
    """Weighting factors for the individual loss terms."""
    pde: float        # PDE residual weight
    bc: float         # boundary-condition weight
    data: float       # data-fit weight
    ic: float = None  # initial-condition weight (None if unused)
    ade: float = None  # auxiliary-differential-equation weight (None if unused)
@dataclass
class SourceInfo:
    """Acoustic source description; builds its callable from sigma0."""
    type: SourceType
    mu: float = 0
    sigma0: float = None
    source: Callable = None

    # Custom __init__ replaces the dataclass-generated one so the source
    # callable can be derived from sigma0 and the spatial dimension.
    def __init__(self, type, sigma0: float, spatial_dim: int):
        self.type = type
        self.sigma0 = sigma0
        self.source = sources.sciann_gaussianIC(sigma0, spatial_dim)
@dataclass
class FrequencyDependentImpedance:
    """Rational-approximation coefficients for a frequency-dependent
    boundary impedance (used by the auxiliary differential equations)."""
    Yinf: float
    A: List[float]
    B: List[float]
    C: List[float]
    lambdas: List[float]
    alpha: List[float]
    beta: List[float]
@dataclass
class BoundaryCondition:
    """A boundary condition; exactly one payload field is required,
    depending on ``type``.

    :raises ValueError: if the field required by ``type`` is missing
    :raises NotImplementedError: for unsupported boundary types
    """
    type: BoundaryType
    xi: float = None  # specific acoustic impedance
    p: float = None  # pressure at boundary
    v: float = None  # velocity at boundary
    impedance_data: FrequencyDependentImpedance = None

    def __init__(self, type, p: float=None, v: float=None, impedance_data=None, xi: float=None):
        self.type = type
        if type == BoundaryType.DIRICHLET:
            if p is None:
                # ValueError (was bare Exception) — still caught by callers
                # handling Exception.
                raise ValueError('p not set')
            self.p = p
        elif type == BoundaryType.NEUMANN:
            if v is None:
                raise ValueError('v not set')
            self.v = v
        elif type == BoundaryType.IMPEDANCE_FREQ_INDEP:
            if xi is None:
                raise ValueError('xi not set')
            self.xi = xi
        elif type == BoundaryType.IMPEDANCE_FREQ_DEP:
            if impedance_data is None:
                raise ValueError('impedance_data not set')
            self.impedance_data = impedance_data
        else:
            raise NotImplementedError()
@dataclass
class InputOutputDirs:
    """Derives all input/output paths for one experiment run from its
    settings dictionary (keys: 'id', 'data_filename', and 'base_dir'
    unless overridden via ``base_dir``)."""
    id: str
    id_dir: str
    figs_dir: str
    models_dir: str
    transfer_models_dir: str
    plot_graph_path: str  # was declared twice; duplicate annotation removed
    data_dir: str
    data_path: str

    def __init__(self,settings_dict,base_dir=None):
        if base_dir == None:
            base_dir = settings_dict['base_dir']
        self.id = settings_dict['id']
        # Per-run output tree: results/<id>/{figs,models}
        self.id_dir = os.path.join(base_dir, "results", self.id)
        self.figs_dir = os.path.join(self.id_dir, "figs")
        self.models_dir = os.path.join(self.id_dir, "models")
        self.data_dir = os.path.join(base_dir, "reference_data")
        self.transfer_models_dir = os.path.join(base_dir, "trained_models")
        self.data_path = os.path.join(self.data_dir, settings_dict['data_filename'])
        self.plot_graph_path = os.path.join(self.models_dir, f'{LossType.PINN}', 'network.png')

    def createDirs(self, delete_existing=False):
        """Create the output directories; optionally wipe an existing run."""
        if delete_existing and Path(self.id_dir).exists():
            shutil.rmtree(self.id_dir)
        Path(self.figs_dir).mkdir(parents=True, exist_ok=True)
        Path(self.models_dir).mkdir(parents=True, exist_ok=True)
@dataclass
class TransferLearning:
    """Settings for warm-starting training from a previously trained model."""
    boundary_cond: BoundaryCondition  # presumably the condition the source model was trained with — confirm
    model_dir: str  # directory holding the pre-trained model
    trainable: bool  # whether the transferred weights continue training
@dataclass
class Physics:
    """Physical constants of the acoustic problem."""
    sigma0: float  # source Gaussian width
    fmax: float  # NOTE(review): presumably the maximum resolved frequency — confirm
    c: float  # wave speed (the separate c_phys suggests this one may be normalized)
    c_phys: float  # physical wave speed
    rho: float  # medium density
@dataclass
class Domain:
    """Space-time simulation domain: bounds, discretization and sources."""
    boundary_cond: BoundaryCondition
    spatial_dimension: int
    Xbounds: List[List[float]]  # [lower_bounds, upper_bounds]
    tmax: float
    ppw: float  # points per wavelength
    dt: float
    dx: float
    nX: List[List[int]]  # number of spatial points per dimension
    nt: int  # number of temporal steps
    source: SourceInfo
    x0_sources: List[List[float]]  # source positions
    ic_points_p: float
    bc_points_p: float

    def __init__(self, Xbounds, tmax, ppw, dt, dx, boundary_cond, sigma0, x0_sources, ic_points_p, bc_points_p):
        """Validate the bounds and derive the discretization counts.

        Raises
        ------
        ValueError
            If lower/upper bounds differ in dimension. (The original used a
            bare ``assert``, which silently disappears under ``python -O``.)
        NotImplementedError
            If more than two bound rows are supplied.
        """
        if len(Xbounds[0]) != len(Xbounds[1]):
            raise ValueError("Xbounds lower and upper bounds must have the same dimension")
        if len(Xbounds) > 2:
            raise NotImplementedError()
        self.spatial_dimension = np.asarray(Xbounds).shape[1]
        self.Xbounds = Xbounds
        self.tmax = tmax
        self.ppw = ppw
        self.dt = dt
        self.dx = dx
        self.boundary_cond = boundary_cond
        # The domain source is always a Gaussian initial condition.
        self.source = SourceInfo(SourceType.IC, sigma0, self.spatial_dimension)
        self.x0_sources = x0_sources
        self.ic_points_p = ic_points_p
        self.bc_points_p = bc_points_p
        self.nX = ((np.asarray(Xbounds[1]) - np.asarray(Xbounds[0])) / dx).astype(int)  # number of spatial points
        self.nt = int(tmax / dt)  # number of temporal steps

    @property
    def num_sources(self) -> int:
        """Number of configured source positions."""
        return len(self.x0_sources)
@dataclass
class ADENeuralNetwork:
    """Architecture and loss weighting for the ADE (auxiliary) network."""
    activation: str
    num_layers: int
    num_neurons: int
    accumulator_norm: List[float]  # renamed from accumulator_factors
    weights: LossPenalties
@dataclass
class PressureNeuralNetwork:
    """Architecture and loss weighting for the pressure network."""
    activation: str
    num_layers: int
    num_neurons: int
    weights: LossPenalties
@dataclass
class NetworkSettings:
    """Top-level training configuration for both networks."""
    epochs: int
    stop_loss_value: float  # presumably an early-stop threshold on the loss — confirm
    batch_size: int
    learning_rate: float
    optimizer: str
    p_nn: PressureNeuralNetwork
    ade_nn: ADENeuralNetwork
54817627e9c4aba5e469f3d51adc6cd75c343ed3 | 1,652 | py | Python | test/unit/mysql_class/flush_logs.py | mjpernot/mysql-lib | aabc0c3b3120c0ec5344dc460092d830e796d43c | [
"MIT"
] | null | null | null | test/unit/mysql_class/flush_logs.py | mjpernot/mysql-lib | aabc0c3b3120c0ec5344dc460092d830e796d43c | [
"MIT"
] | null | null | null | test/unit/mysql_class/flush_logs.py | mjpernot/mysql-lib | aabc0c3b3120c0ec5344dc460092d830e796d43c | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Classification (U)
"""Program: flush_logs.py
Description: Unit testing of flush_logs in mysql_class.py.
Usage:
test/unit/mysql_class/flush_logs.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
# Local
sys.path.append(os.getcwd())
import mysql_class
import version
__version__ = version.__version__
class Server(object):
    """Minimal stand-in for the real Server class used by these tests.

    It simply records the last SQL command string handed to ``cmd_sql`` and
    always reports success.
    """

    def __init__(self):
        """Start with no command recorded."""
        self.cmd = None

    def cmd_sql(self, cmd):
        """Remember ``cmd`` as the most recent command and return True."""
        self.cmd = cmd
        return True
class UnitTest(unittest.TestCase):
    """Unit tests for mysql_class.flush_logs.

    Methods:
        setUp
        test_flush_logs
    """

    def setUp(self):
        """Create a stub Server instance for each test."""
        self.server = Server()

    def test_flush_logs(self):
        """flush_logs should run the flush command against the server.

        NOTE(review): assertFalse passes here because flush_logs apparently
        returns a falsy value (likely None) — confirm against mysql_class.
        """
        self.assertFalse(mysql_class.flush_logs(self.server))


if __name__ == "__main__":
    unittest.main()
| 14.365217 | 68 | 0.603511 |
dbadb30b326bcd3c4b1dbcc973937be2d5e80593 | 4,836 | py | Python | doc/summarize.py | tovrstra/numpy | bb5d666e84e2eb294543a67c6143d7e9124d1c73 | [
"BSD-3-Clause"
] | 15 | 2015-01-24T09:16:17.000Z | 2021-12-19T10:41:07.000Z | doc/summarize.py | tovrstra/numpy | bb5d666e84e2eb294543a67c6143d7e9124d1c73 | [
"BSD-3-Clause"
] | 2 | 2019-07-19T16:30:31.000Z | 2019-07-19T19:17:13.000Z | doc/summarize.py | tacaswell/numpy | 1147490663d36b05fad8dcce1e104601c2724560 | [
"BSD-3-Clause"
] | 5 | 2017-01-31T21:28:01.000Z | 2021-02-23T06:38:32.000Z | #!/usr/bin/env python
"""
summarize.py
Show a summary about which NumPy functions are documented and which are not.
"""
from __future__ import division, absolute_import, print_function
import os, glob, re, sys, inspect, optparse
import collections
sys.path.append(os.path.join(os.path.dirname(__file__), 'sphinxext'))
from sphinxext.phantom_import import import_phantom_module
from sphinxext.autosummary_generate import get_documented
CUR_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(CUR_DIR, 'source', 'reference')
SKIP_LIST = """
# --- aliases:
alltrue sometrue bitwise_not cumproduct
row_stack column_stack product rank
# -- skipped:
core lib f2py dual doc emath ma rec char distutils oldnumeric numarray
testing version matlib
add_docstring add_newdoc add_newdocs fastCopyAndTranspose pkgload
conjugate disp
int0 object0 unicode0 uint0 string_ string0 void0
flagsobj
setup PackageLoader
lib.scimath.arccos lib.scimath.arcsin lib.scimath.arccosh lib.scimath.arcsinh
lib.scimath.arctanh lib.scimath.log lib.scimath.log2 lib.scimath.log10
lib.scimath.logn lib.scimath.power lib.scimath.sqrt
# --- numpy.random:
random random.info random.mtrand random.ranf random.sample random.random
# --- numpy.fft:
fft fft.Tester fft.bench fft.fftpack fft.fftpack_lite fft.helper
fft.info fft.test
# --- numpy.linalg:
linalg linalg.Tester
linalg.bench linalg.info linalg.lapack_lite linalg.linalg linalg.test
# --- numpy.ctypeslib:
ctypeslib ctypeslib.test
""".split()
def main():
    """Parse CLI options, then print documented/undocumented NumPy names."""
    p = optparse.OptionParser(__doc__)
    p.add_option("-c", "--columns", action="store", type="int", dest="cols",
                 default=3, help="Maximum number of columns")
    options, args = p.parse_args()
    if len(args) != 0:
        p.error('Wrong number of arguments')
    # prepare: load a previously dumped phantom module, if present, so the
    # check can run without importing the real extension modules
    fn = os.path.join(CUR_DIR, 'dump.xml')
    if os.path.isfile(fn):
        import_phantom_module(fn)
    # check
    documented, undocumented = check_numpy()
    # report: group documented names by (filename, section, keyword)
    in_sections = {}
    for name, locations in documented.items():
        for (filename, section, keyword, toctree) in locations:
            in_sections.setdefault((filename, section, keyword), []).append(name)
    print("Documented")
    print("==========\n")
    last_filename = None
    for (filename, section, keyword), names in sorted(in_sections.items()):
        if filename != last_filename:
            # print a file header only when the file changes
            print("--- %s\n" % filename)
            last_filename = filename
        print(" ** ", section)
        print(format_in_columns(sorted(names), options.cols))
        print("\n")
    print("")
    print("Undocumented")
    print("============\n")
    print(format_in_columns(sorted(undocumented.keys()), options.cols))
def check_numpy():
    """Collect documented and undocumented NumPy names.

    Returns
    -------
    (documented, undocumented) : tuple of dict
        Both dicts are keyed by item name, with the leading ``numpy.``
        prefix stripped (names are duplicated under the stripped key).
    """
    documented = get_documented(glob.glob(SOURCE_DIR + '/*.rst'))
    undocumented = {}
    import numpy, numpy.fft, numpy.linalg, numpy.random
    for mod in [numpy, numpy.fft, numpy.linalg, numpy.random,
                numpy.ctypeslib, numpy.emath, numpy.ma]:
        undocumented.update(get_undocumented(documented, mod, skip=SKIP_LIST))
    for d in (documented, undocumented):
        # Iterate over a snapshot of the keys: the dict is mutated inside the
        # loop, and mutating while iterating d.keys() raises RuntimeError on
        # Python 3.
        for k in list(d.keys()):
            if k.startswith('numpy.'):
                d[k[6:]] = d[k]
                del d[k]
    return documented, undocumented
def get_undocumented(documented, module, module_name=None, skip=()):
    """
    Find out which items in `module` are not documented.

    Parameters
    ----------
    documented : dict
        Names already known to be documented (membership test only).
    module : module
        Module whose public attributes are inspected.
    module_name : str, optional
        Prefix used to build full names; defaults to ``module.__name__``.
    skip : sequence of str, optional
        Full names to ignore. (Default changed from a mutable ``[]`` to an
        immutable ``()``; it is only used for membership tests.)

    Returns
    -------
    undocumented : dict of bool
        Maps each *undocumented* item's full name to True.
    """
    undocumented = {}
    if module_name is None:
        module_name = module.__name__
    for name in dir(module):
        obj = getattr(module, name)
        if name.startswith('_'):
            continue
        full_name = '.'.join([module_name, name])
        if full_name in skip:
            continue
        if full_name.startswith('numpy.') and full_name[6:] in skip:
            continue
        # callable() replaces isinstance(obj, collections.Callable): the
        # Callable alias was removed from the `collections` top level in
        # Python 3.10.
        if not (inspect.ismodule(obj) or callable(obj) or inspect.isclass(obj)):
            continue
        if full_name not in documented:
            undocumented[full_name] = True
    return undocumented
def format_in_columns(lst, max_columns):
    """
    Format a list of strings into a column-aligned multi-line string.

    Items are laid out column-major in at most `max_columns` columns, each
    column wide enough for the longest item plus padding. Returns "" for an
    empty list (the original ``max()`` call raised ValueError in that case).
    """
    lst = [str(_m) for _m in lst]
    if not lst:
        return ""
    col_len = max(len(_m) for _m in lst) + 2
    ncols = 80 // col_len
    if ncols > max_columns:
        ncols = max_columns
    if ncols <= 0:
        ncols = 1
    # round the row count up so every item fits
    if len(lst) % ncols == 0:
        nrows = len(lst) // ncols
    else:
        nrows = 1 + len(lst) // ncols
    fmt = ' %%-%ds ' % (col_len - 2)
    lines = []
    for n in range(nrows):
        lines.append("".join([fmt % x for x in lst[n::nrows]]))
    return "\n".join(lines)
if __name__ == "__main__": main()
| 27.953757 | 104 | 0.660256 |
fd594a0c0467c362e46686a8f22ee532c768f504 | 508 | py | Python | tools/sapp/sapp/iterutil.py | s-pace/pyre-check | 2b71dcf22e4672567cfe0dfef356f11646d66244 | [
"MIT"
] | 5 | 2019-02-14T19:46:47.000Z | 2020-01-16T05:48:45.000Z | tools/sapp/sapp/iterutil.py | s-pace/pyre-check | 2b71dcf22e4672567cfe0dfef356f11646d66244 | [
"MIT"
] | 4 | 2022-02-15T02:42:33.000Z | 2022-02-28T01:30:07.000Z | tools/sapp/sapp/iterutil.py | s-pace/pyre-check | 2b71dcf22e4672567cfe0dfef356f11646d66244 | [
"MIT"
] | 2 | 2019-02-14T19:46:23.000Z | 2020-07-13T03:53:04.000Z | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
def split_every(n, iterable):
    """Yield successive lists of at most `n` items from an iterable:
    list(split_every(2, range(10))) => [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
    """
    iterator = iter(iterable)
    while True:
        batch = list(itertools.islice(iterator, n))
        if not batch:
            return
        yield batch
| 26.736842 | 79 | 0.627953 |
5ae8dd47559ca261ea3a1dd3c3d8c861d48df99b | 403 | py | Python | src/format_string_name_hashes.py | famavott/codewars-katas | fdc5574bcd13adc194975ac94ab90c59ee3aa398 | [
"MIT"
] | null | null | null | src/format_string_name_hashes.py | famavott/codewars-katas | fdc5574bcd13adc194975ac94ab90c59ee3aa398 | [
"MIT"
] | null | null | null | src/format_string_name_hashes.py | famavott/codewars-katas | fdc5574bcd13adc194975ac94ab90c59ee3aa398 | [
"MIT"
] | null | null | null | """Format a string of names from list of dicts."""
def namelist(names):
    """Join the 'name' values of a list of dicts as "A, B & C".

    Returns "" for an empty list and the bare name for a single entry.
    The original's explicit two-element branch was redundant: the general
    case already produces "A & B" for two names.
    """
    labels = [entry['name'] for entry in names]
    if not labels:
        return ''
    if len(labels) == 1:
        return labels[0]
    return ', '.join(labels[:-1]) + ' & ' + labels[-1]
fbf02148f28b648d5916639b651c898e3294fa83 | 3,131 | py | Python | setup.py | risto-trajanov/nevergrad | 8c123bd5911debc4840c1683112251cee0cf6121 | [
"MIT"
] | 3,217 | 2018-12-20T05:41:46.000Z | 2022-03-31T10:22:54.000Z | setup.py | risto-trajanov/nevergrad | 8c123bd5911debc4840c1683112251cee0cf6121 | [
"MIT"
] | 590 | 2018-12-20T21:03:38.000Z | 2022-03-31T04:38:45.000Z | setup.py | risto-trajanov/nevergrad | 8c123bd5911debc4840c1683112251cee0cf6121 | [
"MIT"
] | 333 | 2018-12-20T08:38:03.000Z | 2022-03-28T06:23:53.000Z | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
import os
import sys
import typing as tp
from pathlib import Path
from setuptools import setup
from setuptools import find_packages
from setuptools.command.install import install
# read requirements
requirements: tp.Dict[str, tp.List[str]] = {}
for extra in ["dev", "bench", "main"]:
requirements[extra] = Path(f"requirements/{extra}.txt").read_text().splitlines()
# build long description
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
# find version
init_str = Path("nevergrad/__init__.py").read_text()
match = re.search(r"^__version__ = \"(?P<version>[\w\.]+?)\"$", init_str, re.MULTILINE)
assert match is not None, "Could not find version in nevergrad/__init__.py"
version = match.group("version")
def _replace_relative_links(regex: tp.Match[str]) -> str:
    """Converts relative links into links to version
    so that links on Pypi long description are correct
    """
    string = regex.group()
    link = regex.group("link")
    name = regex.group("name")
    # Only rewrite links that are relative AND point at an existing file.
    if not link.startswith("http") and Path(link).exists():
        # Images must be served from raw.githubusercontent; pages use the
        # regular github.com blob URL, both pinned to this release's tag.
        githuburl = (
            f"github.com/facebookresearch/nevergrad/blob/{version}"
            if not link.endswith((".png", ".gif"))
            else f"raw.githubusercontent.com/facebookresearch/nevergrad/{version}"
        )
        string = f"[{name}](https://{githuburl}/{link})"
    return string
pattern = re.compile(r"\[(?P<name>.+?)\]\((?P<link>\S+?)\)")
long_description = re.sub(pattern, _replace_relative_links, long_description)
class VerifyCircleCiVersionCommand(install):  # type: ignore
    """Custom command to verify that the git tag matches CircleCI version"""

    description = "verify that the git tag matches CircleCI version"

    def run(self) -> None:
        """Abort the build (exit non-zero) if CIRCLE_TAG != package version."""
        tag = os.getenv("CIRCLE_TAG")
        if tag != version:
            info = f"Git tag: {tag} does not match the version of this app: {version}"
            sys.exit(info)
# setup
setup(
name="nevergrad",
version=version,
license="MIT",
description="A Python toolbox for performing gradient-free optimization",
long_description=long_description,
long_description_content_type="text/markdown",
author="Facebook AI Research",
url="https://github.com/facebookresearch/nevergrad",
packages=find_packages(),
classifiers=[
"License :: OSI Approved :: MIT License",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Programming Language :: Python",
],
install_requires=requirements["main"],
extras_require={
"all": requirements["dev"] + requirements["bench"],
"dev": requirements["dev"],
"benchmark": requirements["bench"],
},
package_data={"nevergrad": ["py.typed", "*.csv", "*.py"]},
python_requires=">=3.6",
cmdclass={"verify_circleci_version": VerifyCircleCiVersionCommand},
)
| 32.278351 | 87 | 0.671351 |
f72dc3ca8aa596b8bc7e93a42cd965a4678d9bb0 | 1,626 | py | Python | rates_classify/rdf.py | xhades/rates_classify | 225627dad22c162023bc6b5e4d8f5881c5a6f354 | [
"MIT"
] | 7 | 2017-12-23T05:34:01.000Z | 2021-01-03T10:10:03.000Z | rates_classify/rdf.py | xhades/rates_classify | 225627dad22c162023bc6b5e4d8f5881c5a6f354 | [
"MIT"
] | null | null | null | rates_classify/rdf.py | xhades/rates_classify | 225627dad22c162023bc6b5e4d8f5881c5a6f354 | [
"MIT"
] | 3 | 2019-05-23T20:15:44.000Z | 2020-01-14T07:27:58.000Z | # !/usr/bin/env python
# -*-coding:utf-8-*-
"""
@author: xhades
@Date: 2017/12/28
"""
# 随机森林分类器
import numpy as np
from numpy import *
from numpy import array, argmax
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import pickle
from sklearn.ensemble import RandomForestClassifier as RDF
np.set_printoptions(threshold=np.inf)
# 70/30 train/test split, then fit and evaluate a random forest.
def train(xFile, yFile):
    """Train a random-forest classifier on pickled embeddings.

    Parameters
    ----------
    xFile : str
        Pickle file holding the sample embeddings.
    yFile : str
        Text file with one label per line, aligned with the samples.

    Prints the held-out accuracy. The previously hard-coded sample count
    (212841) is now derived from the data, so any dataset size works.
    """
    with open(xFile, "rb") as file_r:
        X = pickle.load(file_r)

    # Flatten each sample's per-character embedding into a single row.
    X = reshape(X, (shape(X)[0], -1))

    # Read the labels and integer-encode them.
    with open(yFile, "r") as yFile_r:
        labelLines = [_.strip("\n") for _ in yFile_r.readlines()]
    values = array(labelLines)
    labelEncoder = LabelEncoder()
    integerEncoded = labelEncoder.fit_transform(values)
    # 1-D label vector, one entry per sample.
    Y = integerEncoded.reshape(-1)

    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=42)

    # Random-forest classifier; "gini" (CART) is the default split criterion,
    # "entropy" (information gain, as in ID3/C4.5) is the alternative.
    clf = RDF(criterion="gini")
    clf.fit(X_train, Y_train)

    # Evaluate on the held-out data.
    predict = clf.predict(X_test)
    count = 0
    for p, t in zip(predict, Y_test):
        if p == t:
            count += 1
    print("RandomForest Accuracy is:", count / len(Y_test))
if __name__ == "__main__":
xFile = "Res/char_embedded.pkl"
yFile = "data/label.txt"
print("Start Training.....")
train(xFile, yFile)
print("End.....")
| 25.015385 | 114 | 0.673432 |
771a986085f463cd264825425978c3cacadeee83 | 2,819 | py | Python | 3D_Graphics_Shapes/3D_render.py | Wason1797/Fun-Python | 1432aec98423f13cc228c34c53bdb19ba4efe1da | [
"MIT"
] | 11 | 2018-11-21T19:34:48.000Z | 2019-01-13T04:30:44.000Z | 3D_Graphics_Shapes/3D_render.py | Wason1797/Fun-Python | 1432aec98423f13cc228c34c53bdb19ba4efe1da | [
"MIT"
] | null | null | null | 3D_Graphics_Shapes/3D_render.py | Wason1797/Fun-Python | 1432aec98423f13cc228c34c53bdb19ba4efe1da | [
"MIT"
] | null | null | null | import numpy as np
import math as mt
import pygame
import sys
from pygame.locals import *
pygame.init()
# set up the window
windowSurface = pygame.display.set_mode((500, 500), 0, 32)
pygame.display.set_caption('Cube Rotation')
# Set up the colors
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
WHITE = (255, 255, 255)
projection_matrix = np.array([
[1, 0, 0],
[0, 1, 0]])
def rotation_matrix_z(_angle):
    """Return the 3x3 matrix rotating by `_angle` radians about the z axis."""
    cos_a, sin_a = mt.cos(_angle), mt.sin(_angle)
    return np.array([
        [cos_a, -sin_a, 0],
        [sin_a, cos_a, 0],
        [0, 0, 1],
    ])
def rotation_matrix_y(_angle):
    """Return the 3x3 matrix rotating by `_angle` radians about the y axis."""
    cos_a, sin_a = mt.cos(_angle), mt.sin(_angle)
    return np.array([
        [cos_a, 0, sin_a],
        [0, 1, 0],
        [-sin_a, 0, cos_a],
    ])
def rotation_matrix_x(_angle):
    """Return the 3x3 matrix rotating by `_angle` radians about the x axis."""
    cos_a, sin_a = mt.cos(_angle), mt.sin(_angle)
    return np.array([
        [1, 0, 0],
        [0, cos_a, -sin_a],
        [0, sin_a, cos_a],
    ])
points = [np.array([-50, -50, -50]),
np.array([50, -50, -50]),
np.array([50, 50, -50]),
np.array([-50, 50, -50]),
np.array([-50, -50, 50]),
np.array([50, -50, 50]),
np.array([50, 50, 50]),
np.array([-50, 50, 50])]
def translate(_coord):
    """Shift an origin-centred (x, y) coordinate so (0, 0) maps to the window centre."""
    _with, _height = windowSurface.get_size()
    return _coord[0]+_with//2, _coord[1]+_height//2
def connect_points(x1, y1, x2, y2, _stroke):
    """Draw a blue line of width `_stroke` between two origin-centred points."""
    pygame.draw.line(windowSurface, BLUE, translate(
        (int(x1), int(y1))), translate((int(x2), int(y2))), _stroke)
rotate = mt.radians(0)  # current rotation angle (radians), applied about all three axes
projected_points = []  # per-frame 2-D projections of the cube vertices
while True:
    # Quit cleanly when the window is closed.
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
    for vector in points:
        # Rotate each vertex about x, then y, then z, then project to 2-D
        # by dropping the z component (orthographic projection).
        rotated_2d = np.matmul(rotation_matrix_x(rotate), vector)
        rotated_2d = np.matmul(rotation_matrix_y(rotate), rotated_2d)
        rotated_2d = np.matmul(rotation_matrix_z(rotate), rotated_2d)
        projected_2d = np.matmul(projection_matrix, rotated_2d)
        projected_points.append(projected_2d)
        pygame.draw.circle(windowSurface, RED, translate(
            (int(projected_2d[0]), int(projected_2d[1]))), 5)
    # Draw the 12 cube edges: back face, front face, and the four struts
    # connecting them (vertices 0-3 are one face, 4-7 the other).
    for j in range(4):
        start = projected_points[j]
        end = projected_points[(j + 1) % 4]
        connect_points(start[0], start[1], end[0], end[1], 1)
        start = projected_points[j + 4]
        end = projected_points[((j + 1) % 4) + 4]
        connect_points(start[0], start[1], end[0], end[1], 1)
        start = projected_points[j]
        end = projected_points[j + 4]
        connect_points(start[0], start[1], end[0], end[1], 1)
    pygame.display.update()
    projected_points.clear()
    rotate += mt.radians(1)  # advance 1 degree per frame
    pygame.time.wait(40)  # ~25 fps
    windowSurface.fill(BLACK)
adc0b415e4a9da1fdb2221eb862525e00f803105 | 2,602 | py | Python | tests/brightway_fixtures.py | kais-siala/wurst | 448dd4e9e0bfbde956c2913222222509ff2b14e1 | [
"BSD-2-Clause"
] | null | null | null | tests/brightway_fixtures.py | kais-siala/wurst | 448dd4e9e0bfbde956c2913222222509ff2b14e1 | [
"BSD-2-Clause"
] | null | null | null | tests/brightway_fixtures.py | kais-siala/wurst | 448dd4e9e0bfbde956c2913222222509ff2b14e1 | [
"BSD-2-Clause"
] | null | null | null | try:
from bw2data.tests import bw2test
from bw2data import Database
import pytest
biosphere = {
("biosphere", "1"): {
"categories": ["things"],
"code": "1",
"exchanges": [],
"reference product": "find me!",
"name": "an emission",
"type": "emission",
"unit": "kg",
},
("biosphere", "2"): {
"categories": ["things"],
"code": "2",
"exchanges": [],
"type": "emission",
"name": "another emission",
"unit": "kg",
},
}
food = {
("food", "1"): {
"categories": ["stuff", "meals"],
"code": "1",
"classifications": [42],
"comment": "Yep",
"reference product": "stuff",
"exchanges": [
{
"amount": 0.5,
"input": ("food", "2"),
"type": "technosphere",
"production volume": 13,
},
{
"amount": 0.05,
"input": ("biosphere", "1"),
"type": "biosphere",
"uncertainty type": 4,
},
],
"location": "CA",
"name": "lunch",
"type": "process",
"unit": "kg",
"parameters": {"losses_gross_net": {"amount": 0.01}},
},
("food", "2"): {
"categories": ["stuff", "meals"],
"code": "2",
"exchanges": [
{
"amount": 0.25,
"input": ("food", "1"),
"type": "technosphere",
"uncertainty type": 0,
},
{
"amount": 0.15,
"input": ("biosphere", "2"),
"type": "biosphere",
"uncertainty type": 0,
},
],
"location": "CH",
"name": "dinner",
"type": "process",
"unit": "kg",
"parameters": [
{
"name": "rara",
"amount": 13,
"something": "else",
}
],
},
}
@pytest.fixture(scope="function")
@bw2test
def test_bw2_database():
d = Database("biosphere")
d.write(biosphere)
d = Database("food")
d.write(food)
except ImportError:
test_bw2_database = None
| 27.389474 | 65 | 0.33897 |
936c41369733236368b67a282a19742d8a255def | 146 | py | Python | myapp.py | maxvol/Streamlit | f18c5e978040cb0df07cd68c27ce6239a4ecad44 | [
"Unlicense"
] | null | null | null | myapp.py | maxvol/Streamlit | f18c5e978040cb0df07cd68c27ce6239a4ecad44 | [
"Unlicense"
] | null | null | null | myapp.py | maxvol/Streamlit | f18c5e978040cb0df07cd68c27ce6239a4ecad44 | [
"Unlicense"
] | null | null | null | import streamlit as st
import pandas as pd
st.write("""
# My first app
Hello *world*!
""")
df = pd.read_csv("timeseries.csv")
st.line_chart(df)
| 13.272727 | 34 | 0.691781 |
16584b01db7547b590a6ee9934aef711137dc35a | 1,074 | py | Python | HP Code Wars Documents/2014/Solutions/prob08_Nultimate.py | p473lr/i-urge-mafia-gear | ae19efb1af2e85ed8bcbbcc3d12ae0f024f3565e | [
"Apache-2.0"
] | null | null | null | HP Code Wars Documents/2014/Solutions/prob08_Nultimate.py | p473lr/i-urge-mafia-gear | ae19efb1af2e85ed8bcbbcc3d12ae0f024f3565e | [
"Apache-2.0"
] | null | null | null | HP Code Wars Documents/2014/Solutions/prob08_Nultimate.py | p473lr/i-urge-mafia-gear | ae19efb1af2e85ed8bcbbcc3d12ae0f024f3565e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#CodeWars 2014
#
# N-ultimate Element
#The last element in a series may be called the ultimate element.
# The penultimate element is next-to last. So, by extension,
# the N-ultimate element is the Nth element from the end.
#
#Write a program to find the N-ultimate element in a series.
#
#
#Input
#
#Each line of input starts with an integer N, followed by a series of N
# or more words/numbers/strings, terminated with a $ symbol.
# The input ends with the number zero and a $ symbol.
#
#4 PROXIMATE DISTANT EXTREME FARTHEST ULTIMATE $
#6 999 0 426 123 1337 31415 1414 5 321 $
#2 WHO WHAT WHEN WHERE WHY HOW $
#3 RED GREEN BLUE YELLOW ORANGE PURPLE BLACK WHITE $
#7 GARCIA WANG ZHANG LI SMITH MULLER GONZALEZ SMIRNOV NGUYEN HERNANDEZ $
#0 $
#
#
#Output
#
#For each line of input the program must print the N-ultimate word.
#
#DISTANT
#123
#WHY
#PURPLE
# LI
#
import sys
print ("Enter N, words, $. 0 to end.")
# Each input line is: N, then the series, then a terminating "$".
for line in sys.stdin:
    words = line.split()
    N = int(words[0])
    if (N==0):
        break
    # The trailing "$" occupies the last slot, so the N-ultimate element
    # sits N+1 positions from the end of the split line.
    print (words[len(words)-N-1])
| 22.375 | 72 | 0.698324 |
6a73a7344b95c102ab36808ae36c36c69bf53bd1 | 2,933 | py | Python | lldb/examples/summaries/cocoa/metrics.py | bytesnake/Enzyme | 247606c279920d476645d2e319e574bf8be10fc9 | [
"Apache-2.0"
] | 427 | 2018-05-29T14:21:02.000Z | 2022-03-16T03:17:54.000Z | SymbolExtractorAndRenamer/lldb/examples/summaries/cocoa/metrics.py | PolideaPlayground/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | [
"Apache-2.0"
] | 25 | 2018-07-23T08:34:15.000Z | 2021-11-05T07:13:36.000Z | SymbolExtractorAndRenamer/lldb/examples/summaries/cocoa/metrics.py | PolideaPlayground/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | [
"Apache-2.0"
] | 52 | 2018-07-19T19:57:32.000Z | 2022-03-11T16:05:38.000Z | """
Objective-C runtime wrapper for use by LLDB Python formatters
part of The LLVM Compiler Infrastructure
This file is distributed under the University of Illinois Open Source
License. See LICENSE.TXT for details.
"""
import lldb
import time
import datetime
import inspect
class TimeMetrics:
    """Context manager that prints the time spent inside a code region."""

    @staticmethod
    def generate(label=None):
        """Factory alias for TimeMetrics(label)."""
        return TimeMetrics(label)

    def __init__(self, lbl=None):
        self.label = "" if lbl is None else lbl
        pass

    def __enter__(self):
        # Remember which function entered the region, for the report.
        caller = inspect.stack()[1]
        self.function = str(caller)
        # NOTE(review): time.clock() was removed in Python 3.8; this code
        # targets the older Python shipped with LLDB.
        self.enter_time = time.clock()

    def __exit__(self, a, b, c):
        self.exit_time = time.clock()
        print("It took " + str(self.exit_time - self.enter_time) +
              " time units to run through " + self.function + self.label)
        # Never suppress exceptions raised inside the region.
        return False
class Counter:
    """Counts metric hits and remembers what triggered each one."""

    def __init__(self):
        self.count = 0  # number of hits
        self.list = []  # names of the objects that triggered each hit

    def update(self, name):
        """Record one hit triggered by `name` (an SBValue or any object)."""
        self.count = self.count + 1
        # avoid getting the full dump of this ValueObject just to save its
        # metrics
        if isinstance(name, lldb.SBValue):
            self.list.append(name.GetName())
        else:
            self.list.append(str(name))

    def __str__(self):
        return str(self.count) + " times, for items [" + str(self.list) + "]"
class MetricsPrinter_Verbose:
    """Renders a Metrics object as one 'metric <name>: <counter>' line each."""

    def __init__(self, metrics):
        self.metrics = metrics

    def __str__(self):
        parts = []
        for name, counter in self.metrics.metrics.items():
            parts.append("metric " + str(name) + ": " + str(counter) + "\n")
        return "".join(parts)
class MetricsPrinter_Compact:
    """Renders only the hit counts of a Metrics object, one line per metric."""

    def __init__(self, metrics):
        self.metrics = metrics

    def __str__(self):
        parts = []
        for name, counter in self.metrics.metrics.items():
            parts.append("metric " + str(name) + " was hit " +
                         str(counter.count) + " times\n")
        return "".join(parts)
class Metrics:
    """Registry of named Counter metrics with success-rate reporting."""

    def __init__(self):
        self.metrics = {}  # metric name -> Counter

    def add_metric(self, name):
        """Register a new metric under `name` with a fresh counter."""
        self.metrics[name] = Counter()

    def metric_hit(self, metric, trigger):
        """Record one hit of `metric`, remembering what triggered it."""
        self.metrics[metric].update(trigger)

    def __getitem__(self, key):
        return self.metrics[key]

    def __getattr__(self, name):
        # Virtual attributes: .compact / .verbose return printer views.
        if name == 'compact':
            return MetricsPrinter_Compact(self)
        if name == 'verbose':
            return MetricsPrinter_Verbose(self)
        raise AttributeError("%r object has no attribute %r" %
                             (type(self).__name__, name))

    def __str__(self):
        return str(self.verbose)

    def metric_success(self, metric):
        """Return the fraction of all recorded hits on `metric` (0 if no hits)."""
        total_count = 0
        metric_count = self[metric].count
        for key, value in self.metrics.items():
            total_count = total_count + value.count
        if total_count > 0:
            return metric_count / float(total_count)
        return 0
| 25.955752 | 77 | 0.598704 |
977d7c3089d5bdef035080fd1585c21e7b5e0e11 | 2,324 | py | Python | analysis_interaction_on_elements.py | cetceeve/Abschlussarbeit-Log-Data-Analysis | 16cb272329c25a6fd5b51d9bb8bd4eecf0fe9487 | [
"MIT"
] | null | null | null | analysis_interaction_on_elements.py | cetceeve/Abschlussarbeit-Log-Data-Analysis | 16cb272329c25a6fd5b51d9bb8bd4eecf0fe9487 | [
"MIT"
] | null | null | null | analysis_interaction_on_elements.py | cetceeve/Abschlussarbeit-Log-Data-Analysis | 16cb272329c25a6fd5b51d9bb8bd4eecf0fe9487 | [
"MIT"
] | null | null | null | import utils
import decorators
print('setting up data analysis')
LOG_DATA = utils.import_log_data()
# get unique values for log datapoint property
@decorators.exec_all(LOG_DATA)
def unique_prop_values(arr, prop=None):
values = utils.get_property(arr, prop)
values = utils.remove_duplicate_entries(values)
# reversing order to match mental model of timeline from left to right
values.reverse()
return values
# task list is created once to ensure order
TASKS = unique_prop_values('taskID')
@decorators.exec_per_task(LOG_DATA, TASKS)
def count_interaction_per_task(arr, target, interaction=None):
    """Count log entries on `target` whose event type equals `interaction`."""
    values = utils.filter_by_property(arr, 'target', target)
    values = utils.get_property(values, 'type')
    return values.count(interaction)
USERS = list(LOG_DATA.keys())
@decorators.exec_per_user(LOG_DATA, USERS)
def count_interaction_per_user(arr, target, interaction=None):
    """Count one user's log entries on `target` with event type `interaction`."""
    values = utils.filter_by_property(arr, 'target', target)
    values = utils.get_property(values, 'type')
    return values.count(interaction)
# interaction targets are taken once to ensure order
TARGETS = unique_prop_values('target')
def create_dataset_per_task(interaction):
    """Build a CSV-ready table: one row per interaction target, one column per task."""
    print('creating dataset for ' + interaction + ' interaction per task')
    data = []
    data.append(['target', *TASKS])  # header row
    for target in TARGETS:
        data.append([target, *count_interaction_per_task(target, interaction)])
    return data
def create_dataset_per_user(interaction):
    """Build a CSV-ready table: one row per interaction target, one column per user."""
    print('creating dataset for ' + interaction + ' interaction per user')
    data = []
    data.append(['target', *USERS])  # header row
    for target in TARGETS:
        data.append([target, *count_interaction_per_user(target, interaction)])
    return data
print('crunching data')
utils.export_csv('analysis_scroll_targets_per_task.csv', create_dataset_per_task('scroll'))
utils.export_csv('analysis_click_targets_per_task.csv', create_dataset_per_task('click'))
utils.export_csv('analysis_change_targets_per_task.csv', create_dataset_per_task('change'))
utils.export_csv('analysis_scroll_targets_per_user.csv', create_dataset_per_user('scroll'))
utils.export_csv('analysis_click_targets_per_user.csv', create_dataset_per_user('click'))
utils.export_csv('analysis_change_targets_per_user.csv', create_dataset_per_user('change'))
print('analysis complete')
| 38.098361 | 91 | 0.768933 |
2a310046ffb70aefe78e867beaa4bbf858ec501f | 2,932 | py | Python | tools/gitignore/tests/test_gitignore.py | Johanna-hub/wpt | 7176f30f78dcfc600e627b8e5786ede4b79300ad | [
"BSD-3-Clause"
] | 9 | 2019-04-01T10:57:10.000Z | 2021-12-02T11:12:06.000Z | tools/gitignore/tests/test_gitignore.py | Johanna-hub/wpt | 7176f30f78dcfc600e627b8e5786ede4b79300ad | [
"BSD-3-Clause"
] | 33 | 2018-07-11T22:04:44.000Z | 2019-03-18T15:38:51.000Z | tools/gitignore/tests/test_gitignore.py | Johanna-hub/wpt | 7176f30f78dcfc600e627b8e5786ede4b79300ad | [
"BSD-3-Clause"
] | 7 | 2019-04-24T10:51:15.000Z | 2021-12-17T16:53:01.000Z | import pytest
from ..gitignore import fnmatch_translate, PathFilter
match_data = [
("foo", True, ["a/foo", "foo"]),
("*.a", True, ["foo.a", "a/foo.a", "a/b/foo.a", "a.a/foo.a"]),
("*.py[co]", True, ["a.pyc", "a.pyo", "a/b/c.pyc"]),
("\\#*", True, ["#a", "a/#b"]),
("*#", True, ["a#", "a/b#", "#a#"]),
("/*.c", True, ["a.c", ".c"]),
("**/b", False, ["a/b", "a/c/b"]),
("*b", True, ["ab"]),
("*b", True, ["a/b"]),
("**/b", False, ["a/b"]),
("a/", True, ["a"]),
("a[/]b", True, []),
("**/b", False, ["a/c/b"]),
("a?c", True, ["abc"]),
("a[^b]c", True, ["acc"]),
("a[b-c]c", True, ["abc", "acc"]),
("a[^]c", True, ["ac"]), # This is probably wrong
("a[^]c", True, ["ac"]), # This is probably wrong
]
mismatch_data = [
("foo", True, ["foob", "afoo"]),
("*.a", True, ["a", "foo:a", "a.a/foo"]),
("*.py[co]", True, ["a.pyd", "pyo", "a.py"]),
("a", True, ["ab"]),
("a?c", True, ["ac", "abbc"]),
("a[^b]c", True, ["abc"]),
("a[b-c]c", True, ["adc"]),
]
invalid_data = [
"[a",
"***/foo",
"a\\",
"**b",
"b**/",
"[[]"
]
filter_data = [
(["foo", "bar/", "/a", "*.py"],
[("", ["foo", "bar", "baz"], ["a"]),
("baz", ["a"], ["foo", "bar"])],
[(["baz"], []),
(["a"], ["bar"])]),
(["#foo", "", "a*", "!a.py"],
[("", ["foo"], ["a", "a.foo", "a.py"])],
[(["foo"], ["a.py"])]),
]
def expand_data(compact_data):
    """Flatten (pattern, name_only, inputs) triples into one tuple per input."""
    for pattern, name_only, cases in compact_data:
        for case in cases:
            yield pattern, name_only, case
@pytest.mark.parametrize("pattern, name_only, input", expand_data(match_data))
def tests_match(pattern, name_only, input):
    """Each pattern must match its inputs and report the expected name-only flag."""
    name_only_result, regexp = fnmatch_translate(pattern)
    assert name_only_result == name_only
    if name_only:
        # name-only patterns are matched against the path's basename
        input = input.rsplit("/", 1)[-1]
    assert regexp.match(input) is not None
@pytest.mark.parametrize("pattern, name_only, input", expand_data(mismatch_data))
def tests_no_match(pattern, name_only, input):
    """Each pattern must NOT match its listed inputs."""
    name_only_result, regexp = fnmatch_translate(pattern)
    assert name_only_result == name_only
    if name_only:
        # name-only patterns are matched against the path's basename
        input = input.rsplit("/", 1)[-1]
    assert regexp.match(input) is None
@pytest.mark.parametrize("pattern", invalid_data)
def tests_invalid(pattern):
    """Malformed gitignore patterns must raise ValueError."""
    with pytest.raises(ValueError):
        fnmatch_translate(pattern)
@pytest.mark.parametrize("rules, input, expected", filter_data)
def test_path_filter(rules, input, expected):
    """PathFilter must prune the dirs/files listed in `expected` for each walk entry."""
    f = PathFilter(None, rules)
    # Add some fake stat data: PathFilter expects (name, stat) pairs for the
    # dirs and files of each (path, dirs, files) walk tuple.
    for i, item in enumerate(input):
        repl = [input[i][0]]
        for j in [1, 2]:
            repl.append([(name, None) for name in input[i][j]])
        input[i] = tuple(repl)
    for i, output in enumerate(f(input)):
        assert output[0] == input[i][0]
        for j in [1, 2]:
            # compare only the names, ignoring the fake stat entries
            assert [item[0] for item in output[j]] == expected[i][j-1]
| 29.029703 | 81 | 0.507844 |
90f4dc62c7b904cfe8a0e135b659e2ecd4e714dd | 9,650 | py | Python | tests/api/v1_3_1/test_sites.py | nonstdout/dnacentersdk | dbbbc4baa5300aa9e5c9193f2ea71438018095f5 | [
"MIT"
] | null | null | null | tests/api/v1_3_1/test_sites.py | nonstdout/dnacentersdk | dbbbc4baa5300aa9e5c9193f2ea71438018095f5 | [
"MIT"
] | null | null | null | tests/api/v1_3_1/test_sites.py | nonstdout/dnacentersdk | dbbbc4baa5300aa9e5c9193f2ea71438018095f5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""DNACenterAPI sites API fixtures and tests.
Copyright (c) 2019-2020 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from tests.environment import DNA_CENTER_VERSION
pytestmark = pytest.mark.skipif(DNA_CENTER_VERSION != '1.3.1', reason='version does not match')
def is_valid_get_site(json_schema_validate, obj):
json_schema_validate('jsd_6fb4ab3643faa80f_v1_3_1').validate(obj)
return True
def get_site(api):
endpoint_result = api.sites.get_site(
limit='string',
name='string',
offset='string',
site_id='string',
type='string'
)
return endpoint_result
@pytest.mark.sites
def test_get_site(api, validator):
assert is_valid_get_site(
validator,
get_site(api)
)
def get_site_default(api):
endpoint_result = api.sites.get_site(
limit=None,
name=None,
offset=None,
site_id=None,
type=None
)
return endpoint_result
@pytest.mark.sites
def test_get_site_default(api, validator):
try:
assert is_valid_get_site(
validator,
get_site_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_update_site(json_schema_validate, obj):
json_schema_validate('jsd_eeb7eb4b4bd8a1dd_v1_3_1').validate(obj)
return True
def update_site(api):
endpoint_result = api.sites.update_site(
active_validation=True,
payload=None,
site={'area': {'name': 'string', 'parentName': 'string'}, 'building': {'name': 'string', 'address': 'string', 'parentName': 'string', 'latitude': 0, 'longitude': 0}, 'floor': {'name': 'string', 'rfModel': 'Cubes And Walled Offices', 'width': 0, 'length': 0, 'height': 0}},
site_id='string',
type='area'
)
return endpoint_result
@pytest.mark.sites
def test_update_site(api, validator):
assert is_valid_update_site(
validator,
update_site(api)
)
def update_site_default(api):
endpoint_result = api.sites.update_site(
active_validation=True,
payload=None,
site=None,
site_id='string',
type=None
)
return endpoint_result
@pytest.mark.sites
def test_update_site_default(api, validator):
try:
assert is_valid_update_site(
validator,
update_site_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_membership(json_schema_validate, obj):
json_schema_validate('jsd_eba669054e08a60e_v1_3_1').validate(obj)
return True
def get_membership(api):
endpoint_result = api.sites.get_membership(
site_id='string'
)
return endpoint_result
@pytest.mark.sites
def test_get_membership(api, validator):
assert is_valid_get_membership(
validator,
get_membership(api)
)
def get_membership_default(api):
endpoint_result = api.sites.get_membership(
site_id='string'
)
return endpoint_result
@pytest.mark.sites
def test_get_membership_default(api, validator):
try:
assert is_valid_get_membership(
validator,
get_membership_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_site_health(json_schema_validate, obj):
json_schema_validate('jsd_15b7aa0c4dda8e85_v1_3_1').validate(obj)
return True
def get_site_health(api):
endpoint_result = api.sites.get_site_health(
timestamp=0
)
return endpoint_result
@pytest.mark.sites
def test_get_site_health(api, validator):
assert is_valid_get_site_health(
validator,
get_site_health(api)
)
def get_site_health_default(api):
endpoint_result = api.sites.get_site_health(
timestamp=None
)
return endpoint_result
@pytest.mark.sites
def test_get_site_health_default(api, validator):
try:
assert is_valid_get_site_health(
validator,
get_site_health_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_delete_site(json_schema_validate, obj):
json_schema_validate('jsd_f083cb13484a8fae_v1_3_1').validate(obj)
return True
def delete_site(api):
endpoint_result = api.sites.delete_site(
site_id='string'
)
return endpoint_result
@pytest.mark.sites
def test_delete_site(api, validator):
assert is_valid_delete_site(
validator,
delete_site(api)
)
def delete_site_default(api):
endpoint_result = api.sites.delete_site(
site_id='string'
)
return endpoint_result
@pytest.mark.sites
def test_delete_site_default(api, validator):
try:
assert is_valid_delete_site(
validator,
delete_site_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_assign_device_to_site(json_schema_validate, obj):
json_schema_validate('jsd_eeb168eb41988e07_v1_3_1').validate(obj)
return True
def assign_device_to_site(api):
endpoint_result = api.sites.assign_device_to_site(
active_validation=True,
device=[{'ip': 'string'}],
payload=None,
site_id='string'
)
return endpoint_result
@pytest.mark.sites
def test_assign_device_to_site(api, validator):
assert is_valid_assign_device_to_site(
validator,
assign_device_to_site(api)
)
def assign_device_to_site_default(api):
endpoint_result = api.sites.assign_device_to_site(
active_validation=True,
device=None,
payload=None,
site_id='string'
)
return endpoint_result
@pytest.mark.sites
def test_assign_device_to_site_default(api, validator):
try:
assert is_valid_assign_device_to_site(
validator,
assign_device_to_site_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_create_site(json_schema_validate, obj):
json_schema_validate('jsd_50b589fd4c7a930a_v1_3_1').validate(obj)
return True
def create_site(api):
endpoint_result = api.sites.create_site(
active_validation=True,
payload=None,
site={'area': {'name': 'string', 'parentName': 'string'}, 'building': {'name': 'string', 'address': 'string', 'parentName': 'string', 'latitude': 0, 'longitude': 0}, 'floor': {'name': 'string', 'parentName': 'string', 'rfModel': 'Cubes And Walled Offices', 'width': 0, 'length': 0, 'height': 0}},
type='area'
)
return endpoint_result
@pytest.mark.sites
def test_create_site(api, validator):
assert is_valid_create_site(
validator,
create_site(api)
)
def create_site_default(api):
endpoint_result = api.sites.create_site(
active_validation=True,
payload=None,
site=None,
type=None
)
return endpoint_result
@pytest.mark.sites
def test_create_site_default(api, validator):
try:
assert is_valid_create_site(
validator,
create_site_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_site_count(json_schema_validate, obj):
json_schema_validate('jsd_b0b7eabc4f4b9b28_v1_3_1').validate(obj)
return True
def get_site_count(api):
endpoint_result = api.sites.get_site_count(
site_id='string'
)
return endpoint_result
@pytest.mark.sites
def test_get_site_count(api, validator):
assert is_valid_get_site_count(
validator,
get_site_count(api)
)
def get_site_count_default(api):
endpoint_result = api.sites.get_site_count(
site_id=None
)
return endpoint_result
@pytest.mark.sites
def test_get_site_count_default(api, validator):
try:
assert is_valid_get_site_count(
validator,
get_site_count_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
| 26.222826 | 304 | 0.689637 |
9d485db3919805867d6c5dcff050ea543171ec12 | 5,339 | py | Python | ansible/modules/cloud/openstack/os_server_group.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | ansible/modules/cloud/openstack/os_server_group.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | ansible/modules/cloud/openstack/os_server_group.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2020-02-13T14:24:57.000Z | 2020-02-13T14:24:57.000Z | #!/usr/bin/python
# Copyright (c) 2016 Catalyst IT Limited
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_server_group
short_description: Manage OpenStack server groups
extends_documentation_fragment: openstack
version_added: "2.2"
author: "Lingxian Kong (@kong)"
description:
- Add or remove server groups from OpenStack.
options:
state:
description:
- Indicate desired state of the resource. When I(state) is 'present',
then I(policies) is required.
choices: ['present', 'absent']
required: false
default: present
name:
description:
- Server group name.
required: true
policies:
description:
- A list of one or more policy names to associate with the server
group. The list must contain at least one policy name. The current
valid policy names are anti-affinity, affinity, soft-anti-affinity
and soft-affinity.
required: false
availability_zone:
description:
- Ignored. Present for backwards compatability
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a server group with 'affinity' policy.
- os_server_group:
state: present
auth:
auth_url: https://api.cloud.catalyst.net.nz:5000/v2.0
username: admin
password: admin
project_name: admin
name: my_server_group
policies:
- affinity
# Delete 'my_server_group' server group.
- os_server_group:
state: absent
auth:
auth_url: https://api.cloud.catalyst.net.nz:5000/v2.0
username: admin
password: admin
project_name: admin
name: my_server_group
'''
RETURN = '''
id:
description: Unique UUID.
returned: success
type: string
name:
description: The name of the server group.
returned: success
type: string
policies:
description: A list of one or more policy names of the server group.
returned: success
type: list of strings
members:
description: A list of members in the server group.
returned: success
type: list of strings
metadata:
description: Metadata key and value pairs.
returned: success
type: dict
project_id:
description: The project ID who owns the server group.
returned: success
type: string
user_id:
description: The user ID who owns the server group.
returned: success
type: string
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def _system_state_change(state, server_group):
if state == 'present' and not server_group:
return True
if state == 'absent' and server_group:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
policies=dict(required=False, type='list'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(
argument_spec,
supports_check_mode=True,
**module_kwargs
)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
name = module.params['name']
policies = module.params['policies']
state = module.params['state']
try:
cloud = shade.openstack_cloud(**module.params)
server_group = cloud.get_server_group(name)
if module.check_mode:
module.exit_json(
changed=_system_state_change(state, server_group)
)
changed = False
if state == 'present':
if not server_group:
if not policies:
module.fail_json(
msg="Parameter 'policies' is required in Server Group "
"Create"
)
server_group = cloud.create_server_group(name, policies)
changed = True
module.exit_json(
changed=changed,
id=server_group['id'],
server_group=server_group
)
if state == 'absent':
if server_group:
cloud.delete_server_group(server_group['id'])
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e), extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| 27.95288 | 79 | 0.646563 |
3505e714175c1f8859f6ced30e2c218383148ffe | 18,166 | py | Python | Lib/site-packages/tensorflow_probability/python/distributions/_numpy/poisson_lognormal.py | caiyongji/tf2.3.1-py3.7.9-full-built | ace4efcbf05b2b494388739718a18c13eab83c71 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/site-packages/tensorflow_probability/python/distributions/_numpy/poisson_lognormal.py | caiyongji/tf2.3.1-py3.7.9-full-built | ace4efcbf05b2b494388739718a18c13eab83c71 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/site-packages/tensorflow_probability/python/distributions/_numpy/poisson_lognormal.py | caiyongji/tf2.3.1-py3.7.9-full-built | ace4efcbf05b2b494388739718a18c13eab83c71 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The PoissonLogNormalQuadratureCompound distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from tensorflow_probability.python.internal.backend.numpy.compat import v2 as tf
from tensorflow_probability.python.bijectors._numpy import exp as exp_bijector
from tensorflow_probability.python.distributions._numpy import categorical
from tensorflow_probability.python.distributions._numpy import distribution
from tensorflow_probability.python.distributions._numpy import normal
from tensorflow_probability.python.distributions._numpy import poisson
from tensorflow_probability.python.distributions._numpy import transformed_distribution
from tensorflow_probability.python.internal._numpy import assert_util
from tensorflow_probability.python.internal._numpy import distribution_util
from tensorflow_probability.python.internal._numpy import dtype_util
from tensorflow_probability.python.internal._numpy import prefer_static
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal._numpy import samplers
from tensorflow_probability.python.internal._numpy import tensor_util
from tensorflow_probability.python.internal._numpy import tensorshape_util
__all__ = [
'PoissonLogNormalQuadratureCompound',
'quadrature_scheme_lognormal_gauss_hermite',
'quadrature_scheme_lognormal_quantiles',
]
def quadrature_scheme_lognormal_gauss_hermite(
loc, scale, quadrature_size,
validate_args=False, name=None): # pylint: disable=unused-argument
"""Use Gauss-Hermite quadrature to form quadrature on positive-reals.
Note: for a given `quadrature_size`, this method is generally less accurate
than `quadrature_scheme_lognormal_quantiles`.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: (Batch of) length-`quadrature_size` vectors representing the
`log_rate` parameters of a `Poisson`.
probs: (Batch of) length-`quadrature_size` vectors representing the
weight associate with each `grid` value.
"""
with tf.name_scope(
name or 'vector_diffeomixture_quadrature_gauss_hermite'):
grid, probs = np.polynomial.hermite.hermgauss(deg=quadrature_size)
npdt = dtype_util.as_numpy_dtype(loc.dtype)
grid = grid.astype(npdt)
probs = probs.astype(npdt)
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
probs = tf.convert_to_tensor(probs, name='probs', dtype=loc.dtype)
# The following maps the broadcast of `loc` and `scale` to each grid
# point, i.e., we are creating several log-rates that correspond to the
# different Gauss-Hermite quadrature points and (possible) batches of
# `loc` and `scale`.
grid = (loc[..., tf.newaxis] + np.sqrt(2.) * scale[..., tf.newaxis] * grid)
return grid, probs
def quadrature_scheme_lognormal_quantiles(
loc, scale, quadrature_size,
validate_args=False, name=None):
"""Use LogNormal quantiles to form quadrature on positive-reals.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: (Batch of) length-`quadrature_size` vectors representing the
`log_rate` parameters of a `Poisson`.
probs: (Batch of) length-`quadrature_size` vectors representing the
weight associate with each `grid` value.
"""
with tf.name_scope(name or 'quadrature_scheme_lognormal_quantiles'):
# Create a LogNormal distribution.
dist = transformed_distribution.TransformedDistribution(
distribution=normal.Normal(loc=loc, scale=scale),
bijector=exp_bijector.Exp(),
validate_args=validate_args)
batch_ndims = tensorshape_util.rank(dist.batch_shape)
if batch_ndims is None:
batch_ndims = tf.shape(dist.batch_shape_tensor())[0]
def _compute_quantiles():
"""Helper to build quantiles."""
# Omit {0, 1} since they might lead to Inf/NaN.
zero = tf.zeros([], dtype=dist.dtype)
edges = tf.linspace(zero, 1., quadrature_size + 3)[1:-1]
# Expand edges so its broadcast across batch dims.
edges = tf.reshape(
edges,
shape=tf.concat(
[[-1], tf.ones([batch_ndims], dtype=tf.int32)], axis=0))
quantiles = dist.quantile(edges)
# Cyclically permute left by one.
perm = tf.concat([tf.range(1, 1 + batch_ndims), [0]], axis=0)
quantiles = tf.transpose(a=quantiles, perm=perm)
return quantiles
quantiles = _compute_quantiles()
# Compute grid as quantile midpoints.
grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.
# Set shape hints.
new_shape = tensorshape_util.concatenate(dist.batch_shape,
[quadrature_size])
tensorshape_util.set_shape(grid, new_shape)
# By construction probs is constant, i.e., `1 / quadrature_size`. This is
# important, because non-constant probs leads to non-reparameterizable
# samples.
probs = tf.fill(
dims=[quadrature_size],
value=tf.math.reciprocal(tf.cast(quadrature_size, dist.dtype)))
return grid, probs
class PoissonLogNormalQuadratureCompound(distribution.Distribution):
"""`PoissonLogNormalQuadratureCompound` distribution.
The `PoissonLogNormalQuadratureCompound` is an approximation to a
Poisson-LogNormal [compound distribution](
https://en.wikipedia.org/wiki/Compound_probability_distribution), i.e.,
```none
p(k|loc, scale)
= int_{R_+} dl LogNormal(l | loc, scale) Poisson(k | l)
approx= sum{ prob[d] Poisson(k | lambda(grid[d])) : d=0, ..., deg-1 }
```
By default, the `grid` is chosen as quantiles of the `LogNormal` distribution
parameterized by `loc`, `scale` and the `prob` vector is
`[1. / quadrature_size]*quadrature_size`.
In the non-approximation case, a draw from the LogNormal prior represents the
Poisson rate parameter. Unfortunately, the non-approximate distribution lacks
an analytical probability density function (pdf). Therefore the
`PoissonLogNormalQuadratureCompound` class implements an approximation based
on [quadrature](https://en.wikipedia.org/wiki/Numerical_integration).
Note: although the `PoissonLogNormalQuadratureCompound` is approximately the
Poisson-LogNormal compound distribution, it is itself a valid distribution.
Viz., it possesses a `sample`, `log_prob`, `mean`, `variance`, etc. which are
all mutually consistent.
#### Mathematical Details
The `PoissonLogNormalQuadratureCompound` approximates a Poisson-LogNormal
[compound distribution](
https://en.wikipedia.org/wiki/Compound_probability_distribution). Using
variable-substitution and [numerical quadrature](
https://en.wikipedia.org/wiki/Numerical_integration) (default:
based on `LogNormal` quantiles) we can redefine the distribution to be a
parameter-less convex combination of `deg` different Poisson samples.
That is, defined over positive integers, this distribution is parameterized
by a (batch of) `loc` and `scale` scalars.
The probability density function (pdf) is,
```none
pdf(k | loc, scale, deg)
= sum{ prob[d] Poisson(k | lambda=exp(grid[d]))
: d=0, ..., deg-1 }
```
#### Examples
```python
tfd = tfp.distributions
# Create two batches of PoissonLogNormalQuadratureCompounds, one with
# prior `loc = 0.` and another with `loc = 1.` In both cases `scale = 1.`
pln = tfd.PoissonLogNormalQuadratureCompound(
loc=[0., -0.5],
scale=1.,
quadrature_size=10,
validate_args=True)
"""
def __init__(self,
loc,
scale,
quadrature_size=8,
quadrature_fn=quadrature_scheme_lognormal_quantiles,
validate_args=False,
allow_nan_stats=True,
name='PoissonLogNormalQuadratureCompound'):
"""Constructs the PoissonLogNormalQuadratureCompound`.
Note: `probs` returned by (optional) `quadrature_fn` are presumed to be
either a length-`quadrature_size` vector or a batch of vectors in 1-to-1
correspondence with the returned `grid`. (I.e., broadcasting is only
partially supported.)
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
quadrature_fn: Python callable taking `loc`, `scale`,
`quadrature_size`, `validate_args` and returning `tuple(grid, probs)`
representing the LogNormal grid and corresponding normalized weight.
Default value: `quadrature_scheme_lognormal_quantiles`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value '`NaN`' to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `quadrature_grid` and `quadrature_probs` have different base
`dtype`.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([loc, scale], tf.float32)
self._loc = tensor_util.convert_nonref_to_tensor(
loc, name='loc', dtype=dtype)
self._scale = tensor_util.convert_nonref_to_tensor(
scale, name='scale', dtype=dtype)
self._quadrature_fn = quadrature_fn
dtype_util.assert_same_float_dtype([self._loc, self._scale])
self._quadrature_size = quadrature_size
super(PoissonLogNormalQuadratureCompound, self).__init__(
dtype=dtype,
reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
def poisson_and_mixture_distributions(self):
"""Returns the Poisson and Mixture distribution parameterized by the quadrature grid and weights."""
loc = tf.convert_to_tensor(self.loc)
scale = tf.convert_to_tensor(self.scale)
quadrature_grid, quadrature_probs = tuple(self._quadrature_fn(
loc, scale, self.quadrature_size, self.validate_args))
dt = quadrature_grid.dtype
if not dtype_util.base_equal(dt, quadrature_probs.dtype):
raise TypeError('Quadrature grid dtype ({}) does not match quadrature '
'probs dtype ({}).'.format(
dtype_util.name(dt),
dtype_util.name(quadrature_probs.dtype)))
dist = poisson.Poisson(
log_rate=quadrature_grid,
validate_args=self.validate_args,
allow_nan_stats=self.allow_nan_stats)
mixture_dist = categorical.Categorical(
logits=tf.math.log(quadrature_probs),
validate_args=self.validate_args,
allow_nan_stats=self.allow_nan_stats)
return dist, mixture_dist
@property
def loc(self):
"""Location parameter of the LogNormal prior."""
return self._loc
@property
def scale(self):
"""Scale parameter of the LogNormal prior."""
return self._scale
@property
def quadrature_size(self):
return self._quadrature_size
def _batch_shape_tensor(self, distributions=None):
if distributions is None:
distributions = self.poisson_and_mixture_distributions()
dist, mixture_dist = distributions
return tf.broadcast_dynamic_shape(
dist.batch_shape_tensor(),
prefer_static.shape(mixture_dist.logits))[:-1]
def _batch_shape(self):
dist, mixture_dist = self.poisson_and_mixture_distributions()
return tf.broadcast_static_shape(
dist.batch_shape,
mixture_dist.logits.shape)[:-1]
def _event_shape(self):
return tf.TensorShape([])
def _sample_n(self, n, seed=None):
# Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[] then get
# ids as a [n]-shaped vector.
distributions = self.poisson_and_mixture_distributions()
dist, mixture_dist = distributions
batch_size = tensorshape_util.num_elements(self.batch_shape)
if batch_size is None:
batch_size = tf.reduce_prod(
self._batch_shape_tensor(distributions=distributions))
# We need to 'sample extra' from the mixture distribution if it doesn't
# already specify a probs vector for each batch coordinate.
# We only support this kind of reduced broadcasting, i.e., there is exactly
# one probs vector for all batch dims or one for each.
mixture_seed, poisson_seed = samplers.split_seed(
seed, salt='PoissonLogNormalQuadratureCompound')
ids = mixture_dist.sample(
sample_shape=concat_vectors(
[n],
distribution_util.pick_vector(
mixture_dist.is_scalar_batch(),
[batch_size],
np.int32([]))),
seed=mixture_seed)
# We need to flatten batch dims in case mixture_dist has its own
# batch dims.
ids = tf.reshape(
ids,
shape=concat_vectors([n],
distribution_util.pick_vector(
self.is_scalar_batch(), np.int32([]),
np.int32([-1]))))
# Stride `quadrature_size` for `batch_size` number of times.
offset = tf.range(
start=0,
limit=batch_size * self._quadrature_size,
delta=self._quadrature_size,
dtype=ids.dtype)
ids = ids + offset
rate = tf.gather(tf.reshape(dist.rate_parameter(), shape=[-1]), ids)
rate = tf.reshape(
rate, shape=concat_vectors([n], self._batch_shape_tensor(
distributions=distributions)))
return samplers.poisson(
shape=[], lam=rate, dtype=self.dtype, seed=poisson_seed)
def _log_prob(self, x):
dist, mixture_dist = self.poisson_and_mixture_distributions()
return tf.reduce_logsumexp((mixture_dist.logits +
dist.log_prob(x[..., tf.newaxis])),
axis=-1)
def _mean(self, distributions=None):
if distributions is None:
distributions = self.poisson_and_mixture_distributions()
dist, mixture_dist = distributions
return tf.exp(
tf.reduce_logsumexp(
mixture_dist.logits + dist.log_rate,
axis=-1))
def _variance(self):
return tf.exp(self._log_variance())
def _stddev(self):
return tf.exp(0.5 * self._log_variance())
def _log_variance(self):
# Following calculation is based on law of total variance:
#
# Var[Z] = E[Var[Z | V]] + Var[E[Z | V]]
#
# where,
#
# Z|v ~ interpolate_affine[v](dist)
# V ~ mixture_dist
#
# thus,
#
# E[Var[Z | V]] = sum{ prob[d] Var[d] : d=0, ..., deg-1 }
# Var[E[Z | V]] = sum{ prob[d] (Mean[d] - Mean)**2 : d=0, ..., deg-1 }
distributions = self.poisson_and_mixture_distributions()
dist, mixture_dist = distributions
v = tf.stack(
[
# log(dist.variance()) = log(Var[d]) = log(rate[d])
dist.log_rate,
# log((Mean[d] - Mean)**2)
2. * tf.math.log(
tf.abs(
dist.mean() -
self._mean(distributions=distributions)[..., tf.newaxis])),
],
axis=-1)
return tf.reduce_logsumexp(
mixture_dist.logits[..., tf.newaxis] + v, axis=[-2, -1])
def _default_event_space_bijector(self):
return
def _sample_control_dependencies(self, x):
assertions = []
if not self.validate_args:
return assertions
assertions.append(assert_util.assert_non_negative(
x, message='Sample must be non-negative.'))
return assertions
def concat_vectors(*args):
"""Concatenates input vectors, statically if possible."""
args_ = [tf.get_static_value(x) for x in args]
if any(vec is None for vec in args_):
return tf.concat(args, axis=0)
return [val for vec in args_ for val in vec]
| 40.101545 | 104 | 0.690961 |
6d9920f4cd59e80613591cc06c7e4fa60ed049b4 | 14,496 | py | Python | mmdet/models/bbox_heads/convfc_bbox_head.py | LiGangszu/PedestrianDetection-HGPD | 3874e331c8afe4cc20fc49de7ebdbe77db277c98 | [
"Apache-2.0"
] | 9 | 2021-04-02T12:21:38.000Z | 2021-08-19T07:55:19.000Z | mmdet/models/bbox_heads/convfc_bbox_head.py | LiGangszu/PedestrianDetection-HGPD | 3874e331c8afe4cc20fc49de7ebdbe77db277c98 | [
"Apache-2.0"
] | 1 | 2021-05-02T18:34:06.000Z | 2021-05-12T04:04:57.000Z | mmdet/models/bbox_heads/convfc_bbox_head.py | LiGangszu/PedestrianDetection-HGPD | 3874e331c8afe4cc20fc49de7ebdbe77db277c98 | [
"Apache-2.0"
] | 2 | 2021-04-28T09:27:45.000Z | 2021-06-07T12:02:01.000Z | import torch.nn as nn
from ..registry import HEADS
from ..utils import ConvModule
from .bbox_head import BBoxHead
import torch
from ..utils.norm import build_norm_layer
from mmcv.cnn import constant_init
import random
from mmdet.core.bbox.geometry import bbox_overlaps
import numpy as np
import pdb
@HEADS.register_module
class ConvFCBBoxHead(BBoxHead):
r"""More general bbox head, with shared conv and fc layers and two optional
separated branches.
/-> cls convs -> cls fcs -> cls
shared convs -> shared fcs
\-> reg convs -> reg fcs -> reg
""" # noqa: W605
    def __init__(self,
                 num_shared_convs=0,
                 num_shared_fcs=0,
                 num_cls_convs=0,
                 num_cls_fcs=0,
                 num_reg_convs=0,
                 num_reg_fcs=0,
                 conv_out_channels=256,
                 fc_out_channels=1024,
                 conv_cfg=None,
                 norm_cfg=None,
                 *args,
                 **kwargs):
        """Build the shared/cls/reg conv-fc branches plus the graph modules
        (self-attention, affinity, inter-proposal mixing) used by forward().
        """
        super(ConvFCBBoxHead, self).__init__(*args, **kwargs)
        # At least one conv or fc layer must be requested somewhere.
        assert (num_shared_convs + num_shared_fcs + num_cls_convs +
                num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
        # Branch-specific convs are incompatible with shared fcs.
        if num_cls_convs > 0 or num_reg_convs > 0:
            assert num_shared_fcs == 0
        if not self.with_cls:
            assert num_cls_convs == 0 and num_cls_fcs == 0
        if not self.with_reg:
            assert num_reg_convs == 0 and num_reg_fcs == 0
        self.num_shared_convs = num_shared_convs
        self.num_shared_fcs = num_shared_fcs
        self.num_cls_convs = num_cls_convs
        self.num_cls_fcs = num_cls_fcs
        self.num_reg_convs = num_reg_convs
        self.num_reg_fcs = num_reg_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg

        # add shared convs and fcs
        self.shared_convs, self.shared_fcs, last_layer_dim = \
            self._add_conv_fc_branch(
                self.num_shared_convs, self.num_shared_fcs, self.in_channels,
                True)
        self.shared_out_channels = last_layer_dim

        # add cls specific branch
        self.cls_convs, self.cls_fcs, self.cls_last_dim = \
            self._add_conv_fc_branch(
                self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)

        # add reg specific branch
        self.reg_convs, self.reg_fcs, self.reg_last_dim = \
            self._add_conv_fc_branch(
                self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)

        # Without shared fcs or pooling, branch fcs see flattened RoI maps.
        if self.num_shared_fcs == 0 and not self.with_avg_pool:
            if self.num_cls_fcs == 0:
                self.cls_last_dim *= self.roi_feat_area
            if self.num_reg_fcs == 0:
                self.reg_last_dim *= self.roi_feat_area

        self.relu = nn.ReLU(inplace=True)
        # Fuses the concatenated inter- and intra-proposal features
        # (1024 + 1024 = 2048) back down to 1024.
        self.concat_fc = nn.Linear(2048, 1024)
        # self-attention module
        self.attention_fc1 = nn.ModuleList()
        self.attention_fc1.append(
            nn.Linear(1024, 512))
        self.attention_fc1.append(
            nn.Linear(512, 128))
        self.attention_logits = nn.Linear(128, 1)
        # 3072 = 3 body parts x 1024 (see intra_graph reshape); the BN
        # below is likewise built with 3 channels — assumes 3 body parts,
        # confirm against the RoI feature extractor.
        self.part_out = nn.Linear(3072, 1024)
        # affinity module
        self.affinity_fc1 = nn.ModuleList()
        self.affinity_fc1.append(
            nn.Linear(1024, 64))
        self.affinity_fc2 = nn.ModuleList()
        self.affinity_fc2.append(
            nn.Linear(1024, 64))
        self.weight_fc = nn.Linear(64, 1)
        # BN layer is registered under a dynamic name; see the `norm` property.
        self.norm_name, norm = build_norm_layer(dict(type='BN'), 3)
        self.add_module(self.norm_name, norm)
        self.parameter_matrix = nn.Linear(1024, 1024)
        self.parameter_matrix2 = nn.Linear(1024, 1024)

        if self.with_cls:
            self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes)
        if self.with_reg:
            out_dim_reg = (4 if self.reg_class_agnostic else 4 *
                           self.num_classes)
            self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)
    def _add_conv_fc_branch(self,
                            num_branch_convs,
                            num_branch_fcs,
                            in_channels,
                            is_shared=False):
        """Add shared or separable branch

        convs -> avg pool (optional) -> fcs

        Returns:
            (branch_convs, branch_fcs, last_layer_dim): the conv and fc
            ModuleLists plus the feature dimension at the branch output.
        """
        last_layer_dim = in_channels
        # add branch specific conv layers
        branch_convs = nn.ModuleList()
        if num_branch_convs > 0:
            for i in range(num_branch_convs):
                # First conv takes the incoming channels; the rest chain
                # at conv_out_channels.
                conv_in_channels = (
                    last_layer_dim if i == 0 else self.conv_out_channels)
                branch_convs.append(
                    ConvModule(
                        conv_in_channels,
                        self.conv_out_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
            last_layer_dim = self.conv_out_channels
        # add branch specific fc layers
        branch_fcs = nn.ModuleList()
        if num_branch_fcs > 0:
            # for shared branch, only consider self.with_avg_pool
            # for separated branches, also consider self.num_shared_fcs
            if (is_shared
                    or self.num_shared_fcs == 0) and not self.with_avg_pool:
                # The first fc consumes the flattened spatial map.
                last_layer_dim *= self.roi_feat_area
            for i in range(num_branch_fcs):
                fc_in_channels = (
                    last_layer_dim if i == 0 else self.fc_out_channels)
                branch_fcs.append(
                    nn.Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        return branch_convs, branch_fcs, last_layer_dim
def init_weights(self):
super(ConvFCBBoxHead, self).init_weights()
for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
for m in module_list.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
for m in self.attention_fc1.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
nn.init.xavier_uniform_(self.attention_logits.weight)
nn.init.constant_(self.attention_logits.bias, 0)
for m in self.affinity_fc1.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
for m in self.affinity_fc2.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
nn.init.xavier_uniform_(self.weight_fc.weight)
nn.init.constant_(self.weight_fc.bias, 0)
constant_init(self.norm, 1, bias=0)
nn.init.xavier_uniform_(self.part_out.weight)
nn.init.constant_(self.part_out.bias, 0)
nn.init.xavier_uniform_(self.concat_fc.weight)
nn.init.constant_(self.concat_fc.bias, 0)
nn.init.xavier_uniform_(self.parameter_matrix.weight)
nn.init.constant_(self.parameter_matrix.bias, 0)
nn.init.xavier_uniform_(self.parameter_matrix2.weight)
nn.init.constant_(self.parameter_matrix2.bias, 0)
    def forward(self, x, rois, num_proposal_list):
        """Run the head: graph-enhanced classification and box regression.

        Args:
            x: RoI features. After the first shared fc it is indexed as
                x[:, 0] (full body) and x[:, 1:] (body parts), so x is
                presumably (num_rois, num_parts + 1, feat) — confirm
                against the RoI extractor.
            rois: boxes for all proposals of the batch, concatenated.
            num_proposal_list: per-image proposal counts, used by
                inter_graph() to split `rois`.

        Returns:
            (cls_score, bbox_pred); either may be None depending on
            with_cls / with_reg.

        NOTE(review): `inter_feature`/`intra_feature` are only assigned
        when num_shared_fcs >= 1; this head assumes at least one shared fc.
        """
        # shared part
        if self.num_shared_convs > 0:
            for conv in self.shared_convs:
                x = conv(x)

        if self.num_shared_fcs > 0:
            if self.with_avg_pool:
                x = self.avg_pool(x)
            for ind, fc in enumerate(self.shared_fcs):
                x = self.relu(fc(x))
                if ind == 0:
                    # intra-proposal
                    intra_feature = self.intra_graph(x)
                    # inter-proposal
                    x_full = x[:, 0]
                    x_neighbour = self.inter_graph(x_full, rois, num_proposal_list)
                    # Fixed mixing: mostly own feature, a little neighbour context.
                    inter_feature = 0.9*x_full + 0.1*x_neighbour
                    # perform another fc layer on full-body features for better regression
                    x = x_full
        x_reg = x
        # Classification consumes fused inter + intra features.
        x_concat = torch.cat((inter_feature, intra_feature), 1)
        x_cls = self.relu(self.concat_fc(x_concat))

        for conv in self.cls_convs:
            x_cls = conv(x_cls)
        if x_cls.dim() > 2:
            if self.with_avg_pool:
                x_cls = self.avg_pool(x_cls)
            x_cls = x_cls.flatten(1)
        for fc in self.cls_fcs:
            x_cls = self.relu(fc(x_cls))

        for conv in self.reg_convs:
            x_reg = conv(x_reg)
        if x_reg.dim() > 2:
            if self.with_avg_pool:
                x_reg = self.avg_pool(x_reg)
            x_reg = x_reg.flatten(1)
        for fc in self.reg_fcs:
            x_reg = self.relu(fc(x_reg))

        cls_score = self.fc_cls(x_cls) if self.with_cls else None
        bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
        return cls_score, bbox_pred
def intra_graph(self, x):
# Inra-proposal Graph
x_original = x[:, 1:]
full_body = x[:, 0]
body_part = x[:, 1:]
body_part_ = body_part.clone()
# affinity module
for fc in self.affinity_fc1:
full_body = self.relu(fc(full_body))
for fc in self.affinity_fc2:
body_part = self.relu(fc(body_part))
num_sample, num_body, feat_dim = body_part.size()
full_body = full_body[:, None, None, :].expand((
num_sample,
num_body, num_body,
feat_dim))
body_part = body_part[:, None, :, :].expand((
num_sample,
num_body, num_body,
feat_dim))
affinity_matrix = body_part * full_body
affinity_matrix = self.weight_fc(self.norm(affinity_matrix)).sigmoid()
# self-attention
for fc in self.attention_fc1:
body_part_ = self.relu(fc(body_part_))
body_part_value = self.attention_logits(body_part_).sigmoid().squeeze(-1)
attention_matrix = body_part_.new_zeros((
num_sample, num_body, num_body))
for i in range(num_body):
for j in range(i, num_body):
attention_matrix[:, i, j] = (body_part_value[:, i] + body_part_value[:, j])/2
for i in range(num_body):
for j in range(0, i):
attention_matrix[:, i, j] = attention_matrix[:, j, i]
fusion_matrix = torch.sqrt(affinity_matrix.squeeze(-1)*attention_matrix)
degree_matrix = self.generate_degree_matrix(fusion_matrix)
fusion_matrix = torch.matmul(degree_matrix, fusion_matrix) #Adjacent matrix
enhanced_feature = torch.matmul(fusion_matrix, x_original)
enhanced_feature = self.relu(self.parameter_matrix(enhanced_feature))
enhanced_feature = self.relu(self.part_out(
enhanced_feature.reshape(num_sample, -1)))
return enhanced_feature
def inter_graph(self, x_full, rois, num_proposal_list):
# Inter-proposal Graph
neighbour_feats = []
num_proposal = np.cumsum(np.array(num_proposal_list))
batch_size = len(num_proposal_list)
for img_ind in range(batch_size):
if img_ind == 0:
overlaps = bbox_overlaps(
rois[:num_proposal[img_ind]],
rois[:num_proposal[img_ind]])
num = num_proposal[img_ind]
x_body = x_full[:num_proposal[img_ind]]
else:
overlaps = bbox_overlaps(
rois[num_proposal[img_ind-1]: num_proposal[img_ind]],
rois[num_proposal[img_ind-1]: num_proposal[img_ind]])
num = num_proposal[img_ind] - num_proposal[img_ind-1]
x_body = x_full[num_proposal[img_ind-1]: num_proposal[img_ind]]
mask_tensor = 1 - torch.eye(num.item())
overlaps = overlaps * mask_tensor.to(overlaps)
degree_matrix = self.generate_degree_matrix(overlaps).squeeze(0)
overlaps = torch.matmul(degree_matrix, overlaps)
x_body_ = self.relu(
self.parameter_matrix2(torch.matmul(overlaps, x_body)))
neighbour_feats.append(x_body_)
x_neighbour = torch.cat(neighbour_feats, dim=0)
return x_neighbour
def generate_degree_matrix(self, matrix):
# Generate degree matrix for adjacent matric
if matrix.dim() != 3:
matrix = matrix.unsqueeze(0) # batch_size x N x N
N = matrix.size(-1)
matrix_sum = torch.sum(matrix, dim=-1)
matrix_sum_ = matrix_sum.reshape(-1)
non_zero_ind = torch.nonzero(matrix_sum_).squeeze()
matrix_sum_[non_zero_ind] = 1 / matrix_sum_[non_zero_ind]
matrix_sum_ = matrix_sum_.reshape(-1, N)
degree_matrix = matrix_sum_[:, :, None].expand_as(matrix)
degree_matrix = degree_matrix * torch.eye(N).type_as(matrix)
return degree_matrix
    @property
    def norm(self):
        # The BN layer is registered in __init__ under a dynamic name
        # (self.norm_name); expose it here as a plain attribute.
        return getattr(self, self.norm_name)
@HEADS.register_module
class SharedFCBBoxHead(ConvFCBBoxHead):
    """Convenience head: `num_fcs` shared fc layers and no extra conv or
    branch-specific fc layers (the usual Faster R-CNN "2fc" head shape).
    """

    def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs):
        assert num_fcs >= 1
        super(SharedFCBBoxHead, self).__init__(
            num_shared_convs=0,
            num_shared_fcs=num_fcs,
            num_cls_convs=0,
            num_cls_fcs=0,
            num_reg_convs=0,
            num_reg_fcs=0,
            fc_out_channels=fc_out_channels,
            *args,
            **kwargs)
def jitter_gt(proposals, gts):
    """Augment `proposals` with 10 randomly shifted copies of each GT box.

    Each copy is the ground-truth box translated by up to +/-20% of its
    width/height (the same offset is applied to both x coordinates, and
    likewise for y, so the box size is preserved). The extra column
    (index 4) of the appended rows is filled with 1 by `new_ones`.

    Args:
        proposals: (N, 5) tensor of existing proposals.
        gts: iterable of (x1, y1, x2, y2) ground-truth boxes.

    Returns:
        One-element list holding the (N + 10 * len(gts), 5) tensor.
    """
    jittered = [proposals]
    for gt in gts:
        x1, y1, x2, y2 = gt
        width, height = x2-x1+1, y2-y1+1
        for _ in range(10):
            x_jitter = random.uniform(-0.2, 0.2)
            y_jitter = random.uniform(-0.2, 0.2)
            proposal = proposals.new_ones((1, 5))
            proposal[:, 0] = x1+x_jitter*width
            proposal[:, 1] = y1+y_jitter*height
            proposal[:, 2] = x2+x_jitter*width
            proposal[:, 3] = y2+y_jitter*height
            jittered.append(proposal)
    # Single concatenation instead of one torch.cat per jittered box
    # (the original re-copied the whole tensor on every append).
    return [torch.cat(jittered, 0)]
| 38.656 | 93 | 0.579746 |
aeba891d4966544734b88eadcea8040c3f4832af | 4,153 | py | Python | commands/subsystems/generic.py | AndreyCortez/Telegram-Bot | 46f5c3044460812ee4b57e53b48eeab1ccf80404 | [
"MIT"
] | null | null | null | commands/subsystems/generic.py | AndreyCortez/Telegram-Bot | 46f5c3044460812ee4b57e53b48eeab1ccf80404 | [
"MIT"
] | null | null | null | commands/subsystems/generic.py | AndreyCortez/Telegram-Bot | 46f5c3044460812ee4b57e53b48eeab1ccf80404 | [
"MIT"
] | null | null | null | from telegram import ReplyKeyboardRemove, Update, ReplyKeyboardMarkup
from telegram.ext import CallbackContext, ConversationHandler
from spreadsheet import systems
from utils import available_systems, electric_subsystems, mechanics_subsystem
from .conversation import Conversation
from ..general import reply_text
# A dictionary storing each conversation's state, keyed by the sender's
# telegram *username* (see load_conversation / get_conversation).
conversation_task = {}
# Returns keyboard markup based on dictionary
def __create_keyboard(elements: list) -> ReplyKeyboardMarkup:
    # Split the flat list into two alternating rows (even / odd indices).
    return ReplyKeyboardMarkup([elements[i::2] for i in range(2)], one_time_keyboard=True)
# System and subsystem default keyboards
# Pre-built reply keyboards, keyed by selection stage then by system slug.
keyboards = {
    "system": __create_keyboard(available_systems),
    "subsystem": {
        "ele": __create_keyboard(list(electric_subsystems.keys())),
        "mec": __create_keyboard(list(mechanics_subsystem.keys())),
    }
}
# System or subsystem lister when starting conversation
# TODO write extraction method
def check_for_system_or_subsystem():
    # Placeholder: not implemented yet (see TODO above).
    pass
# Loads configuration and replies text when a system is selected
def load_system_info(update: Update, selected_system: str) -> None:
    """Store the chosen system on the conversation and prompt for a subsystem.

    The return annotation was the builtin function ``any`` (not a type);
    the function returns nothing, so it is annotated ``None``.
    """
    keyboard = keyboards["subsystem"][selected_system]
    reply_text(update, f"Sistema {selected_system} selecionado\nInforme o subsistema", keyboard)
    conversation = get_conversation(update)
    conversation.system = selected_system
    conversation.dict = systems[selected_system]["sub"]
    conversation.ss = systems[selected_system]["ss"]
# Loads configuration and replies text when a subsystem is selected
def load_subsystem_info(update: Update, selected_subsystem: str) -> None:
    """Store the chosen subsystem and send the user its task list."""
    conversation = get_conversation(update)
    conversation.subsystem = selected_subsystem
    conversation.tasks = get_task_lister_text(conversation.system, selected_subsystem)
    prompt = (
        f"{conversation.tasks}\n\n"
        "Selecione da lista acima o número da tarefa que deseja executar a ação"
    )
    reply_text(update, prompt)
# Project and task listing methods
# TODO refactor functions
def get_subtasks(data: list, pos: int, counter: int) -> tuple[str, int, int]:
    """Collect the numbered, unfinished tasks of the project starting at `pos`.

    Rows belong to the project while their first column is empty (the header
    row at `pos` itself is always consumed). Rows whose status column reads
    "Concluído" (done) or whose name column is empty are skipped.

    Returns:
        (formatted task lines, index of first row not consumed,
        next task number).
    """
    lines = []
    row = pos
    while row < len(data):
        if data[row][0] and row != pos:
            break  # reached the next project header
        name, status = data[row][1], data[row][2]
        if name and status != "Concluído":
            lines.append(f"{counter} - {name}\n")
            counter += 1
        row += 1
    return "".join(lines), row, counter
def get_task_lister_text(system: str, subsystem: str) -> str:
    """Build the HTML task list for one subsystem from its spreadsheet.

    Rows with a non-empty first column start a project section; the rows
    below them (empty first column) are that project's tasks.
    """
    name = systems[system]["sub"][subsystem]["name"]
    ss = systems[system]["ss"].sheet(subsystem)
    data = ss.get_all_values()
    string = f"<b>Subsistema: {name}</b>\n\n<u>Tarefas</u>\n"
    counter = 1
    # A while loop so we can jump past the rows get_subtasks consumed.
    # The original assigned ``i = pos`` inside a ``for`` loop, which has no
    # effect on the iteration (the loop variable is reassigned each pass);
    # it only worked because non-header rows are skipped by the
    # ``if data[i][0]`` guard anyway. Output is identical.
    i = 1
    while i < len(data):
        if data[i][0]:
            tasks, pos, counter = get_subtasks(data, i, counter)
            if tasks:
                string += f"\n<i>{data[i][0]}</i>\n" + tasks
            i = pos
        else:
            i += 1
    return string
# Instantiates a new conversation based on sender's username
def load_conversation(update: Update) -> None:
    # Overwrites any previous conversation state for this user.
    conversation_task[update.effective_user.username] = Conversation()
# Returns the Conversation object holding all info for this sender
def get_conversation(update: Update) -> Conversation:
    # Raises KeyError if load_conversation was never called for this user.
    return conversation_task[update.effective_user.username]
# Returns standardized string to begin conversation stage
def get_default_system_message(mode: str, description: str) -> str:
    """Standard HTML prompt shown when a conversation stage starts."""
    header = f"<b>{mode}</b>\n{description}\n\n"
    footer = (
        "Utilize <code>/cancel</code> a qualquer momento para cancelar a operação\n"
        "Informe o sistema"
    )
    return header + footer
# Function executed whenever a timeout occurs
def timeout(update: Update, ctx: CallbackContext) -> int:
    # Inform the user and remove the custom keyboard before ending the
    # conversation.
    update.message.reply_text(
        "Limite de tempo excedido\nInicie o processo novamente", reply_markup=ReplyKeyboardRemove()
    )
    return ConversationHandler.END
# Function executed whenever a conversation is cancelled
def cancel(update: Update, ctx: CallbackContext) -> int:
    # Acknowledge cancellation and remove the custom keyboard.
    update.message.reply_text("Processo cancelado", reply_markup=ReplyKeyboardRemove())
    return ConversationHandler.END
| 34.89916 | 99 | 0.716109 |
c06ee76ae2dd8b4962c211aa6c4eccdb8042629b | 23,297 | py | Python | src/documents/views.py | PhaseDMS/phase | 4f776d0b1b5e7916a3e26aee890b3c2b9454ef0e | [
"MIT"
] | 2 | 2021-09-10T19:40:30.000Z | 2022-01-31T07:15:51.000Z | src/documents/views.py | PhaseDMS/phase | 4f776d0b1b5e7916a3e26aee890b3c2b9454ef0e | [
"MIT"
] | null | null | null | src/documents/views.py | PhaseDMS/phase | 4f776d0b1b5e7916a3e26aee890b3c2b9454ef0e | [
"MIT"
] | 1 | 2021-09-10T19:40:42.000Z | 2021-09-10T19:40:42.000Z | import json
from django.utils import timezone
from django.conf import settings
from django.http import (
HttpResponse,
Http404,
HttpResponseForbidden,
HttpResponseRedirect,
)
from wsgiref.util import FileWrapper
from django.core.exceptions import PermissionDenied
from django.views.generic import ListView, DetailView, RedirectView, DeleteView
from django.views.generic.edit import (
ModelFormMixin,
ProcessFormView,
SingleObjectTemplateResponseMixin,
)
from django.urls import reverse
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from braces.views import LoginRequiredMixin, PermissionRequiredMixin
from rest_framework.renderers import JSONRenderer
from accounts.models import get_entities
from audit_trail.models import Activity
from audit_trail.signals import activity_log
from favorites.models import Favorite
from favorites.api.serializers import FavoriteSerializer
from bookmarks.models import get_user_bookmarks
from bookmarks.api.serializers import BookmarkSerializer
from categories.views import CategoryMixin
from documents.models import Document
from documents.utils import save_document_forms
from documents.forms.models import documentform_factory
from documents.forms.filters import filterform_factory
from notifications.models import notify
from privatemedia.views import serve_model_file_field
class DocumentListMixin(CategoryMixin):
    """Base class for listing documents.

    This is the base class to factorize code fetching documents
    of the correct type.
    """

    slug_url_kwarg = "document_key"
    slug_field = "document_key"

    def breadcrumb_section(self):
        # Document lists have no section-level breadcrumb.
        return None

    def breadcrumb_subsection(self):
        return self.category

    def get_external_filtering(self):
        """This is used to filter Outgoing transmittals for
        third party users"""
        return get_entities(self.request.user)

    def get_context_data(self, **kwargs):
        # NOTE(review): the return value of this call is discarded — looks
        # like a leftover; confirm get_entities has no needed side effect
        # before removing.
        self.get_external_filtering()
        context = super(DocumentListMixin, self).get_context_data(**kwargs)
        context.update(
            {
                "document_type": self.category.document_type(),
                "favorites": self.get_favorites(),
                "bookmarks": self.get_bookmarks(self.request.user, self.category),
            }
        )
        return context

    def get_queryset(self):
        """Get queryset for listing documents.

        We get all Metadata depending on the category.
        """
        DocumentClass = self.category.document_class()
        qs = DocumentClass.objects.select_related().filter(
            document__category=self.category
        )
        entities = self.get_external_filtering()
        if not hasattr(DocumentClass, "recipient"):
            # Recipient only belongs to Transmittals
            return qs

        # External users only see transmittals addressed to their entities.
        if self.request.user.is_external and entities:
            qs = qs.filter(recipient_id__in=entities)
        return qs

    def get_document_class(self):
        """Returns the document class hosted by this category."""
        return self.category.document_class()

    def get_favorites(self):
        # Serialized to a JSON string for direct embedding in the template.
        qs = Favorite.objects.select_related("user").filter(user=self.request.user)
        serializer = FavoriteSerializer(qs, many=True)
        return JSONRenderer().render(serializer.data).decode()

    def get_bookmarks(self, user, category):
        # Same JSON-string embedding as get_favorites.
        bookmarks = get_user_bookmarks(user, category)
        serializer = BookmarkSerializer(bookmarks, many=True)
        return JSONRenderer().render(serializer.data).decode()
class BaseDocumentList(LoginRequiredMixin, DocumentListMixin, ListView):
    """Login-protected document ListView; all behavior comes from the mixins."""
    pass
class BaseDocumentBatchActionView(BaseDocumentList):
    """Performs a task on several documents at once.

    This operation can be quite time consuming when many documents are reviewed
    at once, and this is expected to be normal by the users. We display a nice
    progress bar while the user waits.

    Since the user is already waiting, we also perform elasticsearch indexing
    synchronously, so at the end of the operation, the document list displayed
    is in sync.
    """

    def get_redirect_url(self, *args, **kwargs):
        """Redirects to document list after that."""
        return reverse(
            "category_document_list",
            args=[self.kwargs.get("organisation"), self.kwargs.get("category")],
        )

    def post(self, request, *args, **kwargs):
        # Launch the batch job and return a JSON payload with the URL the
        # client must poll for progress.
        document_ids = request.POST.getlist("document_ids")
        document_class = self.get_document_class()
        contenttype = ContentType.objects.get_for_model(document_class)
        job = self.start_job(contenttype, document_ids)
        poll_url = reverse("task_poll", args=[job.id])
        data = {"poll_url": poll_url}
        return HttpResponse(json.dumps(data), content_type="application/json")

    def start_job(self, content_type, document_ids):
        # Subclasses must start the actual batch task and return an object
        # exposing an ``id`` attribute.
        raise NotImplementedError()
class DocumentList(BaseDocumentList):
    """Main paginated document list for a category."""

    template_name = "documents/document_list.html"

    def get_context_data(self, **kwargs):
        context = super(DocumentList, self).get_context_data(**kwargs)
        model = context["object_list"].model
        # The filter form is built dynamically for this category's model.
        FilterForm = filterform_factory(model)
        context.update(
            {
                "form": FilterForm(),
                "documents_active": True,
                "paginate_by": settings.PAGINATE_BY,
                "sort_by": model._meta.ordering[0],
                "document_class": self.get_document_class(),
            }
        )
        return context
class DocumentRedirect(RedirectView):
    """Redirects from short document url to full url."""

    # Permanent redirections are cached and doc location can change, so...
    permanent = False

    def get_redirect_url(self, **kwargs):
        key = kwargs.get("document_key")
        # select_related avoids extra queries when building the url below.
        qs = Document.objects.select_related(
            "category__organisation", "category__category_template"
        )
        document = get_object_or_404(qs, document_key=key)
        return reverse(
            "document_detail",
            args=[
                document.category.organisation.slug,
                document.category.slug,
                document.document_key,
            ],
        )
class DocumentFormMixin(object):
    """Helpers shared by views editing a document plus one of its revisions."""

    def breadcrumb_object(self):
        return self.object

    def get_form_class(self):
        """Get the document form edition form class."""
        return documentform_factory(self.get_document_class())

    def get_revisionform_class(self):
        """Get the correct revision form edition form class."""
        document = self.object

        # If there is no document (e.g when creating a new document)
        # we need to create a dummy object just to get the associated
        # revision class. TODO find a better way to do this
        if not document:
            document = self.get_document_class()()

        return documentform_factory(document.get_revision_class())

    def get_forms(self):
        """Returns both the document and revision forms."""
        kwargs = self.get_form_kwargs()
        document_form_class = self.get_form_class()
        document_form = document_form_class(**kwargs)

        # Both forms share the same kwargs, except for the bound instance.
        kwargs.update({"instance": self.revision})
        revision_form_class = self.get_revisionform_class()
        revision_form = revision_form_class(**kwargs)

        return document_form, revision_form

    def get_revision(self):
        """Get the edited revision (from the url, or the latest one)."""
        revision_number = self.kwargs.get("revision", None)

        if revision_number:
            revision = self.object.get_revision(revision_number)
            if revision is None:
                raise Http404(_("This revision does not exist"))
        else:
            revision = self.object.latest_revision

        return revision
class BaseDocumentFormView(
    LoginRequiredMixin,
    PermissionRequiredMixin,
    DocumentListMixin,
    DocumentFormMixin,
    SingleObjectTemplateResponseMixin,
    ModelFormMixin,
    ProcessFormView,
):
    """Base view class to display a document form.

    Manages two forms at once (document metadata + revision), which is why
    form_valid / form_invalid take two arguments instead of Django's one.
    """

    def get(self, request, *args, **kwargs):
        document_form, revision_form = self.get_forms()
        return self.render_to_response(
            self.get_context_data(
                document_form=document_form, revision_form=revision_form
            )
        )

    def post(self, request, *args, **kwargs):
        document_form, revision_form = self.get_forms()
        # Both forms must validate before anything is saved.
        if document_form.is_valid() and revision_form.is_valid():
            return self.form_valid(document_form, revision_form)
        else:
            return self.form_invalid(document_form, revision_form)

    def get_form_kwargs(self):
        kwargs = super(BaseDocumentFormView, self).get_form_kwargs()

        # If category is not set, the "get_queryset" method was not called
        # TODO clean this
        if not hasattr(self, "category"):
            # Called only for its side effect of setting self.category.
            _qs = self.get_queryset()  # noqa

        kwargs.update({"category": self.category})
        return kwargs

    def form_valid(self, document_form, revision_form):
        """Saves both the document and it's revision."""
        document, self.object, self.revision = save_document_forms(
            document_form, revision_form, self.category
        )
        return HttpResponseRedirect(self.get_success_url())

    def form_invalid(self, document_form, revision_form):
        """Render the form with errors."""
        return self.render_to_response(
            self.get_context_data(
                document_form=document_form, revision_form=revision_form
            )
        )
class DocumentDetail(
    LoginRequiredMixin, DocumentListMixin, DocumentFormMixin, DetailView
):
    """Read-only document page listing every revision."""

    context_object_name = "document"
    template_name = "documents/document_detail.html"

    def get(self, request, *args, **kwargs):
        """Update the favorite's timestamp for the current user if any."""
        response = super(DocumentDetail, self).get(request, *args, **kwargs)

        # Upgrade last time the favorite was last seen
        # If not favorited, the query does nothing and it's ok
        Favorite.objects.filter(document=self.object.document).filter(
            user=self.request.user
        ).update(last_view_date=timezone.now())

        return response

    def get_context_data(self, **kwargs):
        context = super(DocumentDetail, self).get_context_data(**kwargs)
        document = self.object

        # Read-only forms are used purely for display.
        DocumentForm = self.get_form_class()
        form = DocumentForm(instance=document, category=self.category, read_only=True)

        revisions = document.get_all_revisions()
        RevisionForm = self.get_revisionform_class()
        latest_revision = None
        for revision in revisions:
            revision.form = RevisionForm(
                instance=revision,
                request=self.request,
                category=self.category,
                read_only=True,
            )
            # Get latest revision without additional query
            if latest_revision is None or latest_revision.revision < revision.revision:
                latest_revision = revision

        # NOTE(review): assumes at least one revision exists, otherwise
        # latest_revision stays None and the call below crashes — confirm.
        context.update(
            {
                "is_detail": True,
                "form": form,
                "revisions": revisions,
                "latest_revision": latest_revision,
            }
        )
        context.update(latest_revision.detail_view_context(self.request))
        return context
class DocumentCreate(BaseDocumentFormView):
    """Create a new document together with its first revision."""

    permission_required = "documents.add_document"
    context_object_name = "document"
    template_name = "documents/document_form.html"

    def check_if_creation_is_available(self):
        # Some categories only receive documents through other channels.
        if not self.category.use_creation_form:
            raise PermissionDenied("Document creation is disabled for this category")

    def get(self, request, *args, **kwargs):
        self.check_if_creation_is_available()
        # No existing document/revision when creating.
        self.object = None
        self.revision = None
        return super(DocumentCreate, self).get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.check_if_creation_is_available()
        self.object = None
        self.revision = None
        return super(DocumentCreate, self).post(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(DocumentCreate, self).get_context_data(**kwargs)
        context.update(
            {
                "document_create": True,
            }
        )
        return context

    def form_valid(self, document_form, revision_form):
        """Saves both the document and it's revision."""
        doc, metadata, revision = save_document_forms(
            document_form, revision_form, self.category, created_by=self.request.user
        )

        message_text = """You created the document
            <a href="%(url)s">%(key)s (%(title)s)</a>"""
        message_data = {
            "url": doc.get_absolute_url(),
            "key": doc.document_key,
            "title": doc.title,
        }
        notify(self.request.user, _(message_text) % message_data)

        activity_log.send(
            verb="created",
            target=None,
            action_object=doc,
            sender=None,
            actor=self.request.user,
        )
        return HttpResponseRedirect(self.get_success_url())

    def get_success_url(self):
        """Redirect to a different URL given the button clicked by the user."""
        # "save-create" keeps the user on the creation form to add another.
        if "save-create" in self.request.POST:
            url = reverse(
                "document_create",
                args=[self.kwargs["organisation"], self.kwargs["category"]],
            )
        else:
            url = reverse(
                "category_document_list",
                args=[self.kwargs["organisation"], self.kwargs["category"]],
            )
        return url
class DocumentEdit(BaseDocumentFormView):
    """Edit a document and a selected revision."""

    permission_required = "documents.change_document"
    context_object_name = "document"
    template_name = "documents/document_form.html"

    # We don't subclass UpdateView because there is too much to rewrite
    # since we manage two forms at a time.

    def get(self, request, *args, **kwargs):
        self.object = self.get_object()
        self.revision = self.get_revision()
        return super(DocumentEdit, self).get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        self.revision = self.get_revision()
        return super(DocumentEdit, self).post(request, *args, **kwargs)

    def form_valid(self, document_form, revision_form):
        # Save (in the parent class), then record the edit in the audit trail.
        response = super(DocumentEdit, self).form_valid(document_form, revision_form)
        activity_log.send(
            verb=Activity.VERB_EDITED,
            action_object=self.revision,
            target=self.object.document,
            sender=None,
            actor=self.request.user,
        )
        return response

    def get_context_data(self, **kwargs):
        context = super(DocumentEdit, self).get_context_data(**kwargs)
        # Add a context var to make the difference with creation view
        context.update(
            {
                "is_edit": True,
                "revision": self.revision,
            }
        )
        return context

    def get_success_url(self):
        """Redirect to a different URL given the button clicked by the user."""
        # "save-view" sends the user to the document detail page.
        if "save-view" in self.request.POST:
            url = self.object.get_absolute_url()
        else:
            url = reverse(
                "category_document_list",
                args=[
                    self.kwargs["organisation"],
                    self.kwargs["category"],
                ],
            )
        return url
class DocumentDelete(
    LoginRequiredMixin, PermissionRequiredMixin, DocumentListMixin, DeleteView
):
    """Delete a document and its revisions."""

    permission_required = "documents.delete_document"
    raise_exception = True
    # Deletion is destructive: only POST is allowed, never GET.
    http_method_names = ["post"]

    def delete(self, request, *args, **kwargs):
        """Delete the document and associated data.

        We need to delete the top level document object. Thus, metadata and
        revisions will also be deleted.
        """
        document = self.object.document
        # Capture the name and url before the row disappears.
        document_str = str(document)
        success_url = self.get_success_url()
        document.delete()
        activity_log.send(
            verb=Activity.VERB_DELETED,
            target=None,
            action_object_str=document_str,
            sender=None,
            actor=self.request.user,
        )
        return HttpResponseRedirect(success_url)

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        # A document with a running review must not vanish mid-review.
        if self.object.latest_revision.is_under_review():
            return HttpResponseForbidden("Documents under review cannot be deleted")
        return self.delete(request, *args, **kwargs)

    def get_success_url(self):
        return self.category.get_absolute_url()
class DocumentRevisionDelete(DocumentDelete):
    """Delete only the latest document revision."""

    def delete(self, request, *args, **kwargs):
        # assumes get_all_revisions() yields newest first — confirm in model
        all_revisions = list(self.object.get_all_revisions())
        # Deleting the only revision would leave an empty document.
        if len(all_revisions) <= 1:
            return HttpResponseForbidden("Cannot delete a single latest revision")

        latest_revision = all_revisions[0]
        previous_revision = all_revisions[1]
        latest_revision_str = str(latest_revision)

        # Roll the "latest" pointers back to the previous revision before
        # actually deleting the row.
        self.object.latest_revision = previous_revision
        self.object.save()

        self.object.document.current_revision = previous_revision.revision
        self.object.document.current_revision_date = previous_revision.revision_date
        self.object.document.updated_on = timezone.now()
        self.object.document.save()

        latest_revision.delete()

        activity_log.send(
            verb=Activity.VERB_DELETED,
            action_object_str=latest_revision_str,
            target=self.object.document,
            sender=self.__class__,
            actor=self.request.user,
        )

        success_url = self.get_success_url()
        return HttpResponseRedirect(success_url)

    def get_success_url(self):
        # Back to the document page, not the category list.
        return self.object.get_absolute_url()
class DocumentRevise(DocumentEdit):
    """Creates a new revision for the document."""

    def get(self, *args, **kwargs):
        doc = self.get_object()
        revision = doc.latest_revision
        # A new revision cannot be started while the latest is in review.
        if revision.is_under_review():
            return HttpResponseForbidden("You cannot revise a document during review")
        return super(DocumentRevise, self).get(*args, **kwargs)

    def get_revision(self):
        """Returns an empty revision, since we are creating a new one."""
        return None

    def get_forms(self):
        """Returns both the document and revision forms.

        We want the revision fields to be blank, so we need to get rid of
        default values.

        We also want to keep the previous' revision distribution list.
        """
        document_form, revision_form = super(DocumentRevise, self).get_forms()
        latest_revision = self.object.latest_revision
        initial = latest_revision.get_new_revision_initial(revision_form)
        revision_form.initial = initial
        return document_form, revision_form

    def form_valid(self, document_form, revision_form):
        """Saves both the document and it's revision."""
        document, self.object, self.revision = save_document_forms(
            document_form, revision_form, self.category
        )

        message_text = """You created revision %(rev)s for document
            <a href="%(url)s">%(key)s (%(title)s)</a>"""
        message_data = {
            "rev": self.revision.name,
            "url": self.object.get_absolute_url(),
            "key": self.object.document_key,
            "title": self.object.title,
        }
        notify(self.request.user, _(message_text) % message_data)
        activity_log.send(
            verb=Activity.VERB_CREATED,
            target=self.revision,
            sender=None,
            actor=self.request.user,
        )
        return HttpResponseRedirect(self.get_success_url())

    def get_context_data(self, **kwargs):
        """Add a context var to make the difference with creation view"""
        next_revision = self.object.document.current_revision + 1
        context = super(DocumentRevise, self).get_context_data(**kwargs)
        context.update(
            {"is_revise": True, "next_revision": "{:02d}".format(next_revision)}
        )
        return context
class DocumentDownload(BaseDocumentList):
    """Compress the files of the selected documents and serve them as a zip."""

    def post(self, request, *args, **kwargs):
        _class = self.category.document_class()
        form_data = self.request.POST
        # Only documents of the current category may be selected.
        qs = Document.objects.filter(category=self.category)
        form = _class.get_document_download_form(form_data, queryset=qs)
        if form.is_valid():
            data = form.cleaned_data
        else:
            raise Http404("Invalid parameters to download files.")

        # Generates the temporary zip file
        zip_filename = _class.compress_documents(data["document_ids"], **data)
        # tell() right after writing gives the archive size; rewind before streaming.
        file_size = zip_filename.tell()
        zip_filename.seek(0)
        wrapper = FileWrapper(zip_filename)

        # Returns the zip file for download
        response = HttpResponse(wrapper, content_type="application/zip")
        response["Content-Disposition"] = "attachment; filename=download.zip"
        response["Content-Length"] = file_size
        return response
class BaseFileDownload(LoginRequiredMixin, CategoryMixin, DetailView):
    """Base class to download files from a Metadata or
    MetadataRevision FileField."""

    # Downloads are read-only.
    http_method_names = ["get"]

    def get_object(self, queryset=None):
        """Get the single object matched by the subclass's queryset."""
        qs = self.get_queryset()
        doc_or_revision = get_object_or_404(qs)
        return doc_or_revision

    def get(self, request, *args, **kwargs):
        """Serve the file stored in the field named by the `field_name` URL kwarg."""
        doc_or_revision = self.get_object()
        field_name = self.kwargs.get("field_name")
        return serve_model_file_field(doc_or_revision, field_name)
class RevisionFileDownload(BaseFileDownload):
    """Download files from a MetadataRevision FileField."""

    def get_queryset(self):
        # Match the revision by document key, category and revision number.
        key = self.kwargs.get("document_key")
        revision = self.kwargs.get("revision")
        qs_kwargs = {
            "metadata__document__document_key": key,
            "metadata__document__category": self.category,
            "revision": revision,
        }
        return self.category.revision_class().objects.filter(**qs_kwargs)
class DocumentFileDownload(BaseFileDownload):
    """Download files from a Metadata FileField."""

    def get_queryset(self):
        # Match the document by key within the current category.
        key = self.kwargs.get("document_key")
        qs_kwargs = {"document__document_key": key, "document__category": self.category}
        return self.category.document_class().objects.filter(**qs_kwargs)
| 34.159824 | 88 | 0.652359 |
329a24272de4651b6ef89b070347bfd2670cd98a | 768 | py | Python | dvc/repo/metrics/diff.py | mtl-ai/dvc | e675698a8d3979b8791699ade8c0d7a6d5c04818 | [
"Apache-2.0"
] | null | null | null | dvc/repo/metrics/diff.py | mtl-ai/dvc | e675698a8d3979b8791699ade8c0d7a6d5c04818 | [
"Apache-2.0"
] | null | null | null | dvc/repo/metrics/diff.py | mtl-ai/dvc | e675698a8d3979b8791699ade8c0d7a6d5c04818 | [
"Apache-2.0"
] | null | null | null | from dvc.exceptions import NoMetricsError
from dvc.utils.diff import diff as _diff
from dvc.utils.diff import format_dict
def _get_metrics(repo, *args, revs=None, **kwargs):
try:
metrics = repo.metrics.show(*args, **kwargs, revs=revs)
return metrics
except NoMetricsError:
return {}
def diff(repo, *args, a_rev=None, b_rev=None, **kwargs):
    """Diff metrics between two revisions (defaults: HEAD vs. workspace)."""
    if repo.scm.no_commits:
        return {}

    show_unchanged = kwargs.pop("all", False)
    rev_old = a_rev or "HEAD"
    rev_new = b_rev or "workspace"

    metrics = _get_metrics(repo, *args, **kwargs, revs=[rev_old, rev_new])
    return _diff(
        format_dict(metrics.get(rev_old, {})),
        format_dict(metrics.get(rev_new, {})),
        with_unchanged=show_unchanged,
    )
| 25.6 | 73 | 0.65625 |
34041610d180ea861f8bcd573af920be19bd4f4e | 1,258 | py | Python | fiwareclient/lib/parser.py | YujiAzama/python-fiwareclient | 7d19034d832a1148abc6022c6e7687a52b74eef4 | [
"Apache-2.0"
] | null | null | null | fiwareclient/lib/parser.py | YujiAzama/python-fiwareclient | 7d19034d832a1148abc6022c6e7687a52b74eef4 | [
"Apache-2.0"
] | null | null | null | fiwareclient/lib/parser.py | YujiAzama/python-fiwareclient | 7d19034d832a1148abc6022c6e7687a52b74eef4 | [
"Apache-2.0"
] | null | null | null | from fiwareclient.orion.model.attribute import Attribute
from fiwareclient.orion.model.metadata import Metadata
class Parser(object):
    """Converts plain NGSI-style dictionaries into client model objects."""

    def dict_to_attribute(self, dict_attr):
        """Build an Attribute (including its metadata) from a one-key dict.

        ``dict_attr`` maps a single attribute name to a dict with ``type``,
        ``value`` and an optional ``metadata`` mapping.
        """
        name = next(iter(dict_attr))
        body = dict_attr[name]
        metadata_items = []
        if body.get("metadata"):
            for md_name, md_body in body["metadata"].items():
                metadata_items.append(
                    Metadata(md_name, md_body["type"], md_body["value"]))
        return Attribute(name, body["type"], body["value"], metadata_items)

    def dict_to_metadata(self):
        # Not implemented yet.
        pass
if __name__ == "__main__":
    # Manual smoke test: parse a sample NGSI attribute dict (with one
    # metadata entry) and print the resulting Attribute as JSON.
    dict_attr = {
        "temperture": {
            "value": "25",
            "type": "Number",
            "metadata": {
                "timestamp": {
                    "value": "2018",
                    "type": "Date"
                }
            }
        }
    }
    parser = Parser()
    attribute = parser.dict_to_attribute(dict_attr)
    print(attribute.json())
| 29.255814 | 82 | 0.537361 |
8d18a940b9d113562ff6e3cecf93d563f4885f0c | 404 | py | Python | fxwebgen.py | tiliado/fxwebgen | 5d1c5120b27fc13b6b45ee4e0017771271c3c3e0 | [
"BSD-2-Clause"
] | null | null | null | fxwebgen.py | tiliado/fxwebgen | 5d1c5120b27fc13b6b45ee4e0017771271c3c3e0 | [
"BSD-2-Clause"
] | 13 | 2018-08-06T15:25:50.000Z | 2019-04-14T14:09:22.000Z | fxwebgen.py | tiliado/fxwebgen | 5d1c5120b27fc13b6b45ee4e0017771271c3c3e0 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3.6
# Copyright 2018 Jiří Janoušek <janousek.jiri@gmail.com>
# Licensed under BSD-2-Clause license - see file LICENSE for details.
if __name__ == '__main__':
import os
import sys
fxwebgen_dir = os.path.abspath('fxwebgen')
if os.path.isfile(os.path.join(fxwebgen_dir, '__init__.py')):
sys.path.append(fxwebgen_dir)
from fxwebgen.main import run
run()
| 31.076923 | 69 | 0.700495 |
38b83eabdbd196d94ad5dafe402ba76506e2a067 | 3,197 | py | Python | cmc/modules/exchange/spot.py | Devansh3712/cmc-py | e3f9687914d92cd95bd5a7c04e6103345ba43a3d | [
"MIT"
] | 2 | 2022-02-19T15:51:22.000Z | 2022-02-20T18:26:14.000Z | cmc/modules/exchange/spot.py | Devansh3712/py-cmc | e3f9687914d92cd95bd5a7c04e6103345ba43a3d | [
"MIT"
] | 6 | 2022-02-21T10:50:43.000Z | 2022-03-03T15:44:09.000Z | cmc/modules/exchange/spot.py | Devansh3712/py-cmc | e3f9687914d92cd95bd5a7c04e6103345ba43a3d | [
"MIT"
] | 2 | 2022-02-20T01:43:35.000Z | 2022-03-13T09:34:51.000Z | #!/usr/bin/env python
"""Module for fetching spot exchange rankings from CoinMarketCap
website."""
from datetime import datetime
import os
import time
from typing import Any, Dict, List, Optional, Tuple, Union
import bs4
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from cmc.modules.base import CMCBaseClass
from cmc.utils.exceptions import ScrapeError
from cmc.utils.models import SpotData
class Spot(CMCBaseClass):
    """Class for scraping the data of top spot exchanges."""

    def __init__(self, proxy: Optional[str] = None, as_dict: bool = False) -> None:
        """
        Args:
            proxy (Optional[str], optional): Proxy to be used for Selenium and requests Session. Defaults to None.
            as_dict (bool): Return the data as a dictionary. Defaults to False.
        """
        super().__init__(proxy)
        self.base_url = "https://coinmarketcap.com/rankings/exchanges/"
        self.out = as_dict

    @property
    def __get_page_data(self) -> bs4.BeautifulSoup:
        """Scrape the table from the top spot exchanges page
        and return the scraped data.

        Raises:
            ScrapeError: Raised when data cannot be scraped from the webpage.

        Returns:
            bs4.BeautifulSoup: Scraped page data.
        """
        driver = webdriver.Chrome(
            service=self.service,
            options=self.driver_options,
            service_log_path=os.devnull,
        )
        try:
            driver.get(self.base_url)
            # Scroll to the bottom so lazily-loaded table rows are rendered.
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")
            time.sleep(1)
            result = driver.find_element(
                By.XPATH,
                '//*[@id="__next"]/div/div[1]/div[2]/div/div/div[2]/table/tbody',
            )
            page_data = result.get_attribute("innerHTML")
            driver.quit()
            soup = BeautifulSoup(page_data, features="lxml")
            return soup
        except:
            # NOTE(review): bare except loses the underlying failure; consider
            # chaining it into ScrapeError.
            raise ScrapeError

    @property
    def get_data(self) -> Union[Dict[int, Dict[str, Any]], Dict[int, SpotData]]:
        """Scrape exchange names and ranks from data returned by the
        __get_page_data property.

        Returns:
            Union[Dict[int, Dict[str, Any]], Dict[int, SpotData]]: Exchange platform rankings.
        """
        spot: Dict[int, Any] = {}
        page_data = self.__get_page_data
        data = page_data.find_all("tr")
        for rank, content in enumerate(data):
            # Second cell of each row holds the exchange name/link.
            td = content.find_all("td")[1]
            try:
                name: str = td.find("p", class_="sc-1eb5slv-0 iworPT").text
            except:
                # Fallback for rows whose markup lacks the name <p> element.
                name: str = td.text  # type: ignore
            cmc_link: str = td.find("a", class_="cmc-link")["href"]
            result = {
                "name": name,
                "cmc_link": cmc_link,
                "cmc_name": cmc_link.split("/")[-2],
                "url": self.cmc_url + cmc_link,
                "timestamp": datetime.now(),
            }
            # Ranks are 1-based; emit raw dicts or SpotData models per `out`.
            if self.out:
                spot[rank + 1] = result
            else:
                spot[rank + 1] = SpotData(**result)
        return spot
| 34.376344 | 114 | 0.580231 |
36287f4bb514473c467a906e885611d0efcff110 | 3,709 | py | Python | app.py | Azka-Gilani/webservices1.3 | 272928f351d9f68ac45df22654e7cc9a210b2c9f | [
"Apache-2.0"
] | null | null | null | app.py | Azka-Gilani/webservices1.3 | 272928f351d9f68ac45df22654e7cc9a210b2c9f | [
"Apache-2.0"
] | null | null | null | app.py | Azka-Gilani/webservices1.3 | 272928f351d9f68ac45df22654e7cc9a210b2c9f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import urllib
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)  # module-level WSGI app used by the route decorators below
@app.route('/webhook', methods=['POST'])
def webhook():
    """Dialogflow fulfillment endpoint: parse the request, delegate to
    processRequest() and return the result as a JSON response."""
    payload = request.get_json(silent=True, force=True)

    print("Request:")
    print(json.dumps(payload, indent=4))

    body = json.dumps(processRequest(payload), indent=4)

    response = make_response(body)
    response.headers['Content-Type'] = 'application/json'
    return response
def processRequest(req):
    """Handle a Dialogflow request: for the configured action, query the
    aarz.pk property backend with the extracted city/sector and build the
    chat reply. Returns {} for any other action.
    """
    if req.get("result").get("action") != "yahooWeatherForecast":
        return {}
    city_names = processlocation(req)
    sector_names = processSector(req)
    # Fix: the query string had been mangled by an HTML-entity round trip
    # ("&sect" rendered as the section sign), corrupting the second parameter.
    baseurl = ("https://fazendanatureza.com/bot/botarz.php?city_name="
               + city_names + "&sector_name=" + sector_names)
    result = urllib.urlopen(baseurl).read()
    data = json.loads(result)
    return makeWebhookResult(data)
def processlocation(req):
    """Extract the 'city' parameter from a Dialogflow request dict."""
    return req.get("result").get("parameters").get("city")
def processSector(req):
    """Extract the 'Location' (sector) parameter from a Dialogflow request dict."""
    return req.get("result").get("parameters").get("Location")
def makeWebhookResult(data):
    """Build a Dialogflow/Facebook generic-template response from two listings.

    ``data`` is the JSON-decoded backend payload: a list of property dicts
    with keys ``p_id``, ``title``, ``address`` and ``price``.

    Returns {} when fewer than two rows are available or the first row has
    no title (matches the original empty-response contract).
    """
    # Robustness fix: the original indexed data[0]/data[1] unconditionally
    # and crashed with IndexError on short result sets.
    if len(data) < 2:
        return {}

    row1, row2 = data[0], data[1]
    if row1['title'] is None:
        return {}

    def bubble(row, payload_text):
        # One generic-template card for a single property listing.
        return {
            "title": row['title'],
            "subtitle": row['address'],
            "item_url": "http://aarz.pk/search?purpose=Sell&postedby=homepage"
                        "&property_type=&locAreaOrKeyword=" + row['address'],
            "image_url": "http://www.aarz.pk/assets/images/properties/"
                         + row['p_id'] + "/" + row['p_id'] + ".actual.1.jpg",
            "buttons": [
                {
                    "type": "web_url",
                    "url": "www.aarz.pk",
                    "title": "Open Web URL"
                },
                {
                    "type": "postback",
                    "title": "Call Postback",
                    "payload": payload_text,
                },
            ],
        }

    speech = "This is the response from server." + row1['title'] + " " + row2['title']
    print("Response:")
    print(speech)

    message = {
        "attachment": {
            "type": "template",
            "payload": {
                "template_type": "generic",
                "elements": [
                    bubble(row1, "Payload for first bubble"),
                    bubble(row2, "Payload for second bubble"),
                ],
            }
        }
    }

    return {
        "speech": speech,
        "displayText": speech,
        "data": {"facebook": message},
        # "contextOut": [],
        # "source": "apiai-weather-webhook-sample"
    }
if __name__ == '__main__':
    # Start the Flask development server on the port given by the environment
    # (e.g. by the PaaS) or 5000 by default.
    port = int(os.getenv('PORT', 5000))
    # Consistency fix: use the print() call form like the rest of the file
    # (works on both Python 2 and Python 3).
    print("Starting app on port %d" % port)
    app.run(debug=False, port=port, host='0.0.0.0')
02f457eebff5ba16720bac29a2ddab6ee261b371 | 3,026 | py | Python | flask-app/application/worker.py | filak/MTW-MeSH | b4bc525b01eaefadf991304f725dd4b51c11f50e | [
"MIT"
] | 1 | 2019-10-25T09:38:39.000Z | 2019-10-25T09:38:39.000Z | flask-app/application/worker.py | filak/MTW-MeSH | b4bc525b01eaefadf991304f725dd4b51c11f50e | [
"MIT"
] | 13 | 2019-10-16T09:33:37.000Z | 2022-03-22T12:51:28.000Z | flask-app/application/worker.py | filak/MTW-MeSH | b4bc525b01eaefadf991304f725dd4b51c11f50e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
MeSH Traslation Workflow (MTW) background worker - Flask app factory
"""
import logging
from flask import Flask, abort, request
from application import utils as mtu
def create_app(debug=False, logger=None,
               config_path='conf/mtw.ini',
               static_url_path='/assets-mtw'):
    """Application factory for the MTW background worker.

    Args:
        debug: Enable Flask debug mode.
        logger: Optional pre-configured logger; when given, an INFO-level
            file handler is attached to it, otherwise a DEBUG-level
            handler is attached to the default app logger.
        config_path: Instance-relative path of the local INI config file.
        static_url_path: URL prefix under which static assets are served.

    Returns:
        The configured Flask application (aborts with 500 if the local
        config file cannot be read).
    """
    app = Flask(__name__, instance_relative_config=True, static_url_path=static_url_path)
    app.debug = debug
    app.jinja_env.trim_blocks = True
    app.jinja_env.lstrip_blocks = True

    if logger:
        app.logger = logger
        file_handler = logging.FileHandler(mtu.get_instance_dir(app, 'logs/mtw_worker.log'))
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s '))
        app.logger.addHandler(file_handler)
    else:
        file_handler = logging.FileHandler(mtu.get_instance_dir(app, 'logs/mtw_worker_debug.log'))
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s '))
        app.logger.addHandler(file_handler)

    app.config.update(dict(
        APP_NAME = 'MTW Worker',
        APP_VER = '0.1.7',
        API_VER = '1.0.0',
        TEMP_DIR = mtu.get_instance_dir(app, 'temp'),
        local_config_file = mtu.get_instance_dir(app, config_path)
    ))

    localConfig = mtu.getConfig(app.config['local_config_file'])
    if localConfig:
        with app.app_context():
            d = mtu.getLocalConfValue(localConfig)
            app.config.update(d)
    else:
        error = 'Error reading local config file: ' + app.config['local_config_file']
        app.logger.error(error)
        abort(500)

    @app.route('/')
    def hello_world():
        # Trivial liveness endpoint.
        return 'MTW worker'

    @app.route('/refresh_stats/get:<stat>', methods=['GET','POST'])
    def refresh_stats(stat):
        # Whitelist the stat name before triggering a (potentially long)
        # regeneration.
        if stat in ['initial','actual','all','duplicates','lookups','lookups_rest']:
            app.logger.info('Stats gen started ...')
            mtu.refreshStats(stat)
            app.logger.info('Stats gen finished ...')
            return 'OK'
        else:
            return 'ERROR'

    @app.route('/export_data/get:<export>', methods=['GET','POST'])
    def export_data(export):
        if export in ['umls','umls_all','js_all','js_parsers','js_elastic','xml_desc','xml_qualif','marc']:
            app.logger.info('Export '+ export +' started ...')
            if export in ['umls','umls_all']:
                mtu.exportData(export)
            else:
                # Lookup-style exports may carry per-export params as JSON
                # in a POST body.
                # NOTE(review): original indentation was lost in transit;
                # confirm which branch is meant to handle non-POST requests.
                if request.method == 'POST':
                    if request.json:
                        if request.json.get(export):
                            mtu.exportLookup(export, params=request.json.get(export))
                        else:
                            mtu.exportLookup(export)
                else:
                    mtu.exportLookup(export)
            app.logger.info('Export '+ export +' finished ...')
            return 'OK'
        else:
            return 'ERROR'

    return app
| 31.852632 | 107 | 0.594514 |
8e02b8a24bca9d7e5ae6b5aec52acd1decb5b904 | 8,833 | py | Python | salt/modules/hipchat.py | Rafflecopter/salt | 08bbfcd4d9b93351d7d5d25b097e892026b6f1cd | [
"Apache-2.0"
] | null | null | null | salt/modules/hipchat.py | Rafflecopter/salt | 08bbfcd4d9b93351d7d5d25b097e892026b6f1cd | [
"Apache-2.0"
] | null | null | null | salt/modules/hipchat.py | Rafflecopter/salt | 08bbfcd4d9b93351d7d5d25b097e892026b6f1cd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Module for sending messages to hipchat.
.. versionadded:: 2015.5.0
:configuration: This module can be used by either passing an api key and version
directly or by specifying both in a configuration profile in the salt
master/minion config.
For example:
.. code-block:: yaml
hipchat:
api_key: peWcBiMOS9HrZG15peWcBiMOS9HrZG15
api_version: v1
'''
# Import Python Libs
from __future__ import absolute_import
import json
import logging
# Import 3rd-party Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext.six.moves.urllib.parse import urljoin as _urljoin
from salt.ext.six.moves.urllib.parse import urlencode as _urlencode
from salt.ext.six.moves import range
import salt.ext.six.moves.http_client
import salt.utils.http
# pylint: enable=import-error,no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)

# Name this execution module is exposed under (returned by __virtual__).
__virtualname__ = 'hipchat'
def __virtual__():
    '''
    Return virtual name of the module.

    :return: The virtual name of the module.
    '''
    return __virtualname__
def _query(function,
           api_key=None,
           api_version=None,
           room_id=None,
           method='GET',
           data=None):
    '''
    HipChat object method function to construct and execute on the API URL.

    :param function:    The HipChat api function to perform
                        ('rooms', 'users' or 'message').
    :param api_key:     The HipChat api key.
    :param api_version: The HipChat api version (v1 or v2).
    :param room_id:     Room to post a notification to (v2 'message' only).
    :param method:      The HTTP method, e.g. GET or POST.
    :param data:        The data to be sent for POST method.
    :return:            The json response from the API call or False.
    '''
    headers = {}
    query_params = {}

    # Fall back to the minion/master configuration for missing credentials.
    if not api_key or not api_version:
        try:
            options = __salt__['config.option']('hipchat')
            if not api_key:
                api_key = options.get('api_key')
            if not api_version:
                api_version = options.get('api_version')
        except (NameError, KeyError, AttributeError):
            log.error("No HipChat api key or version found.")
            return False

    if room_id:
        room_id = 'room/{0}/notification'.format(str(room_id))
    else:
        # Placeholder path; only relevant when function == 'message'.
        room_id = 'room/0/notification'

    # Maps (api_version, function) to the request path and the key under
    # which the interesting part of the response payload lives.
    hipchat_functions = {
        'v1': {
            'rooms': {
                'request': 'rooms/list',
                'response': 'rooms',
            },
            'users': {
                'request': 'users/list',
                'response': 'users',
            },
            'message': {
                'request': 'rooms/message',
                'response': 'status',
            },
        },
        'v2': {
            'rooms': {
                'request': 'room',
                'response': 'items',
            },
            'users': {
                'request': 'user',
                'response': 'items',
            },
            'message': {
                'request': room_id,
                'response': None,
            },
        },
    }

    api_url = 'https://api.hipchat.com'
    base_url = _urljoin(api_url, api_version + '/')
    path = hipchat_functions.get(api_version).get(function).get('request')
    url = _urljoin(base_url, path, False)

    if api_version == 'v1':
        # v1 authenticates via query string and posts form-encoded data.
        query_params['format'] = 'json'
        query_params['auth_token'] = api_key

        if method == 'POST':
            headers['Content-Type'] = 'application/x-www-form-urlencoded'
        if data:
            if data.get('notify', None):
                data['notify'] = 1
            data = _urlencode(data)
    elif api_version == 'v2':
        # v2 authenticates via bearer token and posts JSON.
        headers['Authorization'] = 'Bearer {0}'.format(api_key)
        if data:
            data = json.dumps(data)
    else:
        log.error('Unsupported HipChat API version')
        return False

    result = salt.utils.http.query(
        url,
        method,
        params=query_params,
        data=data,
        decode=True,
        status=True,
        header_dict=headers,
        opts=__opts__,
    )

    if result.get('status', None) == salt.ext.six.moves.http_client.OK:
        response = hipchat_functions.get(api_version).get(function).get('response')
        return result.get('dict', {}).get(response, None)
    elif result.get('status', None) == salt.ext.six.moves.http_client.NO_CONTENT:
        # NOTE(review): v2 message posts answer 204 No Content on success,
        # yet this is reported as False — confirm this is intended.
        return False
    else:
        log.debug(url)
        log.debug(query_params)
        log.debug(data)
        log.debug(result)
        if result.get('error'):
            log.error(result)
        return False
def list_rooms(api_key=None, api_version=None):
    '''
    List all HipChat rooms.

    :param api_key: The HipChat admin api key.
    :param api_version: The HipChat api version, if not specified in the configuration.
    :return: The room list.

    CLI Example:

    .. code-block:: bash

        salt '*' hipchat.list_rooms
        salt '*' hipchat.list_rooms api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15 api_version=v1
    '''
    # Return the API result directly; the intermediate "foo" variable and
    # payload-dumping log.debug were leftover debugging scaffolding,
    # inconsistent with list_users().
    return _query(function='rooms', api_key=api_key, api_version=api_version)
def list_users(api_key=None, api_version=None):
    '''
    List all HipChat users.

    :param api_key: The HipChat admin api key.
    :param api_version: The HipChat api version, if not specified in the configuration.
    :return: The user list.

    CLI Example:

    .. code-block:: bash

        salt '*' hipchat.list_users
        salt '*' hipchat.list_users api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15 api_version=v1
    '''
    return _query(function='users', api_key=api_key, api_version=api_version)
def find_room(name, api_key=None, api_version=None):
    '''
    Find a room by name and return it.

    :param name: The room name.
    :param api_key: The HipChat admin api key.
    :param api_version: The HipChat api version, if not specified in the configuration.
    :return: The room object, or False if no room with that name exists.

    CLI Example:

    .. code-block:: bash

        salt '*' hipchat.find_room name="Development Room"
        salt '*' hipchat.find_room name="Development Room" api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15 api_version=v1
    '''
    rooms = list_rooms(api_key=api_key, api_version=api_version)
    if rooms:
        # Iterate the rooms directly instead of indexing via range(len(...)).
        for room in rooms:
            if room['name'] == name:
                return room
    return False
def find_user(name, api_key=None, api_version=None):
    '''
    Find a user by name and return it.

    :param name: The user name.
    :param api_key: The HipChat admin api key.
    :param api_version: The HipChat api version, if not specified in the configuration.
    :return: The user object, or False if no user with that name exists.

    CLI Example:

    .. code-block:: bash

        salt '*' hipchat.find_user name="Thomas Hatch"
        salt '*' hipchat.find_user name="Thomas Hatch" api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15 api_version=v1
    '''
    users = list_users(api_key=api_key, api_version=api_version)
    if users:
        # Iterate the users directly instead of indexing via range(len(...)).
        for user in users:
            if user['name'] == name:
                return user
    return False
def send_message(room_id,
                 message,
                 from_name,
                 api_key=None,
                 api_version=None,
                 color='yellow',
                 notify=False):
    '''
    Send a message to a HipChat room.

    :param room_id: The room id or room name, either will work.
    :param message: The message to send to the HipChat room.
    :param from_name: Specify who the message is from.
    :param api_key: The HipChat api key, if not specified in the configuration.
    :param api_version: The HipChat api version, if not specified in the configuration.
    :param color: The color for the message, default: yellow.
    :param notify: Whether to notify the room, default: False.
    :return: Boolean if message was sent successfully.

    CLI Example:

    .. code-block:: bash

        salt '*' hipchat.send_message room_id="Development Room" message="Build is done" from_name="Build Server"
        salt '*' hipchat.send_message room_id="Development Room" message="Build failed" from_name="Build Server" color="red" notify=True
    '''
    # The API caps the sender at 15 characters and the body at 10000.
    parameters = {
        'room_id': room_id,
        'from': from_name[:15],
        'message': message[:10000],
        'message_format': 'text',
        'color': color,
        'notify': notify,
    }

    result = _query(function='message',
                    api_key=api_key,
                    api_version=api_version,
                    room_id=room_id,
                    method='POST',
                    data=parameters)

    return True if result else False
| 29.64094 | 136 | 0.598551 |
70aa9877b15d3750bb9b4c800ed2402b5439fb60 | 11,719 | py | Python | alphapose/utils/detector.py | 18761095968/AlphaPose | 2370191beb87848e87c83cf704a24b6e9a3a1e4a | [
"Apache-2.0"
] | 2 | 2021-06-11T08:15:18.000Z | 2021-07-04T08:55:33.000Z | alphapose/utils/detector.py | 18761095968/AlphaPose | 2370191beb87848e87c83cf704a24b6e9a3a1e4a | [
"Apache-2.0"
] | null | null | null | alphapose/utils/detector.py | 18761095968/AlphaPose | 2370191beb87848e87c83cf704a24b6e9a3a1e4a | [
"Apache-2.0"
] | 4 | 2021-07-03T15:04:18.000Z | 2021-07-04T09:08:42.000Z | import os
import sys
from threading import Thread
from queue import Queue
import cv2
import numpy as np
import torch
import torch.multiprocessing as mp
from alphapose.utils.presets import SimpleTransform
from alphapose.models import builder
class DetectionLoader():
def __init__(self, input_source, detector, cfg, opt, mode='image', batchSize=1, queueSize=128):
self.cfg = cfg
self.opt = opt
self.mode = mode
self.device = opt.device
if mode == 'image':
self.img_dir = opt.inputpath
self.imglist = [os.path.join(self.img_dir, im_name.rstrip('\n').rstrip('\r')) for im_name in input_source]
self.datalen = len(input_source)
elif mode == 'video':
stream = cv2.VideoCapture(input_source)
assert stream.isOpened(), 'Cannot capture source'
self.path = input_source
self.datalen = int(stream.get(cv2.CAP_PROP_FRAME_COUNT))
self.fourcc = int(stream.get(cv2.CAP_PROP_FOURCC))
self.fps = stream.get(cv2.CAP_PROP_FPS)
self.frameSize = (int(stream.get(cv2.CAP_PROP_FRAME_WIDTH)), int(stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
self.videoinfo = {'fourcc': self.fourcc, 'fps': self.fps, 'frameSize': self.frameSize}
stream.release()
self.detector = detector
self.batchSize = batchSize
leftover = 0
if (self.datalen) % batchSize:
leftover = 1
self.num_batches = self.datalen // batchSize + leftover
self._input_size = cfg.DATA_PRESET.IMAGE_SIZE
self._output_size = cfg.DATA_PRESET.HEATMAP_SIZE
self._sigma = cfg.DATA_PRESET.SIGMA
pose_dataset = builder.retrieve_dataset(self.cfg.DATASET.TRAIN)
if cfg.DATA_PRESET.TYPE == 'simple':
self.transformation = SimpleTransform(
pose_dataset, scale_factor=0,
input_size=self._input_size,
output_size=self._output_size,
rot=0, sigma=self._sigma,
train=False, add_dpg=False, gpu_device=self.device)
# initialize the queue used to store data
"""
image_queue: the buffer storing pre-processed images for object detection
det_queue: the buffer storing human detection results
pose_queue: the buffer storing post-processed cropped human image for pose estimation
"""
if opt.sp:
self._stopped = False
self.image_queue = Queue(maxsize=queueSize)
self.det_queue = Queue(maxsize=10 * queueSize)
self.pose_queue = Queue(maxsize=10 * queueSize)
else:
self._stopped = mp.Value('b', False)
self.image_queue = mp.Queue(maxsize=queueSize)
self.det_queue = mp.Queue(maxsize=10 * queueSize)
self.pose_queue = mp.Queue(maxsize=10 * queueSize)
def start_worker(self, target):
if self.opt.sp:
p = Thread(target=target, args=())
else:
p = mp.Process(target=target, args=())
# p.daemon = True
p.start()
return p
def start(self):
# start a thread to pre process images for object detection
if self.mode == 'image':
image_preprocess_worker = self.start_worker(self.image_preprocess)
elif self.mode == 'video':
image_preprocess_worker = self.start_worker(self.frame_preprocess)
# start a thread to detect human in images
image_detection_worker = self.start_worker(self.image_detection)
# start a thread to post process cropped human image for pose estimation
image_postprocess_worker = self.start_worker(self.image_postprocess)
return [image_preprocess_worker, image_detection_worker, image_postprocess_worker]
def stop(self):
# clear queues
self.clear_queues()
def terminate(self):
if self.opt.sp:
self._stopped = True
else:
self._stopped.value = True
self.stop()
def clear_queues(self):
self.clear(self.image_queue)
self.clear(self.det_queue)
self.clear(self.pose_queue)
def clear(self, queue):
while not queue.empty():
queue.get()
def wait_and_put(self, queue, item):
queue.put(item)
def wait_and_get(self, queue):
return queue.get()
def image_preprocess(self):
for i in range(self.num_batches):
imgs = []
orig_imgs = []
im_names = []
im_dim_list = []
for k in range(i * self.batchSize, min((i + 1) * self.batchSize, self.datalen)):
if self.stopped:
self.wait_and_put(self.image_queue, (None, None, None, None))
return
im_name_k = self.imglist[k]
# expected image shape like (1,3,h,w) or (3,h,w)
img_k = self.detector.image_preprocess(im_name_k)
if isinstance(img_k, np.ndarray):
img_k = torch.from_numpy(img_k)
# add one dimension at the front for batch if image shape (3,h,w)
if img_k.dim() == 3:
img_k = img_k.unsqueeze(0)#加上一个维度
orig_img_k = cv2.cvtColor(cv2.imread(im_name_k), cv2.COLOR_BGR2RGB) # scipy.misc.imread(im_name_k, mode='RGB') is depreciated
im_dim_list_k = orig_img_k.shape[1], orig_img_k.shape[0]#读取矩阵的长度
imgs.append(img_k)#追加对象
orig_imgs.append(orig_img_k)
im_names.append(os.path.basename(im_name_k))
im_dim_list.append(im_dim_list_k)
with torch.no_grad():
# Human Detection
imgs = torch.cat(imgs)
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
# im_dim_list_ = im_dim_list
self.wait_and_put(self.image_queue, (imgs, orig_imgs, im_names, im_dim_list))
def frame_preprocess(self):
stream = cv2.VideoCapture(self.path)
assert stream.isOpened(), 'Cannot capture source'
for i in range(self.num_batches):
imgs = []
orig_imgs = []
im_names = []
im_dim_list = []
for k in range(i * self.batchSize, min((i + 1) * self.batchSize, self.datalen)):
(grabbed, frame) = stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed or self.stopped:
# put the rest pre-processed data to the queue
if len(imgs) > 0:
with torch.no_grad():
# Record original image resolution
imgs = torch.cat(imgs)
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
self.wait_and_put(self.image_queue, (imgs, orig_imgs, im_names, im_dim_list))
self.wait_and_put(self.image_queue, (None, None, None, None))
print('===========================> This video get ' + str(k) + ' frames in total.')
sys.stdout.flush()
stream.release()
return
# expected frame shape like (1,3,h,w) or (3,h,w)
img_k = self.detector.image_preprocess(frame)
if isinstance(img_k, np.ndarray):
img_k = torch.from_numpy(img_k)
# add one dimension at the front for batch if image shape (3,h,w)
if img_k.dim() == 3:
img_k = img_k.unsqueeze(0)
im_dim_list_k = frame.shape[1], frame.shape[0]
imgs.append(img_k)
orig_imgs.append(frame[:, :, ::-1])
im_names.append(str(k) + '.jpg')
im_dim_list.append(im_dim_list_k)
with torch.no_grad():
# Record original image resolution
imgs = torch.cat(imgs)
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
# im_dim_list_ = im_dim_list
self.wait_and_put(self.image_queue, (imgs, orig_imgs, im_names, im_dim_list))
stream.release()
def image_detection(self):
for i in range(self.num_batches):
imgs, orig_imgs, im_names, im_dim_list = self.wait_and_get(self.image_queue)
if imgs is None or self.stopped:
self.wait_and_put(self.det_queue, (None, None, None, None, None, None, None))
return
with torch.no_grad():
# pad useless images to fill a batch, else there will be a bug
for pad_i in range(self.batchSize - len(imgs)):
imgs = torch.cat((imgs, torch.unsqueeze(imgs[0], dim=0)), 0)
im_dim_list = torch.cat((im_dim_list, torch.unsqueeze(im_dim_list[0], dim=0)), 0)
dets = self.detector.images_detection(imgs, im_dim_list)
if isinstance(dets, int) or dets.shape[0] == 0:
for k in range(len(orig_imgs)):
self.wait_and_put(self.det_queue, (orig_imgs[k], im_names[k], None, None, None, None, None))
continue
if isinstance(dets, np.ndarray):
dets = torch.from_numpy(dets)
dets = dets.cpu()
boxes = dets[:, 1:5]
scores = dets[:, 5:6]
if self.opt.tracking:
ids = dets[:, 6:7]
else:
ids = torch.zeros(scores.shape)
for k in range(len(orig_imgs)):
boxes_k = boxes[dets[:, 0] == k]
if isinstance(boxes_k, int) or boxes_k.shape[0] == 0:
self.wait_and_put(self.det_queue, (orig_imgs[k], im_names[k], None, None, None, None, None))
continue
inps = torch.zeros(boxes_k.size(0), 3, *self._input_size)
cropped_boxes = torch.zeros(boxes_k.size(0), 4)
self.wait_and_put(self.det_queue, (orig_imgs[k], im_names[k], boxes_k, scores[dets[:, 0] == k], ids[dets[:, 0] == k], inps, cropped_boxes))
def image_postprocess(self):
for i in range(self.datalen):
with torch.no_grad():
(orig_img, im_name, boxes, scores, ids, inps, cropped_boxes) = self.wait_and_get(self.det_queue)
if orig_img is None or self.stopped:
self.wait_and_put(self.pose_queue, (None, None, None, None, None, None, None))
return
if boxes is None or boxes.nelement() == 0:
self.wait_and_put(self.pose_queue, (None, orig_img, im_name, boxes, scores, ids, None))
continue
# imght = orig_img.shape[0]
# imgwidth = orig_img.shape[1]
for i, box in enumerate(boxes):
inps[i], cropped_box = self.transformation.test_transform(orig_img, box)
cropped_boxes[i] = torch.FloatTensor(cropped_box)
# inps, cropped_boxes = self.transformation.align_transform(orig_img, boxes)
self.wait_and_put(self.pose_queue, (inps, orig_img, im_name, boxes, scores, ids, cropped_boxes))
def read(self):
return self.wait_and_get(self.pose_queue)
@property
def stopped(self):
if self.opt.sp:
return self._stopped
else:
return self._stopped.value
    @property
    def length(self):
        """Total number of items this pipeline will emit (``self.datalen``)."""
        return self.datalen
| 41.704626 | 155 | 0.567711 |
1c4208b2f7a2bd869e15e83d2bdd272601022302 | 29,953 | py | Python | testsSDW__copy/card_tests/shaman_tests.py | jomyhuang/sdwle | 9b6e916567e09c7cba4a171fe0adf0f47009a8c3 | [
"MIT"
] | null | null | null | testsSDW__copy/card_tests/shaman_tests.py | jomyhuang/sdwle | 9b6e916567e09c7cba4a171fe0adf0f47009a8c3 | [
"MIT"
] | null | null | null | testsSDW__copy/card_tests/shaman_tests.py | jomyhuang/sdwle | 9b6e916567e09c7cba4a171fe0adf0f47009a8c3 | [
"MIT"
] | null | null | null | import random
import unittest
from SDWLE.cards.spells.neutral import TheCoin
from testsSDW.agents.testing_agents import OneCardPlayingAgent, MinionAttackingAgent, CardTestingAgent, \
PlayAndAttackAgent
from testsSDW.testing_utils import generate_game_for
from SDWLE.cards import *
from SDWLE.constants import MINION_TYPE
from SDWLE.agents.basic_agents import PredictableAgent, DoNothingAgent
class TestShaman(unittest.TestCase):
    """Card-behavior tests for the Shaman class: minions, spells, totems and weapons.

    Each test builds a scripted game via generate_game_for and asserts board
    state after a fixed number of turns.  Expected values depend on the RNG
    seed fixed in setUp, so the seed must not change.
    """
    def setUp(self):
        """Seed the RNG so random card effects are reproducible across runs."""
        random.seed(1857)
    def test_AlAkirTheWindlord(self):
        """Al'Akir enters play with windfury, charge, divine shield and taunt."""
        game = generate_game_for(AlAkirTheWindlord, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
        for turn in range(0, 15):
            game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual("Al'Akir the Windlord", game.players[0].minions[0].card.name)
        self.assertTrue(game.players[0].minions[0].windfury())
        self.assertTrue(game.players[0].minions[0].charge())
        self.assertTrue(game.players[0].minions[0].divine_shield)
        self.assertTrue(game.players[0].minions[0].taunt)
    def test_DustDevil(self):
        """Dust Devil has windfury and overloads two mana crystals next turn."""
        game = generate_game_for(DustDevil, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
        game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual("Dust Devil", game.players[0].minions[0].card.name)
        self.assertTrue(game.players[0].minions[0].windfury())
        self.assertEqual(2, game.players[0].upcoming_overload)
        game.play_single_turn()
        # Overload should cause that we start this turn with 0 mana
        game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual(0, game.players[0].upcoming_overload)
        self.assertEqual(0, game.players[0].mana)
        self.assertEqual(2, game.players[0].max_mana)
    def test_EarthElemental(self):
        """Earth Elemental is a taunt minion that overloads three crystals."""
        game = generate_game_for(EarthElemental, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
        # Earth Elemental should be played
        for turn in range(0, 9):
            game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual("Earth Elemental", game.players[0].minions[0].card.name)
        self.assertTrue(game.players[0].minions[0].taunt)
        self.assertEqual(3, game.players[0].upcoming_overload)
    def test_FireElemental(self):
        """Fire Elemental's battlecry deals three damage to the enemy hero."""
        game = generate_game_for(FireElemental, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
        for turn in range(0, 10):
            game.play_single_turn()
        self.assertEqual(30, game.players[1].hero.health)
        # Fire Elemental should be played, and its battlecry dealing three damage to opponent
        game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual("Fire Elemental", game.players[0].minions[0].card.name)
        self.assertEqual(27, game.players[1].hero.health)
    def test_FlametongueTotem(self):
        """Flametongue Totem buffs adjacent minions, tracked through summons, deaths and silence."""
        game = generate_game_for(StonetuskBoar, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
        for turn in range(0, 5):
            game.play_single_turn()
        # There should be three Stonetusk Boars on the board
        self.assertEqual(3, len(game.players[0].minions))
        # add a new Flametongue Totem at index 1
        totem = FlametongueTotem()
        totem.summon(game.players[0], game, 1)
        # The minions to either side should have their attack increased
        self.assertEqual(4, len(game.players[0].minions))
        self.assertEqual(3, game.players[0].minions[0].calculate_attack())
        self.assertEqual(3, game.players[0].minions[2].calculate_attack())
        self.assertEqual(1, game.players[0].minions[3].calculate_attack())
        # When removing the minion at index 0, we should not get an error
        game.players[0].minions[0].die(None)
        game.players[0].minions[0].activate_delayed()
        self.assertEqual(3, len(game.players[0].minions))
        # When removing the minion at index 1, we should have a new minion at index 1,
        # and its attack should be increased
        game.players[0].minions[1].die(None)
        game.players[0].minions[1].activate_delayed()
        self.assertEqual(2, len(game.players[0].minions))
        self.assertEqual(3, game.players[0].minions[1].calculate_attack())
        # Silencing this minion should have no effect on its attack
        game.players[0].minions[1].silence()
        self.assertEqual(3, game.players[0].minions[1].calculate_attack())
        # We should be able to add a boar on either side of the wolf, and their attack should be increased
        # The attack of the boar which used to be next to the wolf should decrease
        boar = StonetuskBoar()
        boar.summon(game.players[0], game, 0)
        boar.summon(game.players[0], game, 2)
        self.assertEqual(4, len(game.players[0].minions))
        self.assertEqual(3, game.players[0].minions[0].calculate_attack())
        self.assertEqual(3, game.players[0].minions[2].calculate_attack())
        self.assertEqual(1, game.players[0].minions[3].calculate_attack())
        # Add a new boar on the left of the totem since we haven't tested that yet
        boar.summon(game.players[0], game, 1)
        self.assertEqual(5, len(game.players[0].minions))
        self.assertEqual(1, game.players[0].minions[0].calculate_attack())
        self.assertEqual(3, game.players[0].minions[1].calculate_attack())
        game.players[0].minions[1].die(None)
        game.players[0].minions[1].activate_delayed()
        self.assertEqual(4, len(game.players[0].minions))
        self.assertEqual(3, game.players[0].minions[0].calculate_attack())
        # If the totem is silenced, then the boars to either side should no longer have increased attack
        game.players[0].minions[1].silence()
        self.assertEqual(1, game.players[0].minions[0].calculate_attack())
        self.assertEqual(1, game.players[0].minions[2].calculate_attack())
        self.assertEqual(1, game.players[0].minions[3].calculate_attack())
    def test_ManaTideTotem(self):
        """Mana Tide Totem draws an extra card at end of turn until silenced."""
        game = generate_game_for([ManaTideTotem, WarGolem], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
        for turn in range(0, 4):
            game.play_single_turn()
        self.assertEqual(25, game.players[0].deck.left)
        self.assertEqual(0, len(game.players[0].minions))
        # Mana Tide Totem should be played, and we should draw a card at the end of turn
        game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual("Mana Tide Totem", game.players[0].minions[0].card.name)
        self.assertEqual(23, game.players[0].deck.left)
        game.play_single_turn()
        # Silence, we should only draw one card next turn
        game.players[0].minions[0].silence()
        game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual(22, game.players[0].deck.left)
    def test_UnboundElemental(self):
        """Unbound Elemental gains +1/+1 whenever an overload card is played, unless silenced."""
        game = generate_game_for([UnboundElemental, DustDevil, DustDevil], StonetuskBoar, OneCardPlayingAgent,
                                 DoNothingAgent)
        for turn in range(0, 6):
            game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual("Unbound Elemental", game.players[0].minions[0].card.name)
        self.assertEqual(2, game.players[0].minions[0].calculate_attack())
        self.assertEqual(4, game.players[0].minions[0].calculate_max_health())
        # One Dust Devil should be played, giving the Unbound Elemental +1/+1
        game.play_single_turn()
        self.assertEqual(2, len(game.players[0].minions))
        self.assertEqual(3, game.players[0].minions[-1].calculate_attack())
        self.assertEqual(5, game.players[0].minions[-1].calculate_max_health())
        # Test the silence
        game.players[0].minions[-1].silence()
        self.assertEqual(2, game.players[0].minions[-1].calculate_attack())
        self.assertEqual(4, game.players[0].minions[-1].calculate_max_health())
        # Another Dust Devil, nothing should happen because of silence
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(3, len(game.players[0].minions))
        self.assertEqual(2, game.players[0].minions[-1].calculate_attack())
        self.assertEqual(4, game.players[0].minions[-1].calculate_max_health())
    def test_Windspeaker(self):
        """Windspeaker's battlecry grants windfury to a friendly minion."""
        game = generate_game_for([StonetuskBoar, Windspeaker], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
        for turn in range(0, 6):
            game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual("Stonetusk Boar", game.players[0].minions[0].card.name)
        self.assertFalse(game.players[0].minions[0].windfury())
        # Windspeaker should be played, giving the boar windfury
        game.play_single_turn()
        self.assertEqual(2, len(game.players[0].minions))
        self.assertEqual("Windspeaker", game.players[0].minions[0].card.name)
        self.assertTrue(game.players[0].minions[1].windfury())
    def test_AncestralHealing(self):
        """Ancestral Healing restores a damaged minion to full health and gives it taunt."""
        game = generate_game_for([FlametongueTotem, AncestralHealing], StonetuskBoar,
                                 OneCardPlayingAgent, DoNothingAgent)
        for turn in range(0, 4):
            game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual("Flametongue Totem", game.players[0].minions[0].card.name)
        self.assertEqual(3, game.players[0].minions[0].health)
        self.assertFalse(game.players[0].minions[0].taunt)
        game.players[0].minions[0].health = 1
        game.play_single_turn()
        self.assertEqual(3, game.players[0].minions[0].health)
        self.assertTrue(game.players[0].minions[0].taunt)
    def test_AncestralSpirit(self):
        """Ancestral Spirit resummons the enchanted minion (full stats) when it dies."""
        game = generate_game_for([ArgentCommander, AncestralSpirit], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
        for turn in range(0, 11):
            game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual("Argent Commander", game.players[0].minions[0].card.name)
        self.assertEqual(2, game.players[0].minions[0].health)
        self.assertTrue(game.players[0].minions[0].divine_shield)
        game.play_single_turn()
        # Ancestral Spirit should be played on the Argent Commander
        game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        game.players[0].minions[0].health = 1
        game.players[0].minions[0].divine_shield = False
        # Let the minion die in order to test Ancestral Spirit
        commander = game.players[0].minions[0]
        commander.die(None)
        commander.activate_delayed()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual("Argent Commander", game.players[0].minions[0].card.name)
        self.assertEqual(2, game.players[0].minions[0].health)
        self.assertTrue(game.players[0].minions[0].divine_shield)
    def test_AncestralSpiritDeathrattle(self):
        """Ancestral Spirit stacks with an existing deathrattle (Loot Hoarder still draws)."""
        game = generate_game_for([LootHoarder, AncestralSpirit], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
        for turn in range(0, 5):
            game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual(4, len(game.players[0].hand))
        loot = game.players[0].minions[0]
        loot.die(None)
        loot.activate_delayed()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual(5, len(game.players[0].hand))
    def test_Bloodlust(self):
        """Bloodlust boosts friendly minions' attack for one turn only."""
        game = generate_game_for([StonetuskBoar, StonetuskBoar, StonetuskBoar, StonetuskBoar, Bloodlust], StonetuskBoar,
                                 MinionAttackingAgent, DoNothingAgent)
        for turn in range(0, 8):
            game.play_single_turn()
        self.assertEqual(4, len(game.players[0].minions))
        self.assertEqual(20, game.players[1].hero.health)
        # Bloodlust should be played, resulting in 4 * 4 = 16 damage
        game.play_single_turn()
        self.assertEqual(4, game.players[1].hero.health)
        # Attack power should be back to normal
        self.assertEqual(1, game.players[0].minions[0].calculate_attack())
    def test_EarthShock(self):
        """Earth Shock silences first (removing divine shield) and then deals one damage."""
        game = generate_game_for(EarthShock, ArgentSquire, OneCardPlayingAgent, OneCardPlayingAgent)
        for turn in range(0, 2):
            game.play_single_turn()
        self.assertEqual(1, len(game.players[1].minions))
        self.assertTrue(game.players[1].minions[0].divine_shield)
        # Earth Shock should be played, resulting in silence which removes the divine shield and then 1 damage
        game.play_single_turn()
        self.assertEqual(0, len(game.players[1].minions))
    def test_FarSight(self):
        """Far Sight reduces the cost of the drawn card by three — and only that card."""
        game = generate_game_for(FarSight, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
        for turn in range(0, 5):
            game.play_single_turn()
        # Far Sight should have been played, our latest card should cost 3 - 3 = 0
        self.assertEqual(0, game.players[0].hand[-1].mana_cost())
        self.assertEqual(3, game.players[0].hand[0].mana_cost())
        # Draw a card to make sure the new card doesn't get the effect
        game.players[0].draw()
        self.assertEqual(3, game.players[0].hand[-1].mana_cost())
        # Our old card shouldn't have been affected
        self.assertEqual(0, game.players[0].hand[-2].mana_cost())
    def test_FeralSpirit(self):
        """Feral Spirit summons two 2/3 Spirit Wolves with taunt and overloads two crystals."""
        game = generate_game_for(FeralSpirit, StonetuskBoar, CardTestingAgent, DoNothingAgent)
        for turn in range(0, 5):
            game.play_single_turn()
        self.assertEqual(2, len(game.players[0].minions))
        self.assertEqual(2, game.players[0].minions[0].calculate_attack())
        self.assertEqual(3, game.players[0].minions[0].health)
        self.assertTrue(game.players[0].minions[0].taunt)
        self.assertEqual("Spirit Wolf", game.players[0].minions[0].card.name)
        self.assertEqual(2, game.players[0].minions[0].card.mana)
        self.assertEqual(2, game.players[0].minions[1].calculate_attack())
        self.assertEqual(3, game.players[0].minions[1].health)
        self.assertTrue(game.players[0].minions[1].taunt)
        self.assertEqual("Spirit Wolf", game.players[0].minions[1].card.name)
        self.assertEqual(2, game.players[0].minions[1].card.mana)
        self.assertEqual(2, game.players[0].upcoming_overload)
    def test_VitalityTotem(self):
        """Vitality Totem heals its owner's hero for four at the end of each turn."""
        game = generate_game_for(VitalityTotem, StonetuskBoar, CardTestingAgent, DoNothingAgent)
        for turn in range(0, 2):
            game.play_single_turn()
        game.players[0].hero.health = 20
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(24, game.players[0].hero.health)
        self.assertEqual(0, game.players[0].minions[0].calculate_attack())
        self.assertEqual(3, game.players[0].minions[0].health)
        game.play_single_turn()
        game.play_single_turn()
        # player now has two vitality totems in play
        self.assertEqual(30, game.players[0].hero.health)
        self.assertEqual(2, len(game.players[0].minions))
    def test_ForkedLightning(self):
        """Forked Lightning needs two enemy minions, kills both, and overloads two crystals."""
        game = generate_game_for(ForkedLightning, StonetuskBoar, CardTestingAgent, OneCardPlayingAgent)
        for turn in range(0, 4):
            game.play_single_turn()
        # Nothing should have happened yet, since the opponent haven't got 2 minions until now
        self.assertEqual(2, len(game.players[1].minions))
        # Forked Lightning should be played
        game.play_single_turn()
        self.assertEqual(0, len(game.players[1].minions))
        self.assertEqual(2, game.players[0].upcoming_overload)
    def test_FrostShock(self):
        """Frost Shock deals one damage and freezes the target."""
        game = generate_game_for(FrostShock, StonetuskBoar, CardTestingAgent, DoNothingAgent)
        # Frost Shock should be played
        game.play_single_turn()
        self.assertEqual(29, game.players[1].hero.health)
        self.assertTrue(game.players[1].hero.frozen)
    def test_Hex(self):
        """Hex transforms a minion into a 0/1 Frog beast with taunt."""
        game = generate_game_for(ChillwindYeti, Hex, OneCardPlayingAgent, CardTestingAgent)
        for turn in range(0, 7):
            game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertFalse(game.players[0].minions[0].taunt)
        self.assertEqual(4, game.players[0].minions[0].calculate_attack())
        self.assertEqual(5, game.players[0].minions[0].health)
        self.assertEqual("Chillwind Yeti", game.players[0].minions[0].card.name)
        game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertTrue(game.players[0].minions[0].taunt)
        self.assertEqual(0, game.players[0].minions[0].calculate_attack())
        self.assertEqual(1, game.players[0].minions[0].health)
        self.assertEqual("Frog", game.players[0].minions[0].card.name)
        self.assertEqual(MINION_TYPE.BEAST, game.players[0].minions[0].card.minion_type)
    def test_LavaBurst(self):
        """Lava Burst deals five damage and overloads two crystals."""
        game = generate_game_for(LavaBurst, StonetuskBoar, CardTestingAgent, DoNothingAgent)
        for turn in range(0, 4):
            game.play_single_turn()
        self.assertEqual(30, game.players[1].hero.health)
        game.play_single_turn()
        self.assertEqual(25, game.players[1].hero.health)
        self.assertEqual(2, game.players[0].upcoming_overload)
    def test_LightningBolt(self):
        """Lightning Bolt deals three damage and overloads one crystal."""
        game = generate_game_for(LightningBolt, StonetuskBoar, CardTestingAgent, DoNothingAgent)
        self.assertEqual(30, game.players[1].hero.health)
        game.play_single_turn()
        self.assertEqual(27, game.players[1].hero.health)
        self.assertEqual(1, game.players[0].upcoming_overload)
    def test_LightningStorm(self):
        """Lightning Storm damages every enemy minion (2-3 each) and overloads two crystals."""
        game = generate_game_for(LightningStorm, Shieldbearer, CardTestingAgent, PlayAndAttackAgent)
        for turn in range(0, 4):
            game.play_single_turn()
        # Lightning Storm should be played
        game.play_single_turn()
        self.assertEqual(3, len(game.players[1].minions))
        self.assertEqual(1, game.players[1].minions[0].health)
        self.assertEqual(2, game.players[1].minions[1].health)
        self.assertEqual(2, game.players[1].minions[2].health)
        self.assertEqual(2, game.players[0].upcoming_overload)
    def test_RockbiterWeapon(self):
        """Rockbiter Weapon gives the hero +3 attack for the turn."""
        game = generate_game_for(RockbiterWeapon, Shieldbearer, PlayAndAttackAgent, DoNothingAgent)
        self.assertEqual(30, game.players[1].hero.health)
        # Rockbiter Weapon should be played and used
        game.play_single_turn()
        self.assertEqual(27, game.players[1].hero.health)
    def test_RockbiterWeapon_and_Hex(self):
        """A Rockbiter-buffed minion that gets hexed becomes a plain Frog (buff lost)."""
        game = generate_game_for([IronfurGrizzly, RockbiterWeapon, Hex], StonetuskBoar,
                                 CardTestingAgent, DoNothingAgent)
        for turn in range(7):
            game.play_single_turn()
        self.assertEqual(1, len(game.current_player.minions))
        self.assertEqual("Frog", game.current_player.minions[0].card.name)
    def test_RockbiterWeapon_and_BaronGeddon(self):
        """Rockbiter's buff interacts correctly with Baron Geddon attacking the enemy hero."""
        game = generate_game_for([BaronGeddon, RecklessRocketeer, RockbiterWeapon], StonetuskBoar,
                                 PlayAndAttackAgent, DoNothingAgent)
        for turn in range(15):
            game.play_single_turn()
        self.assertEqual(1, len(game.current_player.minions))
        self.assertEqual("Baron Geddon", game.current_player.minions[0].card.name)
        self.assertEqual(11, game.other_player.hero.health)
    def test_TotemicMight(self):
        """Totemic Might gives +2 health to totems only; non-totems are unaffected."""
        game = generate_game_for([TotemicMight, StonetuskBoar], Shieldbearer, PredictableAgent, DoNothingAgent)
        for turn in range(0, 2):
            game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual("Stonetusk Boar", game.players[0].minions[0].card.name)
        # Hero power and Totemic Might should be played
        game.play_single_turn()
        self.assertEqual(2, len(game.players[0].minions))
        self.assertEqual(1, game.players[0].minions[0].calculate_max_health())
        self.assertEqual("Stoneclaw Totem", game.players[0].minions[1].card.name)
        self.assertEqual(4, game.players[0].minions[1].calculate_max_health())
    def test_Windfury(self):
        """The Windfury spell grants windfury to a target minion."""
        game = generate_game_for(Windfury, StonetuskBoar, CardTestingAgent, OneCardPlayingAgent)
        for turn in range(0, 2):
            game.play_single_turn()
        self.assertFalse(game.players[1].minions[0].windfury())
        # Windfury should be played
        game.play_single_turn()
        self.assertTrue(game.players[1].minions[0].windfury())
    def test_Doomhammer(self):
        """Doomhammer is a 2/6 weapon that gives the hero windfury, with two overload."""
        game = generate_game_for(Doomhammer, StonetuskBoar, PlayAndAttackAgent, DoNothingAgent)
        for turn in range(0, 8):
            game.play_single_turn()
        self.assertEqual(30, game.players[1].hero.health)
        self.assertFalse(game.players[0].hero.windfury())
        # Doomhammer should be played
        game.play_single_turn()
        self.assertTrue(game.players[0].hero.windfury())
        self.assertEqual(2, game.players[0].weapon.base_attack)
        self.assertEqual(6, game.players[0].weapon.durability)
        self.assertEqual(2, game.players[0].upcoming_overload)
        self.assertEqual(26, game.players[1].hero.health)
    def test_StormforgedAxe(self):
        """Stormforged Axe is a 2/3 weapon with one overload."""
        game = generate_game_for(StormforgedAxe, StonetuskBoar, CardTestingAgent, DoNothingAgent)
        for turn in range(0, 3):
            game.play_single_turn()
        self.assertEqual(2, game.players[0].weapon.base_attack)
        self.assertEqual(3, game.players[0].weapon.durability)
        self.assertEqual(1, game.players[0].upcoming_overload)
    def test_Crackle(self):
        """Crackle deals random damage (5 with this seed) and overloads one crystal."""
        game = generate_game_for(Crackle, StonetuskBoar, CardTestingAgent, DoNothingAgent)
        for turn in range(0, 3):
            game.play_single_turn()
        self.assertEqual(25, game.players[1].hero.health)
        self.assertEqual(1, game.players[0].upcoming_overload)
    def test_SiltfinSpiritwalker(self):
        """Siltfin Spiritwalker draws a card whenever another friendly murloc dies."""
        game = generate_game_for([MurlocTidecaller, MurlocTidehunter, SiltfinSpiritwalker, Deathwing],
                                 [MurlocTidecaller, Hellfire, BaneOfDoom], OneCardPlayingAgent, OneCardPlayingAgent)
        for turn in range(6):
            game.play_single_turn()
        self.assertEqual(3, len(game.other_player.minions))
        self.assertEqual(1, len(game.current_player.minions))
        # Play Siltfin
        game.play_single_turn()
        self.assertEqual(4, len(game.current_player.minions))
        self.assertEqual(1, len(game.other_player.minions))
        self.assertEqual(4, len(game.current_player.hand))
        self.assertEqual(7, len(game.other_player.hand))
        # Hellfire will kill all the murlocs but the siltfin.
        game.play_single_turn()
        self.assertEqual(1, len(game.other_player.minions))
        self.assertEqual(7, len(game.other_player.hand))
        self.assertEqual(0, len(game.current_player.minions))
        self.assertEqual(7, len(game.current_player.hand))
    def test_WhirlingZapOMatic(self):
        """Whirling Zap-o-matic comes into play with windfury."""
        game = generate_game_for(WhirlingZapomatic, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
        for turn in range(0, 3):
            game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual("Whirling Zap-o-matic", game.players[0].minions[0].card.name)
        self.assertTrue(game.players[0].minions[0].windfury())
    def test_DunemaulShaman(self):
        """Dunemaul Shaman's forgetful attack can strike a target other than the intended one."""
        game = generate_game_for(DunemaulShaman,
                                 [StonetuskBoar, GoldshireFootman, SilverbackPatriarch, MogushanWarden],
                                 PlayAndAttackAgent, OneCardPlayingAgent)
        for turn in range(7):
            game.play_single_turn()
        self.assertEqual(1, len(game.current_player.minions))
        self.assertEqual(3, len(game.other_player.minions))
        game.play_single_turn()
        # The shaman's forgetful ability triggers once. It hits the warden one time (its intended target)
        # and the footman one time (after triggering forgetful)
        game.play_single_turn()
        self.assertEqual(2, len(game.current_player.minions))
        self.assertEqual(3, len(game.other_player.minions))
        self.assertEqual("Mogu'shan Warden", game.other_player.minions[0].card.name)
        self.assertEqual("Silverback Patriarch", game.other_player.minions[1].card.name)
        self.assertEqual("Stonetusk Boar", game.other_player.minions[2].card.name)
        self.assertEqual(30, game.other_player.hero.health)
    def test_Powermace(self):
        """Powermace's deathrattle gives a friendly mech +2/+2 when the weapon breaks."""
        game = generate_game_for([Powermace, SpiderTank, SpiderTank], Wisp, PlayAndAttackAgent, DoNothingAgent)
        for turn in range(0, 6):
            game.play_single_turn()
        self.assertEqual(0, len(game.players[0].minions))
        self.assertEqual(27, game.players[1].hero.health)
        self.assertEqual(3, game.players[0].weapon.base_attack)
        self.assertEqual(1, game.players[0].weapon.durability)
        game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual(24, game.players[1].hero.health)
        self.assertEqual(5, game.players[0].minions[0].calculate_attack())
        self.assertEqual(6, game.players[0].minions[0].health)
    def test_Neptulon(self):
        """Neptulon's battlecry adds four murloc cards to the owner's hand."""
        game = generate_game_for([TheCoin, TheCoin, TheCoin, TheCoin, TheCoin, TheCoin, TheCoin, TheCoin, TheCoin,
                                  Neptulon], Wisp, CardTestingAgent, DoNothingAgent)
        for turn in range(0, 12):
            game.play_single_turn()
        self.assertEqual(0, len(game.players[0].minions))
        self.assertEqual(0, len(game.players[0].hand))
        game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual(4, len(game.players[0].hand))
        for card in game.players[0].hand:
            self.assertEqual(MINION_TYPE.MURLOC, card.minion_type)
    def test_AncestorsCall(self):
        """Ancestor's Call summons a minion from each player's hand."""
        game = generate_game_for([AncestorsCall, StonetuskBoar], [Doomguard, Soulfire],
                                 OneCardPlayingAgent, OneCardPlayingAgent)
        for turn in range(6):
            game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(1, len(game.current_player.minions))
        self.assertEqual("Stonetusk Boar", game.current_player.minions[0].card.name)
        self.assertEqual(1, len(game.other_player.minions))
        self.assertEqual("Doomguard", game.other_player.minions[0].card.name)
        self.assertEqual(5, len(game.current_player.hand))
        self.assertEqual(7, len(game.other_player.hand))
    def test_LavaShock(self):
        """Lava Shock deals two damage and unlocks mana crystals locked by overload."""
        game = generate_game_for([Doomhammer, LightningBolt, LavaShock], StonetuskBoar,
                                 CardTestingAgent, DoNothingAgent)
        for turn in range(11):
            game.play_single_turn()
        # The player should have been able to do everything AND have three mana left over
        self.assertEqual(25, game.other_player.hero.health)
        self.assertEqual(3, game.current_player.mana)
    def test_FireguardDestroyer(self):
        """Fireguard Destroyer's attack varies per summon (random battlecry roll, seed-dependent)."""
        game = generate_game_for(FireguardDestroyer, Wisp, OneCardPlayingAgent, DoNothingAgent)
        for turn in range(0, 8):
            game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual(6, game.players[0].minions[0].calculate_attack())
        self.assertEqual(6, game.players[0].minions[0].health)
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(2, len(game.players[0].minions))
        self.assertEqual(5, game.players[0].minions[0].calculate_attack())
        self.assertEqual(6, game.players[0].minions[0].health)
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(3, len(game.players[0].minions))
        self.assertEqual(5, game.players[0].minions[0].calculate_attack())
        self.assertEqual(6, game.players[0].minions[0].health)
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(4, len(game.players[0].minions))
        self.assertEqual(5, game.players[0].minions[0].calculate_attack())
        self.assertEqual(6, game.players[0].minions[0].health)
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(5, len(game.players[0].minions))
        self.assertEqual(6, game.players[0].minions[0].calculate_attack())
        self.assertEqual(6, game.players[0].minions[0].health)
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(6, len(game.players[0].minions))
        self.assertEqual(4, game.players[0].minions[0].calculate_attack())
        self.assertEqual(6, game.players[0].minions[0].health)
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(7, len(game.players[0].minions))  # Well, I was trying to get a 7/6 but no luck
        self.assertEqual(5, game.players[0].minions[0].calculate_attack())
        self.assertEqual(6, game.players[0].minions[0].health)
    def test_AncestralKnowledge(self):
        """Ancestral Knowledge draws two cards and overloads two crystals."""
        game = generate_game_for(AncestralKnowledge, StonetuskBoar, CardTestingAgent, DoNothingAgent)
        for turn in range(0, 3):
            game.play_single_turn()
        self.assertEqual(6, len(game.current_player.hand))
        self.assertEqual(2, game.current_player.upcoming_overload)
| 43.097842 | 120 | 0.672988 |
38809ad1ffbe5c2d21602f10d5851fd5d7a6f7a1 | 2,802 | py | Python | test/functional/invalidateblock.py | twairgroup/wondercoin | c075c2d0c1a4927d9f04d5100106e369a85128e5 | [
"MIT"
] | 1 | 2021-04-29T09:04:49.000Z | 2021-04-29T09:04:49.000Z | test/functional/invalidateblock.py | twairgroup/wondercoin | c075c2d0c1a4927d9f04d5100106e369a85128e5 | [
"MIT"
] | 2 | 2021-06-08T21:50:46.000Z | 2021-06-09T14:04:30.000Z | test/functional/invalidateblock.py | twairgroup/wondercoin | c075c2d0c1a4927d9f04d5100106e369a85128e5 | [
"MIT"
] | 1 | 2021-06-09T01:09:47.000Z | 2021-06-09T01:09:47.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the invalidateblock RPC."""
from test_framework.test_framework import WondercoinTestFramework
from test_framework.util import *
class InvalidateTest(WondercoinTestFramework):
    """Functional test for the invalidateblock RPC.

    Verifies that invalidating a block forces a reorg back to the previous
    best chain, and that nodes never reorg onto a chain with less work.
    """
    def set_test_params(self):
        # Three nodes, each starting from an empty (clean) regtest chain.
        self.setup_clean_chain = True
        self.num_nodes = 3
    def setup_network(self):
        # Start the nodes without connecting them; the test wires them up on demand.
        self.setup_nodes()
    def run_test(self):
        """Invalidate blocks on each node and check the resulting chain tips."""
        self.log.info("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:")
        self.log.info("Mine 4 blocks on Node 0")
        self.nodes[0].generate(4)
        assert(self.nodes[0].getblockcount() == 4)
        besthash = self.nodes[0].getbestblockhash()
        self.log.info("Mine competing 6 blocks on Node 1")
        self.nodes[1].generate(6)
        assert(self.nodes[1].getblockcount() == 6)
        self.log.info("Connect nodes to force a reorg")
        connect_nodes_bi(self.nodes,0,1)
        sync_blocks(self.nodes[0:2])
        assert(self.nodes[0].getblockcount() == 6)
        badhash = self.nodes[1].getblockhash(2)
        self.log.info("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain")
        self.nodes[0].invalidateblock(badhash)
        newheight = self.nodes[0].getblockcount()
        newhash = self.nodes[0].getbestblockhash()
        if (newheight != 4 or newhash != besthash):
            raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
        self.log.info("Make sure we won't reorg to a lower work chain:")
        connect_nodes_bi(self.nodes,1,2)
        self.log.info("Sync node 2 to node 1 so both have 6 blocks")
        sync_blocks(self.nodes[1:3])
        assert(self.nodes[2].getblockcount() == 6)
        self.log.info("Invalidate block 5 on node 1 so its tip is now at 4")
        self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
        assert(self.nodes[1].getblockcount() == 4)
        self.log.info("Invalidate block 3 on node 2, so its tip is now 2")
        self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
        assert(self.nodes[2].getblockcount() == 2)
        self.log.info("..and then mine a block")
        self.nodes[2].generate(1)
        self.log.info("Verify all nodes are at the right height")
        # Brief pause so any (incorrect) propagation would have time to happen.
        time.sleep(5)
        assert_equal(self.nodes[2].getblockcount(), 3)
        assert_equal(self.nodes[0].getblockcount(), 4)
        node1height = self.nodes[1].getblockcount()
        if node1height < 4:
            raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
InvalidateTest().main()
| 43.107692 | 100 | 0.660243 |
c55c646582326e2204c86901926d04509f0c0798 | 13,893 | py | Python | .history/train_20210815162721.py | Arcofcosmos/MyYolov4_Pytorch | 14c445503d0fc69b8a8b64ecdc87256ac4c1fce1 | [
"MIT"
] | null | null | null | .history/train_20210815162721.py | Arcofcosmos/MyYolov4_Pytorch | 14c445503d0fc69b8a8b64ecdc87256ac4c1fce1 | [
"MIT"
] | null | null | null | .history/train_20210815162721.py | Arcofcosmos/MyYolov4_Pytorch | 14c445503d0fc69b8a8b64ecdc87256ac4c1fce1 | [
"MIT"
] | null | null | null | #-------------------------------------#
# 对数据集进行训练
#-------------------------------------#
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from nets.yolo4 import YoloBody
from nets.yolo_training import LossHistory, YOLOLoss, weights_init
from utils.dataloader import YoloDataset, yolo_dataset_collate
#---------------------------------------------------#
# 获得类和先验框
#---------------------------------------------------#
def get_classes(classes_path):
    """Load class names from a text file with one class name per line.

    Opens the file as UTF-8 explicitly (class lists often contain non-ASCII
    names and the platform default encoding is unreliable).  Blank lines are
    skipped so a trailing newline no longer produces a phantom '' class,
    which would silently inflate the class count downstream.
    """
    with open(classes_path, encoding='utf-8') as f:
        class_names = [line.strip() for line in f]
    return [name for name in class_names if name]
def get_anchors(anchors_path):
    """Read a single comma-separated line of anchor sizes from a file.

    Returns a numpy array of shape (-1, 3, 2) — three (w, h) anchors per
    group — with the group order reversed relative to the file.
    """
    with open(anchors_path) as f:
        raw = f.readline()
    values = [float(token) for token in raw.split(',')]
    return np.array(values).reshape([-1, 3, 2])[::-1, :, :]
def get_lr(optimizer):
    """Return the learning rate of the optimizer's first parameter group.

    Returns None when the optimizer has no parameter groups (matching the
    original implicit-None behaviour).
    """
    groups = optimizer.param_groups
    if groups:
        return groups[0]['lr']
    return None
def fit_one_epoch(net,yolo_loss,epoch,epoch_size,epoch_size_val,gen,genval,Epoch,cuda):
    """Run one training pass followed by one validation pass, then checkpoint.

    NOTE(review): relies on module-level globals assigned in the __main__
    block (Tensorboard, optimizer, writer, loss_history, model) — confirm
    before reusing this function outside train.py.

    Args:
        net: model (possibly DataParallel-wrapped) used for forward/backward.
        yolo_loss: YOLOLoss callable applied to each of the 3 output scales.
        epoch: current epoch index (0-based).
        epoch_size: number of training batches per epoch.
        epoch_size_val: number of validation batches per epoch.
        gen: training data loader.
        genval: validation data loader.
        Epoch: total epoch count (progress display only).
        cuda: whether to move tensors to the GPU.
    """
    if Tensorboard:
        global train_tensorboard_step, val_tensorboard_step
    total_loss = 0
    val_loss = 0

    net.train()
    print('Start Train')
    with tqdm(total=epoch_size,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen):
            if iteration >= epoch_size:
                break
            images, targets = batch[0], batch[1]
            # conversion only — no autograd graph needed for the copy itself
            with torch.no_grad():
                if cuda:
                    images = torch.from_numpy(images).type(torch.FloatTensor).cuda()
                    targets = [torch.from_numpy(ann).type(torch.FloatTensor) for ann in targets]
                else:
                    images = torch.from_numpy(images).type(torch.FloatTensor)
                    targets = [torch.from_numpy(ann).type(torch.FloatTensor) for ann in targets]
            #----------------------#
            #   Zero the gradients
            #----------------------#
            optimizer.zero_grad()
            #----------------------#
            #   Forward pass
            #----------------------#
            outputs = net(images)
            losses = []
            num_pos_all = 0
            #----------------------#
            #   Compute the loss
            #----------------------#
            for i in range(3):
                loss_item, num_pos = yolo_loss(outputs[i], targets)
                losses.append(loss_item)
                num_pos_all += num_pos
            loss = sum(losses) / num_pos_all
            total_loss += loss.item()
            #----------------------#
            #   Backward pass
            #----------------------#
            loss.backward()
            optimizer.step()

            if Tensorboard:
                # write the loss to tensorboard, on every step
                writer.add_scalar('Train_loss', loss, train_tensorboard_step)
                train_tensorboard_step += 1
            pbar.set_postfix(**{'total_loss': total_loss / (iteration + 1),
                                'lr'        : get_lr(optimizer)})
            pbar.update(1)
    # write the loss to tensorboard; the commented lines below would save once per epoch instead
    # if Tensorboard:
    #     writer.add_scalar('Train_loss', total_loss/(iteration+1), epoch)

    net.eval()
    print('Start Validation')
    with tqdm(total=epoch_size_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
        for iteration, batch in enumerate(genval):
            if iteration >= epoch_size_val:
                break
            images_val, targets_val = batch[0], batch[1]

            # validation: no gradients anywhere in this block
            with torch.no_grad():
                if cuda:
                    images_val = torch.from_numpy(images_val).type(torch.FloatTensor).cuda()
                    targets_val = [torch.from_numpy(ann).type(torch.FloatTensor) for ann in targets_val]
                else:
                    images_val = torch.from_numpy(images_val).type(torch.FloatTensor)
                    targets_val = [torch.from_numpy(ann).type(torch.FloatTensor) for ann in targets_val]
                optimizer.zero_grad()
                outputs = net(images_val)
                losses = []
                num_pos_all = 0
                for i in range(3):
                    loss_item, num_pos = yolo_loss(outputs[i], targets_val)
                    losses.append(loss_item)
                    num_pos_all += num_pos
                loss = sum(losses) / num_pos_all
                val_loss += loss.item()

            # write the loss to tensorboard; the commented lines below would write every step
            # if Tensorboard:
            #     writer.add_scalar('Val_loss', loss, val_tensorboard_step)
            #     val_tensorboard_step += 1
            pbar.set_postfix(**{'total_loss': val_loss / (iteration + 1)})
            pbar.update(1)

    # write the loss to tensorboard, once per epoch
    if Tensorboard:
        writer.add_scalar('Val_loss',val_loss / (epoch_size_val+1), epoch)

    loss_history.append_loss(total_loss/(epoch_size+1), val_loss/(epoch_size_val+1))
    print('Finish Validation')
    print('Epoch:'+ str(epoch+1) + '/' + str(Epoch))
    print('Total Loss: %.4f || Val Loss: %.4f ' % (total_loss/(epoch_size+1),val_loss/(epoch_size_val+1)))
    print('Saving state, iter:', str(epoch+1))
    # checkpoint saved from the global `model` (not `net`) so DataParallel wrapping is excluded
    torch.save(model.state_dict(), 'logs/Epoch%d-Total_Loss%.4f-Val_Loss%.4f.pth'%((epoch+1),total_loss/(epoch_size+1),val_loss/(epoch_size_val+1)))
#----------------------------------------------------#
# 检测精度mAP和pr曲线计算参考视频
# https://www.bilibili.com/video/BV1zE411u7Vw
#----------------------------------------------------#
if __name__ == "__main__":
    #-------------------------------#
    #   Whether to use Tensorboard
    #-------------------------------#
    Tensorboard = False
    #-------------------------------#
    #   Whether to use CUDA
    #   Set to False if there is no GPU
    #-------------------------------#
    Cuda = True
    #------------------------------------------------------#
    #   Whether to normalize the loss (changes its magnitude):
    #   decides whether the final loss is divided by the batch
    #   size or by the number of positive samples
    #------------------------------------------------------#
    normalize = False
    #-------------------------------#
    #   Input shape
    #   Use 416x416 with small GPU memory
    #   Use 608x608 with large GPU memory
    #-------------------------------#
    input_shape = (416,416)
    #----------------------------------------------------#
    #   Paths of the classes and anchors files — very important.
    #   Always update classes_path before training so that it
    #   matches your own dataset.
    #----------------------------------------------------#
    anchors_path = 'datasets/WZRY/yolo_anchors.txt'
    classes_path = 'model_data/wzry.txt'
    #------------------------------------------------------#
    #   YOLOv4 tricks:
    #   mosaic            mosaic data augmentation, True or False
    #                     (unstable in practice, so default False)
    #   Cosine_scheduler  cosine-annealing learning rate, True or False
    #   label_smoothing   label smoothing, usually below 0.01,
    #                     e.g. 0.01 or 0.005
    #------------------------------------------------------#
    mosaic = False
    Cosine_lr = False
    smoooth_label = 0
    #----------------------------------------------------#
    #   Load class names and anchors
    #----------------------------------------------------#
    class_names = get_classes(classes_path)
    anchors = get_anchors(anchors_path)
    num_classes = len(class_names)

    #------------------------------------------------------#
    #   Create the YOLO model.
    #   Update classes_path and its txt file before training.
    #------------------------------------------------------#
    model = YoloBody(len(anchors[0]), num_classes)
    weights_init(model)
    #------------------------------------------------------#
    #   See the README for the pretrained weights (Baidu netdisk download)
    #------------------------------------------------------#
    model_path = "trained_model/yolo4_weights.pth"
    print('Loading weights into state dict...')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model_dict = model.state_dict()
    pretrained_dict = torch.load(model_path, map_location=device)
    # keep only pretrained tensors whose shapes match the current model
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    print('Finished!')

    net = model.train()

    if Cuda:
        net = torch.nn.DataParallel(model)
        cudnn.benchmark = True
        net = net.cuda()

    yolo_loss = YOLOLoss(np.reshape(anchors,[-1,2]), num_classes, (input_shape[1], input_shape[0]), smoooth_label, Cuda, normalize)
    loss_history = LossHistory("logs/")
    #----------------------------------------------------#
    #   Get image paths and labels
    #----------------------------------------------------#
    annotation_path = './datasets/WZRY/train.txt'
    #----------------------------------------------------------------------#
    #   The train/validation split is done here in train.py.
    #   It is normal for 2007_test.txt and 2007_val.txt to be empty;
    #   training does not use them.
    #   With the current split, the validation:training ratio is 1:9.
    #----------------------------------------------------------------------#
    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    # fixed seed so the train/val split is reproducible across runs
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines)*val_split)
    num_train = len(lines) - num_val

    if Tensorboard:
        from tensorboardX import SummaryWriter
        writer = SummaryWriter(log_dir='logs',flush_secs=60)
        if Cuda:
            graph_inputs = torch.randn(1,3,input_shape[0],input_shape[1]).type(torch.FloatTensor).cuda()
        else:
            graph_inputs = torch.randn(1,3,input_shape[0],input_shape[1]).type(torch.FloatTensor)
        writer.add_graph(model, graph_inputs)
        train_tensorboard_step = 1
        val_tensorboard_step = 1
    #------------------------------------------------------#
    #   The backbone features are generic; freezing the backbone
    #   speeds up training and protects the weights early on.
    #   Init_Epoch is the starting epoch.
    #   Freeze_Epoch is the number of frozen-training epochs.
    #   Epoch is the total number of training epochs.
    #   Reduce Batch_size on OOM / insufficient GPU memory.
    #------------------------------------------------------#
    if True:
        lr = 1e-3
        Batch_size = 4
        Init_Epoch = 0
        Freeze_Epoch = 50

        #----------------------------------------------------------------------------#
        #   In practice the optimizer's weight_decay proved counter-productive
        #   here, so it was removed; feel free to re-enable it,
        #   typically weight_decay=5e-4.
        #----------------------------------------------------------------------------#
        optimizer = optim.Adam(net.parameters(),lr)
        if Cosine_lr:
            lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)
        else:
            lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.92)

        train_dataset = YoloDataset(lines[:num_train], (input_shape[0], input_shape[1]), mosaic=mosaic, is_train=True)
        val_dataset = YoloDataset(lines[num_train:], (input_shape[0], input_shape[1]), mosaic=False, is_train=False)
        gen = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
                                drop_last=True, collate_fn=yolo_dataset_collate)
        gen_val = DataLoader(val_dataset, shuffle=True, batch_size=Batch_size, num_workers=4,pin_memory=True,
                                drop_last=True, collate_fn=yolo_dataset_collate)

        epoch_size = num_train // Batch_size
        epoch_size_val = num_val // Batch_size

        if epoch_size == 0 or epoch_size_val == 0:
            raise ValueError("数据集过小,无法进行训练,请扩充数据集。")
        #------------------------------------#
        #   Freeze part of the network (backbone)
        #------------------------------------#
        for param in model.backbone.parameters():
            param.requires_grad = False

        for epoch in range(Init_Epoch,Freeze_Epoch):
            fit_one_epoch(net,yolo_loss,epoch,epoch_size,epoch_size_val,gen,gen_val,Freeze_Epoch,Cuda)
            lr_scheduler.step()

    if True:
        lr = 1e-4
        Batch_size = 2
        Freeze_Epoch = 50
        Unfreeze_Epoch = 100

        #----------------------------------------------------------------------------#
        #   In practice the optimizer's weight_decay proved counter-productive
        #   here, so it was removed; feel free to re-enable it,
        #   typically weight_decay=5e-4.
        #----------------------------------------------------------------------------#
        optimizer = optim.Adam(net.parameters(),lr)
        if Cosine_lr:
            lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)
        else:
            lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.92)

        train_dataset = YoloDataset(lines[:num_train], (input_shape[0], input_shape[1]), mosaic=mosaic, is_train=True)
        val_dataset = YoloDataset(lines[num_train:], (input_shape[0], input_shape[1]), mosaic=False, is_train=False)
        gen = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
                                drop_last=True, collate_fn=yolo_dataset_collate)
        gen_val = DataLoader(val_dataset, shuffle=True, batch_size=Batch_size, num_workers=4,pin_memory=True,
                                drop_last=True, collate_fn=yolo_dataset_collate)

        epoch_size = num_train // Batch_size
        epoch_size_val = num_val // Batch_size

        if epoch_size == 0 or epoch_size_val == 0:
            raise ValueError("数据集过小,无法进行训练,请扩充数据集。")
        #------------------------------------#
        #   Train with the backbone unfrozen
        #------------------------------------#
        for param in model.backbone.parameters():
            param.requires_grad = True

        for epoch in range(Freeze_Epoch,Unfreeze_Epoch):
            fit_one_epoch(net,yolo_loss,epoch,epoch_size,epoch_size_val,gen,gen_val,Unfreeze_Epoch,Cuda)
            lr_scheduler.step()
| 41.846386 | 148 | 0.511912 |
7b2f4627f425bd83da321827d6e01412d1a5012e | 8,059 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/_hub_virtual_network_connections_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 1 | 2021-06-02T08:01:35.000Z | 2021-06-02T08:01:35.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/_hub_virtual_network_connections_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/_hub_virtual_network_connections_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class HubVirtualNetworkConnectionsOperations(object):
    """HubVirtualNetworkConnectionsOperations operations.

    You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2018-10-01".
    """

    # expose the generated models module as a class attribute for callers
    models = models

    def __init__(self, client, config, serializer, deserializer):

        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2018-10-01"

        self.config = config

    def get(
            self, resource_group_name, virtual_hub_name, connection_name, custom_headers=None, raw=False, **operation_config):
        """Retrieves the details of a HubVirtualNetworkConnection.

        :param resource_group_name: The resource group name of the VirtualHub.
        :type resource_group_name: str
        :param virtual_hub_name: The name of the VirtualHub.
        :type virtual_hub_name: str
        :param connection_name: The name of the vpn connection.
        :type connection_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: HubVirtualNetworkConnection or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.network.v2018_10_01.models.HubVirtualNetworkConnection or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorException<azure.mgmt.network.v2018_10_01.models.ErrorException>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            # correlation id for service-side request tracing
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('HubVirtualNetworkConnection', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/hubVirtualNetworkConnections/{connectionName}'}

    def list(
            self, resource_group_name, virtual_hub_name, custom_headers=None, raw=False, **operation_config):
        """Retrieves the details of all HubVirtualNetworkConnections.

        :param resource_group_name: The resource group name of the VirtualHub.
        :type resource_group_name: str
        :param virtual_hub_name: The name of the VirtualHub.
        :type virtual_hub_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of HubVirtualNetworkConnection
        :rtype:
         ~azure.mgmt.network.v2018_10_01.models.HubVirtualNetworkConnectionPaged[~azure.mgmt.network.v2018_10_01.models.HubVirtualNetworkConnection]
        :raises:
         :class:`ErrorException<azure.mgmt.network.v2018_10_01.models.ErrorException>`
        """
        # builds the request for the first page (no next_link) or a follow-up page
        def prepare_request(next_link=None):
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                # next_link is a complete URL returned by the previous page
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        # fetches one page; invoked lazily by the paged iterator below
        def internal_paging(next_link=None):
            request = prepare_request(next_link)

            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                raise models.ErrorException(self._deserialize, response)

            return response

        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.HubVirtualNetworkConnectionPaged(internal_paging, self._deserialize.dependencies, header_dict)

        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/hubVirtualNetworkConnections'}
| 46.051429 | 199 | 0.668569 |
645e74f21bd0ae0484116fb94f996c8a4e22df5b | 13,058 | py | Python | sdk/identity/azure-identity/tests/test_interactive_credential.py | iamvishnuks/azure-sdk-for-python | 4df435651ab32f57b1e9f33fc65fd46632055704 | [
"MIT"
] | 1 | 2020-08-17T14:40:09.000Z | 2020-08-17T14:40:09.000Z | sdk/identity/azure-identity/tests/test_interactive_credential.py | iamvishnuks/azure-sdk-for-python | 4df435651ab32f57b1e9f33fc65fd46632055704 | [
"MIT"
] | 2 | 2020-07-17T13:57:08.000Z | 2020-07-21T18:30:37.000Z | sdk/identity/azure-identity/tests/test_interactive_credential.py | iamvishnuks/azure-sdk-for-python | 4df435651ab32f57b1e9f33fc65fd46632055704 | [
"MIT"
] | 1 | 2020-09-18T13:20:20.000Z | 2020-09-18T13:20:20.000Z | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from azure.core.exceptions import ClientAuthenticationError
from azure.identity import (
AuthenticationRequiredError,
AuthenticationRecord,
KnownAuthorities,
CredentialUnavailableError,
)
from azure.identity._internal import InteractiveCredential
from msal import TokenCache
import pytest
try:
from unittest.mock import Mock, patch
except ImportError: # python < 3.3
from mock import Mock, patch # type: ignore
from helpers import build_aad_response
class MockCredential(InteractiveCredential):
    """Test class to drive InteractiveCredential.

    Default instances have an empty in-memory cache, and raise rather than send an HTTP request.
    """

    def __init__(
        self, client_id="...", request_token=None, cache=None, msal_app_factory=None, transport=None, **kwargs
    ):
        # optional factory lets tests substitute or inspect the MSAL application
        self._msal_app_factory = msal_app_factory
        # tests pass request_token to stub/observe interactive authentication
        self._request_token_impl = request_token or Mock()
        # default transport raises: no test should ever reach the network
        transport = transport or Mock(send=Mock(side_effect=Exception("credential shouldn't send a request")))
        super(MockCredential, self).__init__(
            client_id=client_id, _cache=cache or TokenCache(), transport=transport, **kwargs
        )

    def _request_token(self, *scopes, **kwargs):
        # delegate to the injected Mock so tests can assert on calls/arguments
        return self._request_token_impl(*scopes, **kwargs)

    def _get_app(self):
        # use the injected factory when provided, otherwise the real app
        if self._msal_app_factory:
            return self._create_app(self._msal_app_factory)
        return super(MockCredential, self)._get_app()
def test_no_scopes():
    """The credential should raise when get_token is called with no scopes"""

    on_request = Mock(side_effect=Exception("credential shouldn't begin interactive authentication"))
    credential = MockCredential(request_token=on_request)
    with pytest.raises(ValueError):
        credential.get_token()
def test_authentication_record_argument():
    """The credential should initialize its msal.ClientApplication with values from a given record"""

    record = AuthenticationRecord("tenant-id", "client-id", "localhost", "object.tenant", "username")

    def validate_app_parameters(authority, client_id, **_):
        # the 'authority' argument to msal.ClientApplication should be a URL of the form https://authority/tenant
        assert authority == "https://{}/{}".format(record.authority, record.tenant_id)
        assert client_id == record.client_id
        return Mock(get_accounts=Mock(return_value=[]))

    app_factory = Mock(wraps=validate_app_parameters)
    credential = MockCredential(
        authentication_record=record, disable_automatic_authentication=True, msal_app_factory=app_factory,
    )
    # get_token is called only to force MSAL app creation; with interactive auth
    # disabled and no cached account token, it raises AuthenticationRequiredError
    with pytest.raises(AuthenticationRequiredError):
        credential.get_token("scope")

    assert app_factory.call_count == 1, "credential didn't create an msal application"
def test_tenant_argument_overrides_record():
    """The 'tenant_id' keyword argument should override a given record's value"""

    tenant_id = "some-guid"
    authority = "localhost"
    record = AuthenticationRecord(tenant_id, "client-id", authority, "object.tenant", "username")

    # reversing the id guarantees the override differs from the record's tenant
    expected_tenant = tenant_id[::-1]
    expected_authority = "https://{}/{}".format(authority, expected_tenant)

    def validate_authority(authority, **_):
        assert authority == expected_authority
        return Mock(get_accounts=Mock(return_value=[]))

    credential = MockCredential(
        authentication_record=record,
        tenant_id=expected_tenant,
        disable_automatic_authentication=True,
        msal_app_factory=validate_authority,
    )
    # get_token only forces app creation, where validate_authority runs
    with pytest.raises(AuthenticationRequiredError):
        credential.get_token("scope")
def test_disable_automatic_authentication():
    """When silent auth fails the credential should raise, if it's configured not to authenticate automatically"""

    expected_details = "something went wrong"
    record = AuthenticationRecord("tenant-id", "client-id", "localhost", "object.tenant", "username")
    # the app knows the record's account, but silent token acquisition fails with an AAD error
    msal_app = Mock(
        acquire_token_silent_with_error=Mock(return_value={"error_description": expected_details}),
        get_accounts=Mock(return_value=[{"home_account_id": record.home_account_id}]),
    )

    credential = MockCredential(
        authentication_record=record,
        disable_automatic_authentication=True,
        msal_app_factory=lambda *_, **__: msal_app,
        request_token=Mock(side_effect=Exception("credential shouldn't begin interactive authentication")),
    )

    scope = "scope"
    with pytest.raises(AuthenticationRequiredError) as ex:
        credential.get_token(scope)

    # the exception should carry the requested scopes and any error message from AAD
    assert ex.value.scopes == (scope,)
    assert ex.value.error_details == expected_details
def test_scopes_round_trip():
    """authenticate should accept the value of AuthenticationRequiredError.scopes"""

    scope = "scope"

    def validate_scopes(*scopes, **_):
        assert scopes == (scope,)
        return {"access_token": "**", "expires_in": 42}

    request_token = Mock(wraps=validate_scopes)
    credential = MockCredential(disable_automatic_authentication=True, request_token=request_token)
    with pytest.raises(AuthenticationRequiredError) as ex:
        credential.get_token(scope)

    # feeding the error's scopes back into authenticate should trigger interactive auth for them
    credential.authenticate(scopes=ex.value.scopes)

    assert request_token.call_count == 1, "validation method wasn't called"
@pytest.mark.parametrize(
    "authority,expected_scope",
    (
        (KnownAuthorities.AZURE_CHINA, "https://management.core.chinacloudapi.cn//.default"),
        (KnownAuthorities.AZURE_GERMANY, "https://management.core.cloudapi.de//.default"),
        (KnownAuthorities.AZURE_GOVERNMENT, "https://management.core.usgovcloudapi.net//.default"),
        (KnownAuthorities.AZURE_PUBLIC_CLOUD, "https://management.core.windows.net//.default"),
    ),
)
def test_authenticate_default_scopes(authority, expected_scope):
    """when given no scopes, authenticate should default to the ARM scope appropriate for the configured authority"""

    def validate_scopes(*scopes):
        # each known cloud maps to its own ARM resource scope (see parametrize table)
        assert scopes == (expected_scope,)
        return {"access_token": "**", "expires_in": 42}

    request_token = Mock(wraps=validate_scopes)
    MockCredential(authority=authority, request_token=request_token).authenticate()
    assert request_token.call_count == 1
def test_authenticate_unknown_cloud():
    """authenticate should raise when given no scopes in an unknown cloud"""

    credential = MockCredential(authority="localhost")
    with pytest.raises(CredentialUnavailableError):
        credential.authenticate()
@pytest.mark.parametrize("option", (True, False))
def test_authenticate_ignores_disable_automatic_authentication(option):
    """authenticate should prompt for authentication regardless of the credential's configuration"""

    mock_request = Mock(return_value={"access_token": "**", "expires_in": 42})
    credential = MockCredential(request_token=mock_request, disable_automatic_authentication=option)
    credential.authenticate()
    assert mock_request.call_count == 1, "credential didn't begin interactive authentication"
def test_get_token_wraps_exceptions():
    """get_token shouldn't propagate exceptions from MSAL"""

    class CustomException(Exception):
        pass

    expected_message = "something went wrong"
    record = AuthenticationRecord("tenant-id", "client-id", "localhost", "object.tenant", "username")
    msal_app = Mock(
        acquire_token_silent_with_error=Mock(side_effect=CustomException(expected_message)),
        get_accounts=Mock(return_value=[{"home_account_id": record.home_account_id}]),
    )
    credential = MockCredential(msal_app_factory=lambda *_, **__: msal_app, authentication_record=record)
    # the MSAL exception should surface as ClientAuthenticationError with its message preserved
    with pytest.raises(ClientAuthenticationError) as ex:
        credential.get_token("scope")

    assert expected_message in ex.value.message
    assert msal_app.acquire_token_silent_with_error.call_count == 1, "credential didn't attempt silent auth"
def test_enable_persistent_cache():
    """the credential should use the persistent cache only when given enable_persistent_cache=True"""

    class TestCredential(InteractiveCredential):
        def __init__(self, **kwargs):
            super(TestCredential, self).__init__(client_id="...", **kwargs)

        def _request_token(self, *_, **__):
            pass

    in_memory_cache = Mock()
    persistent_cache = "azure.identity._internal.persistent_cache"

    # credential should default to an in memory cache
    raise_when_called = Mock(side_effect=Exception("credential shouldn't attempt to load a persistent cache"))
    with patch(persistent_cache + "._load_persistent_cache", raise_when_called):
        with patch(InteractiveCredential.__module__ + ".msal.TokenCache", lambda: in_memory_cache):
            credential = TestCredential()
            assert credential._cache is in_memory_cache

            # allowing an unencrypted cache doesn't count as opting in to the persistent cache
            credential = TestCredential(allow_unencrypted_cache=True)
            assert credential._cache is in_memory_cache

    # keyword argument opts in to persistent cache
    with patch(persistent_cache + ".msal_extensions") as mock_extensions:
        TestCredential(enable_persistent_cache=True)
        assert mock_extensions.PersistedTokenCache.call_count == 1

    # opting in on an unsupported platform raises an exception
    with patch(persistent_cache + ".sys.platform", "commodore64"):
        with pytest.raises(NotImplementedError):
            TestCredential(enable_persistent_cache=True)
        with pytest.raises(NotImplementedError):
            TestCredential(enable_persistent_cache=True, allow_unencrypted_cache=True)
@patch("azure.identity._internal.persistent_cache.sys.platform", "linux2")
@patch("azure.identity._internal.persistent_cache.msal_extensions")
def test_persistent_cache_linux(mock_extensions):
    """The credential should use an unencrypted cache when encryption is unavailable and the user explicitly opts in.

    This test was written when Linux was the only platform on which encryption may not be available.
    """

    class TestCredential(InteractiveCredential):
        def __init__(self, **kwargs):
            super(TestCredential, self).__init__(client_id="...", **kwargs)

        def _request_token(self, *_, **__):
            pass

    # the credential should prefer an encrypted cache even when the user allows an unencrypted one
    # NOTE(review): Mock.called_with is not an assertion API (assert_called_with is);
    # attribute access on a Mock is always truthy, so these asserts never fail — TODO confirm/fix
    TestCredential(enable_persistent_cache=True, allow_unencrypted_cache=True)
    assert mock_extensions.PersistedTokenCache.called_with(mock_extensions.LibsecretPersistence)
    mock_extensions.PersistedTokenCache.reset_mock()

    # (when LibsecretPersistence's dependencies aren't available, constructing it raises ImportError)
    mock_extensions.LibsecretPersistence = Mock(side_effect=ImportError)

    # encryption unavailable, no opt in to unencrypted cache -> credential should raise
    with pytest.raises(ValueError):
        TestCredential(enable_persistent_cache=True)

    TestCredential(enable_persistent_cache=True, allow_unencrypted_cache=True)
    assert mock_extensions.PersistedTokenCache.called_with(mock_extensions.FilePersistence)
def test_home_account_id_client_info():
    """when MSAL returns client_info, the credential should decode it to get the home_account_id"""

    object_id = "object-id"
    home_tenant = "home-tenant-id"
    msal_response = build_aad_response(uid=object_id, utid=home_tenant, access_token="***", refresh_token="**")
    msal_response["id_token_claims"] = {
        "aud": "client-id",
        "iss": "https://localhost",
        "object_id": object_id,
        "tid": home_tenant,
        "preferred_username": "me",
        "sub": "subject",
    }

    class TestCredential(InteractiveCredential):
        def __init__(self, **kwargs):
            super(TestCredential, self).__init__(client_id="...", **kwargs)

        def _request_token(self, *_, **__):
            return msal_response

    record = TestCredential().authenticate()
    # with client_info present, home_account_id is "<uid>.<utid>"
    assert record.home_account_id == "{}.{}".format(object_id, home_tenant)
def test_home_account_id_no_client_info():
    """the credential should use the subject claim as home_account_id when MSAL doesn't provide client_info"""

    subject = "subject"
    msal_response = build_aad_response(access_token="***", refresh_token="**")
    msal_response["id_token_claims"] = {
        "aud": "client-id",
        "iss": "https://localhost",
        "object_id": "some-guid",
        "tid": "some-tenant",
        "preferred_username": "me",
        "sub": subject,
    }

    class TestCredential(InteractiveCredential):
        def __init__(self, **kwargs):
            super(TestCredential, self).__init__(client_id="...", **kwargs)

        def _request_token(self, *_, **__):
            return msal_response

    record = TestCredential().authenticate()
    # no client_info in the response, so the id token's "sub" claim is used
    assert record.home_account_id == subject
| 40.552795 | 117 | 0.721933 |
d956b2bb4ff288eb48d6c4c2c7f9697d0807d90d | 8,419 | py | Python | cogkit/modules/provider-localscheduler/examples/gce-cloud-provider/cloud.py | ketancmaheshwari/swift-k | ec4f2acbf122536b1b09f77251cb0d00b508251c | [
"Apache-2.0"
] | null | null | null | cogkit/modules/provider-localscheduler/examples/gce-cloud-provider/cloud.py | ketancmaheshwari/swift-k | ec4f2acbf122536b1b09f77251cb0d00b508251c | [
"Apache-2.0"
] | null | null | null | cogkit/modules/provider-localscheduler/examples/gce-cloud-provider/cloud.py | ketancmaheshwari/swift-k | ec4f2acbf122536b1b09f77251cb0d00b508251c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
from random import randrange
import logging
import pprint
import argparse
import datetime
import time
import json
#from __future__ import print_function
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.base import NodeSize, NodeImage
from libcloud.compute.types import NodeState
import libcloud.compute.types
# Names of the GCE network and firewall resources this provider manages.
SWIFT_NETWORK="swift-network"
SWIFT_FIREWALL="swift-firewall"

# Map libcloud NodeState enum values to human-readable status strings.
NODESTATES = { NodeState.RUNNING : "RUNNING",
               NodeState.REBOOTING : "REBOOTING",
               NodeState.TERMINATED : "TERMINATED",
               NodeState.STOPPED : "STOPPED",
               NodeState.PENDING : "PENDING",
               NodeState.UNKNOWN : "UNKNOWN" }

# Bootstrap shell script for worker nodes (presumably passed as instance
# user-data — confirm at the call site): puts Java and Swift on PATH and
# enables TRACE-level worker logging.
WORKER_USERDATA='''#!/bin/bash
export JAVA=/usr/local/bin/jdk1.7.0_51/bin
export SWIFT=/usr/local/bin/swift-trunk/bin
export PATH=$JAVA:$SWIFT:$PATH
export WORKER_LOGGING_LEVEL=TRACE
'''
def gce_create_network(driver, configs):
    """Ensure the Swift GCE network and its firewall exist.

    Creates the SWIFT_NETWORK network (10.240.0.0/16) when absent, then
    creates the SWIFT_FIREWALL rule set on it when no firewall is attached
    to that network yet. `configs` is accepted for interface symmetry with
    the other provisioning helpers but is not used here.
    """
    matching_networks = [net for net in driver.ex_list_networks() if net.name == SWIFT_NETWORK]
    if not matching_networks:
        driver.ex_create_network(SWIFT_NETWORK, "10.240.0.0/16")

    # TCP/UDP port ranges to open on the network, reachable from anywhere.
    firewall_rules = [
        {"IPProtocol": "tcp", "ports": ["30000-60000"]},
        {"IPProtocol": "tcp", "ports": ["20-85"]},
        {"IPProtocol": "udp", "ports": ["30000-60000"]},
    ]

    attached_firewalls = [fw for fw in driver.ex_list_firewalls() if fw.network.name == SWIFT_NETWORK]
    if not attached_firewalls:
        driver.ex_create_firewall(SWIFT_FIREWALL, firewall_rules, network=SWIFT_NETWORK, source_ranges=['0.0.0.0/0'])
    return
# Check if the source is a gs://*image.tar.gz
#
def gce_check_image(driver, configs):
source = configs['gceworkerimage']
target = ""
if source.startswith('gs://') and source.endswith('.image.tar.gz'):
img_id = source.rstrip('.image.tar.gz')[-5:]
target = "swift-worker-" + img_id
else:
target = source
images = driver.list_images()
matches= [ img for img in images if img.name == target ]
# Copy image if there were no matches
if not matches :
#print "Copying image from source to target"
driver.ex_copy_image(target, source,
description="Swift worker image from"+source+" Timestamp: " + datetime.datetime.fromtimestamp(time.time()).strftime('%H_%M_%S'))
configs['gceimageid'] = target
return
def check_keypair(driver, configs):
if "gcekeypairname" in configs and "gcekeypairfile" in configs:
all_pairs = driver.list_key_pairs()
for pair in all_pairs:
if pair.name == configs['gcekeypairname']:
return 0
key_pair = driver.create_key_pair(name=configs['gcekeypairname'])
f = open(configs['gcekeypairfile'], 'w')
f.write(str(key_pair.private_key))
#f.close()
os.chmod(configs['gcekeypairfile'], 0600)
else:
sys.stderr.write("gcekeypairname and/or gcekeypairfile missing\n")
sys.stderr.write("Cannot proceed without gcekeypairname and gcekeypairfile\n")
exit(-1)
def node_status(driver, node_uuids):
nodes = driver.list_nodes()
for node in nodes:
if node.uuid in node_uuids :
if node.state == NodeState.RUNNING:
print node.uuid, "R"
elif node.state == NodeState.PENDING:
print node.uuid, "Q"
elif node.state == NodeState.TERMINATED:
print node.uuid, "C"
elif node.state == NodeState.STOPPED:
print node.uuid, "C"
elif node.state == NodeState.UNKNOWN:
print node.uuid, "Q" # This state could be wrong
else:
sys.stderr.write("Node state unknown/invalid " + str(NODESTATE[node.state]))
return -1
return 0
def node_start(driver, configs, WORKER_STRING):
userdata = WORKER_USERDATA + WORKER_STRING.lstrip('"').rstrip('"')
nodename = "swift-worker-" + datetime.datetime.fromtimestamp(time.time()).strftime('%H-%M-%S') + "-" + str(randrange(10000))
start_up = "/tmp/" + nodename
#print "Userdata : ", userdata
f = open(start_up, 'w')
f.write(userdata)
f.close()
#size = NodeSize(id=configs['gceworkertype'], name="swift_worker",
# ram=None, disk=None, bandwidth=None, price=None, driver=driver)
#image = NodeImage(id=configs['gceworkerimage'], name=None, driver=driver)
#print "Starting image : ", configs['gceimageid'], " with nodename : " ,nodename
'''
node = driver.deploy_node(nodename, # name
configs['gceworkertype'], # size str or GCENodeSize
configs['gceimageid'], # image str or GCENodeImage
start_up, # This must be a filename
location=configs['gcezone'], # GCEZone for execution
ex_network="default")
'''
node = driver.create_node(nodename,
configs['gceworkertype'],
configs['gceimageid'],
location=configs['gcezone'],
ex_network=SWIFT_NETWORK, #ex_network="default",
external_ip='ephemeral',
ex_metadata={'startup-script' : userdata })
print 'jobid={0}'.format(node.uuid)
# node_names is a list
def node_terminate(driver, node_uuids):
nodes = driver.list_nodes()
deleted_flag = False
for node in nodes:
if node.uuid in node_uuids and node.state == NodeState.RUNNING :
code = driver.destroy_node(node)
deleted_flag = True
return deleted_flag
def _read_conf(config_file):
cfile = open(config_file, 'r').read()
config = {}
for line in cfile.split('\n'):
# Checking if empty line or comment
if line.startswith('#') or not line :
continue
temp = line.split('=')
config[temp[0]] = temp[1].strip('\r')
return config
def init_checks(driver, configs):
gce_create_network(driver, configs)
gce_check_image(driver, configs)
def init(conf_file):
configs = _read_conf(conf_file)
driver = get_driver(Provider.GCE)
gce_driver = driver(configs['gceemailaccount'],
configs['gcekeypairfile'],
project=configs['gceprojectid'],
datacenter=configs['gcezone'])
return configs,gce_driver
# Main driver section
#configs, driver = init()
#args = sys.argv[1:]
#print "All args : ",str(args)
if __name__ == '__main__' :
parser = argparse.ArgumentParser()
mu_group = parser.add_mutually_exclusive_group(required=True)
mu_group.add_argument("-s", "--submit", default=None , help='Takes a config file. Submits the CMD_STRING in the configs for execution on a cloud resource')
mu_group.add_argument("-t", "--status", default=None , help='gets the status of the CMD_STRING in the configs for execution on a cloud resource')
mu_group.add_argument("-c", "--cancel", default=None , help='cancels the jobs with jobids')
parser.add_argument("-v", "--verbose", help="set level of verbosity, DEBUG, INFO, WARN")
parser.add_argument("-j", "--jobid", type=str, action='append')
args = parser.parse_args()
config_file = ( args.status or args.submit or args.cancel )
configs, driver = init(config_file)
if args.submit :
# Init checks confirm keypairs and security groups to allow for access to ports
init_checks(driver, configs)
node_start(driver, configs, configs['CMD_STRING'])
elif args.status :
node_status(driver, args.jobid )
elif args.cancel :
node_terminate(driver, args.jobid)
else:
sys.stderr.write("ERROR: Undefined args, cannot be handled")
sys.stderr.write("ERROR: Exiting...")
exit(-1)
exit(0)
| 37.417778 | 160 | 0.605891 |
34b1d48452bf8fdeb2c093056210690176c1e8fc | 317 | py | Python | selia/urls/create_views/sites.py | IslasGECI/selia | 9863c32cd45db13053a1d2add67f5bdc1871b791 | [
"BSD-4-Clause"
] | null | null | null | selia/urls/create_views/sites.py | IslasGECI/selia | 9863c32cd45db13053a1d2add67f5bdc1871b791 | [
"BSD-4-Clause"
] | 13 | 2020-01-07T21:53:50.000Z | 2022-01-13T01:53:50.000Z | selia/urls/create_views/sites.py | IslasGECI/selia | 9863c32cd45db13053a1d2add67f5bdc1871b791 | [
"BSD-4-Clause"
] | 1 | 2021-05-06T19:38:09.000Z | 2021-05-06T19:38:09.000Z | from django.urls import path
from selia.views.create_views import sites
urlpatterns = [
path(
'sites/create/',
sites.CreateSiteManager.as_view(),
name='create_site'),
path(
'sites/create/1/',
sites.CreateSiteView.as_view(),
name='create_site_create_form'),
]
| 21.133333 | 42 | 0.630915 |
24af67b51354487426d08b210e4729793828c661 | 4,828 | py | Python | src/dxtbx/format/FormatSMVJHSim.py | cctbx/dxtbx | f7bd1201231f0fe94568db5281127d2cb944063a | [
"BSD-3-Clause"
] | 1 | 2020-01-27T22:34:57.000Z | 2020-01-27T22:34:57.000Z | src/dxtbx/format/FormatSMVJHSim.py | cctbx/dxtbx | f7bd1201231f0fe94568db5281127d2cb944063a | [
"BSD-3-Clause"
] | 448 | 2019-04-06T01:20:56.000Z | 2022-03-31T15:58:48.000Z | src/dxtbx/format/FormatSMVJHSim.py | cctbx/dxtbx | f7bd1201231f0fe94568db5281127d2cb944063a | [
"BSD-3-Clause"
] | 10 | 2019-04-08T13:30:32.000Z | 2021-09-30T14:48:50.000Z | """An implementation of the SMV image reader for JHSim images."""
import calendar
import sys
import time
from iotbx.detectors import SMVImage
from dxtbx.format.FormatSMV import FormatSMV
class FormatSMVJHSim(FormatSMV):
"""A class for reading SMV format JHSim images, and correctly constructing
a model for the experiment from this."""
# all ADSC detectors generate images with an ADC offset of 40
# for Mar/Rayonix it is 10
# Rigaku SMV uses 20, and 5 for image plate formats
# for one particular simulation, I used 1
ADC_OFFSET = 1
image_pedestal = 1
@staticmethod
def understand(image_file):
"""Check to see if this looks like an JHSim SMV format image, i.e. we can
make sense of it. From JH: "The best way to identify images from any of my
simulators is to look for BEAMLINE=fake in the header."."""
size, header = FormatSMV.get_smv_header(image_file)
if header.get("BEAMLINE") == "fake":
return True
else:
return False
def detectorbase_start(self):
if not hasattr(self, "detectorbase") or self.detectorbase is None:
self.detectorbase = SMVImage(self._image_file)
self.detectorbase.open_file = self.open_file
self.detectorbase.readHeader()
def _goniometer(self):
"""Return a model for a simple single-axis goniometer. This should
probably be checked against the image header."""
return self._goniometer_factory.single_axis()
def _detector(self):
"""Return a model for a simple detector, presuming no one has
one of these on a two-theta stage. Assert that the beam centre is
provided in the Mosflm coordinate frame."""
distance = float(self._header_dictionary["DISTANCE"])
beam_x = float(self._header_dictionary["BEAM_CENTER_X"])
beam_y = float(self._header_dictionary["BEAM_CENTER_Y"])
pixel_size = float(self._header_dictionary["PIXEL_SIZE"])
image_size = (
float(self._header_dictionary["SIZE1"]),
float(self._header_dictionary["SIZE2"]),
)
image_pedestal = 1
try:
image_pedestal = float(self._header_dictionary["ADC_OFFSET"])
except (KeyError):
pass
overload = 65535 - image_pedestal
underload = 1 - image_pedestal
# interpret beam center conventions
image_height_mm = pixel_size * image_size[1]
adxv_beam_center = (beam_x, beam_y)
cctbx_beam_center = (
adxv_beam_center[0] + pixel_size,
image_height_mm - adxv_beam_center[1] + pixel_size,
)
# Guess whether this is mimicking a Pilatus, if so set detector type so
# that spot-finding parameters are appropriate
if pixel_size == 0.172:
stype = "SENSOR_PAD"
else:
stype = "CCD"
return self._detector_factory.simple(
stype,
distance,
cctbx_beam_center,
"+x",
"-y",
(pixel_size, pixel_size),
image_size,
(underload, overload),
[],
pedestal=int(self._header_dictionary.get("ADC_OFFSET", 1)),
)
def _beam(self):
"""Return a simple model for the beam."""
wavelength = float(self._header_dictionary["WAVELENGTH"])
return self._beam_factory.simple(wavelength)
def _scan(self):
"""Return the scan information for this image."""
exposure_time = 1
epoch = None
# PST, PDT timezones not recognised by default...
epoch = 0
try:
date_str = self._header_dictionary["DATE"]
date_str = date_str.replace("PST", "").replace("PDT", "")
except KeyError:
date_str = ""
for format_string in ["%a %b %d %H:%M:%S %Y", "%a %b %d %H:%M:%S %Z %Y"]:
try:
epoch = calendar.timegm(time.strptime(date_str, format_string))
break
except ValueError:
pass
# assert(epoch)
osc_start = float(self._header_dictionary["OSC_START"])
osc_range = float(self._header_dictionary["OSC_RANGE"])
return self._scan_factory.single_file(
self._image_file, exposure_time, osc_start, osc_range, epoch
)
def get_raw_data(self):
"""Get the pixel intensities (i.e. read the image and return as a
flex array of integers.)"""
assert len(self.get_detector()) == 1
panel = self.get_detector()[0]
image_size = panel.get_image_size()
return self._get_endianic_raw_data(size=image_size)
if __name__ == "__main__":
for arg in sys.argv[1:]:
print(FormatSMVJHSim.understand(arg))
| 33.068493 | 82 | 0.615369 |
0c0bd36b1c84ebcf650c8523ce49f5e3756fc24f | 1,804 | py | Python | simplemfl/urls.py | METS-Programme/simplemfl | df2e49922b9b5a1bdbec726c5e2a0c2820ecf71c | [
"MIT"
] | 1 | 2020-05-11T21:01:02.000Z | 2020-05-11T21:01:02.000Z | simplemfl/urls.py | hargi12/simplemfl | df2e49922b9b5a1bdbec726c5e2a0c2820ecf71c | [
"MIT"
] | null | null | null | simplemfl/urls.py | hargi12/simplemfl | df2e49922b9b5a1bdbec726c5e2a0c2820ecf71c | [
"MIT"
] | 1 | 2020-05-11T21:00:53.000Z | 2020-05-11T21:00:53.000Z | """simplemfl URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers
from facilities.views import OrgUnitViewSet, FacilityViewSet, AdminUnitViewSet, HospitalViewSet
import facilities.urls
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
router.register(r'facilities', FacilityViewSet, base_name='facilities')
router.register(r'adminunits', AdminUnitViewSet, base_name='adminunits')
router.register(r'orgunits', OrgUnitViewSet)
router.register(r'hospitals', HospitalViewSet, base_name='hospitals')
# router.register(r'geojson', GeoJSONOrgUnitViewSet, base_name='geojson')
urlpatterns = [
url(r'^$', facilities.views.index),
url(r'', include(facilities.urls)),
# Django Admin
url(r'^admin/', admin.site.urls),
# Django Rest Framework
url(r'^api/', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls')),
]
# Django debug toolbar support
from django.conf import settings
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns | 36.08 | 95 | 0.734479 |
bc9d3faccbf8e5f70369dc1a6087ecbd8da431ad | 416 | py | Python | barbers_accounts/migrations/0005_auto_20210921_2159.py | starsouf/Python-Django-web-app | 0af1a4f97a7b7583858bd3e487d8a1b502b4daa7 | [
"Unlicense"
] | null | null | null | barbers_accounts/migrations/0005_auto_20210921_2159.py | starsouf/Python-Django-web-app | 0af1a4f97a7b7583858bd3e487d8a1b502b4daa7 | [
"Unlicense"
] | null | null | null | barbers_accounts/migrations/0005_auto_20210921_2159.py | starsouf/Python-Django-web-app | 0af1a4f97a7b7583858bd3e487d8a1b502b4daa7 | [
"Unlicense"
] | null | null | null | # Generated by Django 3.1.2 on 2021-09-22 01:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('barbers_accounts', '0004_auto_20210921_1936'),
]
operations = [
migrations.AlterField(
model_name='website_salon_details',
name='website_logo',
field=models.ImageField(upload_to=''),
),
]
| 21.894737 | 56 | 0.622596 |
4b262cc43127b630236ef3e89512c7f8a8690e1b | 1,398 | py | Python | utils/git_fetch.py | FerdiKirsten/coronavirus_structural_task_force | 821c58846550ef6583366ca9adcba63c3aafd4a5 | [
"MIT"
] | null | null | null | utils/git_fetch.py | FerdiKirsten/coronavirus_structural_task_force | 821c58846550ef6583366ca9adcba63c3aafd4a5 | [
"MIT"
] | null | null | null | utils/git_fetch.py | FerdiKirsten/coronavirus_structural_task_force | 821c58846550ef6583366ca9adcba63c3aafd4a5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 17:43:06 2020
@author: yunyun
"""
import os
import requests
from subprocess import call
import argparse
import pickle
_url_root = 'https://github.com/thorn-lab/coronavirus_structural_task_force/raw/master/'
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-A', '--accept', help="comma-separated list of accepted key words", required='True')
parser.add_argument('-P', '--dir_prefix', help="save file to prefix")
args = parser.parse_args()
return args
def git_fetch(relpath, args):
accept = args.accept.split(',')
prefix = args.dir_prefix
for file in relpath:
if all(_ in file for _ in accept):
call(['wget', '-x', '-nH','--no-check-certificate',
'--content-disposition', '-q',
'-P', prefix,
_url_root + file])
print(_url_root + file)
def get_path():
remote_relpath = requests.get(_url_root + 'utils/relpath.pkl')
with open("relpath.tmp", "wb") as f:
f.write(remote_relpath.content)
with open('relpath.tmp', 'rb') as fp:
relpath_list = pickle.load(fp)
os.remove('relpath.tmp')
return relpath_list
if __name__ == '__main__':
args = parse_args()
relpath = get_path()
git_fetch(relpath, args)
| 26.884615 | 109 | 0.613019 |
abc8098704d32d7493494b9342ff6095096a0423 | 4,588 | py | Python | spyder/app/cli_options.py | Earthman100/spyder | 949ce0f9100a69504c70a5678e8589a05aee7d38 | [
"MIT"
] | 7,956 | 2015-02-17T01:19:09.000Z | 2022-03-31T21:52:15.000Z | spyder/app/cli_options.py | Earthman100/spyder | 949ce0f9100a69504c70a5678e8589a05aee7d38 | [
"MIT"
] | 16,326 | 2015-02-16T23:15:21.000Z | 2022-03-31T23:34:34.000Z | spyder/app/cli_options.py | Earthman100/spyder | 949ce0f9100a69504c70a5678e8589a05aee7d38 | [
"MIT"
] | 1,918 | 2015-02-20T19:26:26.000Z | 2022-03-31T19:03:25.000Z | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
import argparse
def get_options(argv=None):
"""
Convert options into commands.
Return commands, message
"""
parser = argparse.ArgumentParser(usage="spyder [options] files")
parser.add_argument(
'--new-instance',
action='store_true',
default=False,
help="Run a new instance of Spyder, even if the single "
"instance mode has been turned on (default)"
)
parser.add_argument(
'--defaults',
dest="reset_to_defaults",
action='store_true',
default=False,
help="Reset configuration settings to defaults"
)
parser.add_argument(
'--reset',
dest="reset_config_files",
action='store_true',
default=False,
help="Remove all configuration files!"
)
parser.add_argument(
'--optimize',
action='store_true',
default=False,
help="Optimize Spyder bytecode (this may require "
"administrative privileges)"
)
parser.add_argument(
'-w', '--workdir',
dest="working_directory",
default=None,
help="Default working directory"
)
parser.add_argument(
'--hide-console',
action='store_true',
default=False,
help="Hide parent console window (Windows)"
)
parser.add_argument(
'--show-console',
action='store_true',
default=False,
help="(Deprecated) Does nothing, now the default behavior "
"is to show the console"
)
parser.add_argument(
'--multithread',
dest="multithreaded",
action='store_true',
default=False,
help="Internal console is executed in another thread "
"(separate from main application thread)"
)
parser.add_argument(
'--profile',
action='store_true',
default=False,
help="Profile mode (internal test, not related "
"with Python profiling)"
)
parser.add_argument(
'--window-title',
type=str,
default=None,
help="String to show in the main window title"
)
parser.add_argument(
'-p', '--project',
default=None,
type=str,
dest="project",
help="Path that contains an Spyder project"
)
parser.add_argument(
'--opengl',
default=None,
dest="opengl_implementation",
choices=['software', 'desktop', 'gles'],
help="OpenGL implementation to pass to Qt"
)
parser.add_argument(
'--paths',
action='store_true',
default=False,
help="Show all Spyder configuration paths"
)
parser.add_argument(
'--debug-info',
default=None,
dest="debug_info",
choices=['minimal', 'verbose'],
help=("Level of internal debugging info to give. "
"'minimal' only logs a small amount of "
"confirmation messages and 'verbose' logs a "
"lot of detailed information.")
)
parser.add_argument(
'--debug-output',
default='terminal',
dest="debug_output",
choices=['terminal', 'file'],
help=("Print internal debugging info to the terminal and a file in "
"the configuration directory or to the terminal and a file "
"called spyder-debug.log in the current working directory. "
"Default is 'terminal'.")
)
parser.add_argument(
'--filter-log',
default='',
help="Comma-separated module name hierarchies whose log "
"messages should be shown. e.g., "
"spyder.plugins.completion,spyder.plugins.editor"
)
parser.add_argument(
'--safe-mode',
dest="safe_mode",
action='store_true',
default=False,
help="Start Spyder with a clean configuration directory"
)
parser.add_argument(
'--report-segfault',
dest="report_segfault",
action='store_true',
default=False,
help="Report segmentation fault to Github."
)
parser.add_argument(
'--conf-dir',
type=str,
dest="conf_dir",
default=None,
help="Choose a configuration directory to use for Spyder."
)
parser.add_argument('files', nargs='*')
options = parser.parse_args(argv)
args = options.files
return options, args
| 29.22293 | 76 | 0.579119 |
7ee8853f95d6e7cc22a7c907e8ba2d9dc37918b1 | 1,537 | py | Python | 07p/python/py_src/runDemo.py | st970703/AUTO07P-Update-to-Python-3.0 | fb2d2aebf2127fa914064d01ed62c0acb5f6421c | [
"Apache-2.0"
] | null | null | null | 07p/python/py_src/runDemo.py | st970703/AUTO07P-Update-to-Python-3.0 | fb2d2aebf2127fa914064d01ed62c0acb5f6421c | [
"Apache-2.0"
] | null | null | null | 07p/python/py_src/runDemo.py | st970703/AUTO07P-Update-to-Python-3.0 | fb2d2aebf2127fa914064d01ed62c0acb5f6421c | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
from python.py_src import AUTOExceptions, runAUTO
def runDemo(demo,**kw):
runner = runAUTO.runAUTO(**kw)
runner.runDemo(demo)
runner.config(log=None, err=None)
def test():
import os
import sys
from . import AUTOutil
log=open("log","w")
err=open("err","w")
stdout=sys.stdout
class teelog(object):
def write(self,text):
log.write(text)
stdout.write(text)
def flush(self):
log.flush()
stdout.flush()
runDemo("ab",log=teelog(),err=err,makefile="",
demos_dir=os.path.join(os.environ["AUTO_DIR"],"python"),
clean="yes")
log.close()
err.close()
diffopts = ["diff","-b","--ignore-matching-lines='.*Total Time.*'",
"--ignore-matching-lines='.*ab\.o.*'",
"--ignore-matching-lines=' [0-9] .*'"]
status,output= AUTOutil.getstatusoutput(
diffopts+["log","test_data/runDemo.log"])
if status != 0:
raise AUTOExceptions.AUTORegressionError("Log files differ")
status,output= AUTOutil.getstatusoutput(
diffopts+["err","test_data/runDemo.err"])
if status != 0:
raise AUTOExceptions.AUTORegressionError("Error files differ")
os.remove("log")
os.remove("err")
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
test()
if len(sys.argv) == 2:
runDemo(sys.argv[1])
if len(sys.argv) == 3:
runDemo(sys.argv[1],part=sys.argv[2])
| 26.964912 | 71 | 0.571893 |
5f2cc6efd0e3337f3039040ffbdd3e70ce9cc484 | 281 | py | Python | src/south/management/commands/testserver.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 1 | 2022-02-09T22:56:49.000Z | 2022-02-09T22:56:49.000Z | src/south/management/commands/testserver.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 6 | 2018-10-19T10:04:23.000Z | 2019-12-09T20:29:12.000Z | src/south/management/commands/testserver.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 2 | 2021-01-26T09:53:39.000Z | 2022-03-22T09:01:47.000Z | from django.core.management.commands import testserver
from south.management.commands import patch_for_test_db_setup
class Command(testserver.Command):
def handle(self, *args, **kwargs):
patch_for_test_db_setup()
super(Command, self).handle(*args, **kwargs)
| 28.1 | 61 | 0.75089 |
c84836176dd682ef8a0798d72e47e09edccf4f8d | 1,319 | py | Python | ckanext/audioview/plugin.py | hackhit/ckan | 53b9442509b46525d653f2f705e98319752ceb2d | [
"BSD-3-Clause"
] | 6 | 2015-11-09T00:44:51.000Z | 2019-11-21T14:56:01.000Z | ckanext/audioview/plugin.py | hackhit/ckan | 53b9442509b46525d653f2f705e98319752ceb2d | [
"BSD-3-Clause"
] | 39 | 2015-02-18T17:32:23.000Z | 2022-03-11T18:03:36.000Z | ckanext/audioview/plugin.py | hackhit/ckan | 53b9442509b46525d653f2f705e98319752ceb2d | [
"BSD-3-Clause"
] | 17 | 2015-03-13T18:05:05.000Z | 2020-11-06T13:55:32.000Z | # encoding: utf-8
from six import text_type
import ckan.plugins as p
ignore_empty = p.toolkit.get_validator('ignore_empty')
unicode_safe = p.toolkit.get_validator('unicode_safe')
DEFAULT_AUDIO_FORMATS = 'wav ogg mp3'
class AudioView(p.SingletonPlugin):
'''This plugin makes views of audio resources, using an <audio> tag'''
p.implements(p.IConfigurer, inherit=True)
p.implements(p.IResourceView, inherit=True)
def update_config(self, config):
p.toolkit.add_template_directory(config, 'theme/templates')
self.formats = config.get(
'ckan.preview.audio_formats',
DEFAULT_AUDIO_FORMATS).split()
def info(self):
return {'name': 'audio_view',
'title': p.toolkit._('Audio'),
'icon': 'file-audio-o',
'schema': {'audio_url': [ignore_empty, unicode_safe]},
'iframed': False,
'always_available': True,
'default_title': p.toolkit._('Audio'),
}
def can_view(self, data_dict):
return (data_dict['resource'].get('format', '').lower()
in self.formats)
def view_template(self, context, data_dict):
return 'audio_view.html'
def form_template(self, context, data_dict):
return 'audio_form.html'
| 30.674419 | 74 | 0.620925 |
adb2a7febbad4fab6515984bd22690c2638ee36f | 2,258 | py | Python | checkerista/.env/Lib/site-packages/django/db/migrations/operations/utils.py | LybaFatimaNasir/CS311S20PID02 | bc29a8c4c9ee508c74d231c015a57b1ca4dfcb39 | [
"MIT"
] | 15 | 2020-06-04T05:22:47.000Z | 2021-07-06T01:37:57.000Z | checkerista/.env/Lib/site-packages/django/db/migrations/operations/utils.py | LybaFatimaNasir/CS311S20PID02 | bc29a8c4c9ee508c74d231c015a57b1ca4dfcb39 | [
"MIT"
] | 51 | 2019-10-08T01:53:02.000Z | 2021-06-04T22:02:21.000Z | checkerista/.env/Lib/site-packages/django/db/migrations/operations/utils.py | LybaFatimaNasir/CS311S20PID02 | bc29a8c4c9ee508c74d231c015a57b1ca4dfcb39 | [
"MIT"
] | 11 | 2019-09-14T20:57:30.000Z | 2022-01-19T17:59:26.000Z | from collections import namedtuple
from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT
def is_referenced_by_foreign_key(state, model_name_lower, field, field_name):
for state_app_label, state_model in state.models:
for _, f in state.models[state_app_label, state_model].fields:
if (f.related_model and
'%s.%s' % (state_app_label, model_name_lower) == f.related_model.lower() and
hasattr(f, 'to_fields')):
if (f.to_fields[0] is None and field.primary_key) or field_name in f.to_fields:
return True
return False
class ModelTuple(namedtuple('ModelTupleBase', ('app_label', 'model_name'))):
@classmethod
def from_model(cls, model, app_label=None, model_name=None):
"""
Take a model class or an 'app_label.ModelName' string and return a
ModelTuple('app_label', 'modelname'). The optional app_label and
model_name arguments are the defaults if "self" or "ModelName" are
passed.
"""
if isinstance(model, str):
if model == RECURSIVE_RELATIONSHIP_CONSTANT:
return cls(app_label, model_name)
if '.' in model:
return cls(*model.lower().split('.', 1))
return cls(app_label, model.lower())
return cls(model._meta.app_label, model._meta.model_name)
def __eq__(self, other):
if isinstance(other, ModelTuple):
# Consider ModelTuple equal if their model_name is equal and either
# one of them is missing an app_label.
return self.model_name == other.model_name and (
self.app_label is None or other.app_label is None or self.app_label == other.app_label
)
return super().__eq__(other)
def field_references_model(field, model_tuple):
"""Return whether or not field references model_tuple."""
remote_field = field.remote_field
if remote_field:
if ModelTuple.from_model(remote_field.model) == model_tuple:
return True
through = getattr(remote_field, 'through', None)
if through and ModelTuple.from_model(through) == model_tuple:
return True
return False
| 41.814815 | 102 | 0.648361 |
afc742e80b50b5eb6c91fce194df7ba6dbe91420 | 9,452 | py | Python | py/VAE.py | JuliaTagBot/Faceless.jl | db6e20659a2ba589468adf36b67cf9e7f4325bfe | [
"MIT"
] | 2 | 2015-11-29T06:25:24.000Z | 2019-07-19T17:19:32.000Z | py/VAE.py | JuliaTagBot/Faceless.jl | db6e20659a2ba589468adf36b67cf9e7f4325bfe | [
"MIT"
] | null | null | null | py/VAE.py | JuliaTagBot/Faceless.jl | db6e20659a2ba589468adf36b67cf9e7f4325bfe | [
"MIT"
] | 2 | 2016-03-27T19:08:07.000Z | 2020-02-08T11:29:35.000Z |
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
np.random.seed(0)
tf.set_random_seed(0)
# Load MNIST data in a format suited for tensorflow.
# The script input_data is available under this URL:
# https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/g3doc/tutorials/mnist/input_data.py
import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
n_samples = mnist.train.num_examples
def xavier_init(fan_in, fan_out, constant=1):
""" Xavier initialization of network weights"""
# https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
low = -constant*np.sqrt(6.0/(fan_in + fan_out))
high = constant*np.sqrt(6.0/(fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out),
minval=low, maxval=high,
dtype=tf.float32)
class VariationalAutoencoder(object):
""" Variation Autoencoder (VAE) with an sklearn-like interface implemented using TensorFlow.
This implementation uses probabilistic encoders and decoders using Gaussian
distributions and realized by multi-layer perceptrons. The VAE can be learned
end-to-end.
See "Auto-Encoding Variational Bayes" by Kingma and Welling for more details.
"""
def __init__(self, network_architecture, transfer_fct=tf.nn.softplus,
learning_rate=0.001, batch_size=100):
self.network_architecture = network_architecture
self.transfer_fct = transfer_fct
self.learning_rate = learning_rate
self.batch_size = batch_size
# tf Graph input
self.x = tf.placeholder(tf.float32, [None, network_architecture["n_input"]])
# Create autoencoder network
self._create_network()
# Define loss function based variational upper-bound and
# corresponding optimizer
self._create_loss_optimizer()
# Initializing the tensor flow variables
init = tf.initialize_all_variables()
# Launch the session
self.sess = tf.InteractiveSession()
self.sess.run(init)
def _create_network(self):
# Initialize autoencode network weights and biases
network_weights = self._initialize_weights(**self.network_architecture)
# Use recognition network to determine mean and
# (log) variance of Gaussian distribution in latent
# space
self.z_mean, self.z_log_sigma_sq = \
self._recognition_network(network_weights["weights_recog"],
network_weights["biases_recog"])
# Draw one sample z from Gaussian distribution
n_z = self.network_architecture["n_z"]
eps = tf.random_normal((self.batch_size, n_z), 0, 1,
dtype=tf.float32)
# z = mu + sigma*epsilon
self.z = tf.add(self.z_mean,
tf.mul(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))
# Use generator to determine mean of
# Bernoulli distribution of reconstructed input
self.x_reconstr_mean = \
self._generator_network(network_weights["weights_gener"],
network_weights["biases_gener"])
def _initialize_weights(self, n_hidden_recog_1, n_hidden_recog_2,
n_hidden_gener_1, n_hidden_gener_2,
n_input, n_z):
all_weights = dict()
all_weights['weights_recog'] = {
'h1': tf.Variable(xavier_init(n_input, n_hidden_recog_1)),
'h2': tf.Variable(xavier_init(n_hidden_recog_1, n_hidden_recog_2)),
'out_mean': tf.Variable(xavier_init(n_hidden_recog_2, n_z)),
'out_log_sigma': tf.Variable(xavier_init(n_hidden_recog_2, n_z))}
all_weights['biases_recog'] = {
'b1': tf.Variable(tf.zeros([n_hidden_recog_1], dtype=tf.float32)),
'b2': tf.Variable(tf.zeros([n_hidden_recog_2], dtype=tf.float32)),
'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32)),
'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32))}
all_weights['weights_gener'] = {
'h1': tf.Variable(xavier_init(n_z, n_hidden_gener_1)),
'h2': tf.Variable(xavier_init(n_hidden_gener_1, n_hidden_gener_2)),
'out_mean': tf.Variable(xavier_init(n_hidden_gener_2, n_input)),
'out_log_sigma': tf.Variable(xavier_init(n_hidden_gener_2, n_input))}
all_weights['biases_gener'] = {
'b1': tf.Variable(tf.zeros([n_hidden_gener_1], dtype=tf.float32)),
'b2': tf.Variable(tf.zeros([n_hidden_gener_2], dtype=tf.float32)),
'out_mean': tf.Variable(tf.zeros([n_input], dtype=tf.float32)),
'out_log_sigma': tf.Variable(tf.zeros([n_input], dtype=tf.float32))}
return all_weights
def _recognition_network(self, weights, biases):
# Generate probabilistic encoder (recognition network), which
# maps inputs onto a normal distribution in latent space.
# The transformation is parametrized and can be learned.
layer_1 = self.transfer_fct(tf.add(tf.matmul(self.x, weights['h1']),
biases['b1']))
layer_2 = self.transfer_fct(tf.add(tf.matmul(layer_1, weights['h2']),
biases['b2']))
z_mean = tf.add(tf.matmul(layer_2, weights['out_mean']),
biases['out_mean'])
z_log_sigma_sq = \
tf.add(tf.matmul(layer_2, weights['out_log_sigma']),
biases['out_log_sigma'])
return (z_mean, z_log_sigma_sq)
def _generator_network(self, weights, biases):
# Generate probabilistic decoder (decoder network), which
# maps points in latent space onto a Bernoulli distribution in data space.
# The transformation is parametrized and can be learned.
layer_1 = self.transfer_fct(tf.add(tf.matmul(self.z, weights['h1']),
biases['b1']))
layer_2 = self.transfer_fct(tf.add(tf.matmul(layer_1, weights['h2']),
biases['b2']))
x_reconstr_mean = \
tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['out_mean']),
biases['out_mean']))
return x_reconstr_mean
    def _create_loss_optimizer(self):
        """Define the cost (negative ELBO) and the Adam training op."""
        # The loss is composed of two terms:
        # 1.) The reconstruction loss: the negative log probability of the
        #     input under the Bernoulli distribution whose mean the decoder
        #     produced. Interpretable as the number of "nats" required to
        #     reconstruct the input given the latent activation.
        # Adding 1e-10 to avoid evaluation of log(0.0)
        reconstr_loss = \
            -tf.reduce_sum(self.x * tf.log(1e-10 + self.x_reconstr_mean)
                           + (1-self.x) * tf.log(1e-10 + 1 - self.x_reconstr_mean),
                           1)
        # 2.) The latent loss: the Kullback-Leibler divergence between the
        #     approximate posterior induced by the encoder (diagonal
        #     Gaussian with mean z_mean and log-variance z_log_sigma_sq)
        #     and the standard normal prior -- closed form for Gaussians.
        #     Acts as a regularizer; the number of "nats" required for
        #     transmitting the latent distribution given the prior.
        latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq
                                           - tf.square(self.z_mean)
                                           - tf.exp(self.z_log_sigma_sq), 1)
        self.cost = tf.reduce_mean(reconstr_loss + latent_loss)   # average over batch
        # Use ADAM optimizer
        self.optimizer = \
            tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.cost)
def partial_fit(self, X):
"""Train model based on mini-batch of input data.
Return cost of mini-batch.
"""
opt, cost = self.sess.run((self.optimizer, self.cost),
feed_dict={self.x: X})
return cost
def transform(self, X):
"""Transform data by mapping it into the latent space."""
# Note: This maps to mean of distribution, we could alternatively
# sample from Gaussian distribution
return self.sess.run(self.z_mean, feed_dict={self.x: X})
    def generate(self, z_mu=None):
        """ Generate data by sampling from latent space.

        If z_mu is not None, data for this point in latent space is
        generated. Otherwise, z_mu is drawn from the standard normal
        prior in latent space.

        NOTE(review): the prior draw has shape (n_z,), not
        (batch_size, n_z); presumably the feed into ``self.z`` accepts
        that shape -- confirm against the placeholder's static shape.
        """
        if z_mu is None:
            z_mu = np.random.normal(size=self.network_architecture["n_z"])
        # Note: This maps to mean of distribution, we could alternatively
        # sample from Gaussian distribution
        return self.sess.run(self.x_reconstr_mean,
                             feed_dict={self.z: z_mu})
def reconstruct(self, X):
""" Use VAE to reconstruct given data. """
return self.sess.run(self.x_reconstr_mean,
feed_dict={self.x: X})
| 46.792079 | 111 | 0.609077 |
a827951b8f489ddd7c383d0904c941cde566587a | 2,763 | py | Python | prody/apps/evol_apps/evol_conserv.py | kaynakb/ProDy | 4366ad28142f51ff8a84f8a0f4ce659c0b949d55 | [
"MIT"
] | 210 | 2015-01-26T08:17:56.000Z | 2022-03-30T01:40:34.000Z | prody/apps/evol_apps/evol_conserv.py | kaynakb/ProDy | 4366ad28142f51ff8a84f8a0f4ce659c0b949d55 | [
"MIT"
] | 555 | 2015-01-05T21:51:54.000Z | 2022-03-31T16:51:41.000Z | prody/apps/evol_apps/evol_conserv.py | kaynakb/ProDy | 4366ad28142f51ff8a84f8a0f4ce659c0b949d55 | [
"MIT"
] | 99 | 2015-02-09T18:00:39.000Z | 2022-03-07T12:52:51.000Z | """Calculate conservation in an MSA using Shannon entropy."""
from ..apptools import DevelApp
from prody import LOGGER
__all__ = ['evol_conserv']
# Command-line sub-application: ``evol conserv``.
APP = DevelApp('conserv',
    help='analyze conservation using Shannon entropy')

APP.setExample(
"""This application calculates conservation using Shannon entropy for a \
refined multiple sequence alignment. Following example will save entropy \
data and plot using default options:
$ evol conserv piwi_refined.slx -S""", [])

# Positional argument: the refined MSA file to analyze.
APP.addArgument('msa',
    help='refined MSA file')

# Options controlling the entropy calculation itself.
APP.addGroup('calc', 'calculation options')
APP.addArgument('-n', '--no-ambiguity',
    dest='ambiguity',
    help='treat amino acids characters B, Z, J, and X as non-ambiguous',
    default=True,
    action='store_false',
    group='calc')
APP.addArgument('-g', '--gaps',
    dest='omitgaps',
    help='do not omit gap characters',
    default=True,
    action='store_false',
    group='calc')

# Options controlling output file naming and formatting.
APP.addGroup('output', 'output options')
APP.addArgument('-p', '--prefix',
    dest='prefix',
    help='output filename prefix, default is '
         'msa filename with _conserv suffix',
    type=str,
    metavar='STR',
    group='output')
APP.addArgument('-f', '--number-format',
    dest='numformat', type=str, default='%12g',
    metavar='STR', help='number output format', group='output')

# Optional figure output (saved only when -S/--save-plot is given).
APP.addFigure('-S', '--save-plot',
    dest='figent',
    action='store_true',
    help='save conservation plot')
def evol_conserv(msa, **kwargs):
    """Calculate Shannon-entropy conservation for a refined MSA file.

    Writes ``<prefix>.txt`` with the entropy values and, when the
    ``figent`` keyword is true, saves a conservation plot next to it.

    Parameters
    ----------
    msa : str
        Path to the refined MSA file (optionally gzipped).
    **kwargs
        CLI options (``prefix``, ``numformat``, ``figent``, figure size /
        format / dpi, ...); also forwarded to ``calcShannonEntropy``.
    """
    import prody
    from prody import parseMSA, calcShannonEntropy, showShannonEntropy
    from prody import writeArray
    from os.path import splitext

    # Default prefix: MSA filename (minus .gz, minus extension) + _conserv.
    prefix = kwargs.get('prefix')
    if prefix is None:
        prefix, _ = splitext(msa)
        if _.lower() == '.gz':
            prefix, _ = splitext(prefix)
        prefix += '_conserv'

    msa = parseMSA(msa)
    entropy = calcShannonEntropy(msa, **kwargs)
    writeArray(prefix + '.txt',
               entropy, format=kwargs.get('numformat', '%12g'))

    if kwargs.get('figent'):
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            # Plotting is optional: warn instead of failing the whole run.
            LOGGER.warn('Matplotlib could not be imported, '
                        'figures are not saved.')
        else:
            prody.SETTINGS['auto_show'] = False
            width = kwargs.get('figwidth', 8)
            height = kwargs.get('figheight', 6)
            figargs = kwargs.get('figargs', ())
            figure = plt.figure(figsize=(width, height))
            # Draw onto the current figure; the return value is unused
            # (the original bound it to an unused local `show`).
            showShannonEntropy(entropy, msa=msa, *figargs)
            # Renamed from `format` to avoid shadowing the builtin.
            figformat = kwargs.get('figformat', 'pdf')
            figure.savefig(prefix + '.' + figformat, format=figformat,
                           dpi=kwargs.get('figdpi', 300))
# Register the entry point so ``evol conserv`` dispatches to evol_conserv.
APP.setFunction(evol_conserv)
| 29.393617 | 75 | 0.624683 |
2cb7da4182a186c5350b4125b9a9505f18a3a0db | 12,758 | py | Python | built-in/TensorFlow/Official/cv/image_classification/ShuffleNetV1-1.0x-group3_ID2129_for_TensorFlow/architecture.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | built-in/TensorFlow/Official/cv/image_classification/ShuffleNetV1-1.0x-group3_ID2129_for_TensorFlow/architecture.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 1 | 2022-01-20T03:11:05.000Z | 2022-01-20T06:53:39.000Z | built-in/TensorFlow/Official/cv/image_classification/ShuffleNetV1-1.0x-group3_ID2129_for_TensorFlow/architecture.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z | #
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from npu_bridge.npu_init import *
import tensorflow as tf
import tensorflow.contrib.slim as slim
# Batch-normalization hyperparameters shared by every batch_norm layer below.
BATCH_NORM_MOMENTUM = 0.997
BATCH_NORM_EPSILON = 1e-3
def shufflenet(images, is_training, num_classes=1000, depth_multiplier='1.0'):
    """ShuffleNet v2 classifier (https://arxiv.org/abs/1807.11164).

    Arguments:
        images: a float tensor with shape [batch_size, image_height, image_width, 3],
            a batch of RGB images with pixel values in the range [0, 1].
        is_training: a boolean, passed to batch normalization.
        num_classes: an integer, size of the final classifier.
        depth_multiplier: a string, one of '0.33', '0.5', '1.0', '1.5', '2.0'.
    Returns:
        a float tensor with shape [batch_size, num_classes].

    Per-width channel configuration
    (stage2 channels, stage3, stage4, final 1x1 conv):
        0.33: (32, 64, 128, 512)
        0.5:  (48, 96, 192, 1024)
        1.0:  (116, 232, 464, 1024)
        1.5:  (176, 352, 704, 1024)
        2.0:  (244, 488, 976, 2048)
    """
    # Stage-2 output channels per width multiplier.
    # BUGFIX: '2.0' was 224, contradicting both the table above and the
    # ShuffleNet v2 paper (Table 5); corrected to 244.
    possibilities = {'0.33': 32, '0.5': 48, '1.0': 116, '1.5': 176, '2.0': 244}
    initial_depth = possibilities[depth_multiplier]

    def batch_norm(x):
        # Shared wrapper so slim layers pick up the training flag and the
        # module-level momentum/epsilon constants.
        x = tf.layers.batch_normalization(
            x, axis=3, center=True, scale=True,
            training=is_training,
            momentum=BATCH_NORM_MOMENTUM,
            epsilon=BATCH_NORM_EPSILON,
            fused=True, name='batch_norm'
        )
        return x

    with tf.name_scope('standardize_input'):
        # Map pixel values from [0, 1] to [-1, 1].
        x = (2.0 * images) - 1.0

    with tf.variable_scope('ShuffleNetV2'):
        params = {
            'padding': 'SAME', 'activation_fn': tf.nn.relu,
            'normalizer_fn': batch_norm, 'data_format': 'NHWC',
            'weights_initializer': tf.contrib.layers.xavier_initializer()
        }
        with slim.arg_scope([slim.conv2d, depthwise_conv, slim.conv2d_transpose], **params):
            # Stem: 3x3 stride-2 conv followed by 3x3 stride-2 max pool.
            x = slim.conv2d(x, 24, (3, 3), stride=2, scope='Conv1')
            x = slim.max_pool2d(x, (3, 3), stride=2, padding='SAME', scope='MaxPool')

            # Three stages of shuffle units; stage2 fixes the width,
            # later stages double it (see `block`).
            x = block(x, num_units=4, out_channels=initial_depth, scope='Stage2')
            x = block(x, num_units=8, scope='Stage3')
            x = block(x, num_units=4, scope='Stage4')

            # Width of the final 1x1 convolution depends on the multiplier.
            if depth_multiplier == '0.33':
                final_channels = 512
            elif depth_multiplier == '2.0':
                final_channels = 2048
            else:
                final_channels = 1024

            x = slim.conv2d(x, final_channels, (1, 1), stride=1, scope='Conv5')

    # global average pooling
    x = tf.reduce_mean(x, axis=[1, 2])

    logits = slim.fully_connected(
        x, num_classes, activation_fn=None, scope='classifier',
        weights_initializer=tf.contrib.layers.xavier_initializer()
    )
    return logits
def RFBModuleB(x, in_channels):
    """Receptive Field Block: three parallel branches with increasing
    dilation rates, channel-interleaved and fused with a 1x1-projected
    shortcut, followed by ReLU. Output keeps in_channels channels."""
    inc=in_channels//8
    with tf.variable_scope('branch0'):
        # 1x1 reduce, then plain 3x3 (dilation 1).
        conv1x1=slim.conv2d(x, 2*inc, (1, 1), stride=1, scope='conv1x1')
        branch0_conv3x3=slim.conv2d(conv1x1, 2*inc, (3, 3), stride=1, padding='SAME', activation_fn=None)
    with tf.variable_scope('branch1'):
        # 1x1 reduce, 3x3, then dilated 3x3 (rate 2).
        conv1x1=slim.conv2d(x, 1*inc, (1, 1), stride=1, scope='conv1x1')
        branch1_conv3x3=slim.conv2d(conv1x1, 2*inc, (3, 3), stride=1, padding='SAME')
        branch1_conv3x3_dilation=slim.conv2d(branch1_conv3x3, 2*inc, (3, 3), stride=1, padding='SAME',rate=2, activation_fn=None)
    with tf.variable_scope('branch2'):
        # 1x1 reduce, two stacked 3x3s (5x5-equivalent receptive field),
        # then dilated 3x3 (rate 5).
        conv1x1=slim.conv2d(x, 1*inc, (1, 1), stride=1, scope='conv1x1')
        branch2_conv5x5_1=slim.conv2d(conv1x1, (inc//2)*3, (3, 3), stride=1, padding='SAME')
        branch2_conv5x5_2=slim.conv2d(branch2_conv5x5_1, 2*inc, (3, 3), stride=1, padding='SAME')
        branch2_conv3x3_dilation=slim.conv2d(branch2_conv5x5_2, 2*inc, (3, 3), stride=1, padding='SAME',rate=5,activation_fn=None)
    # Identity path projected to in_channels.
    shortcut=slim.conv2d(x, in_channels, (1, 1), stride=1, scope='shortcut',activation_fn=None)
    shape = tf.shape(shortcut)
    batch_size = shape[0]
    height, width = shape[1], shape[2]
    # Interleave the three 2*inc-channel branch outputs channel-wise:
    # stack on a new axis, move it next to channels, flatten to 6*inc.
    x = tf.stack([branch0_conv3x3,branch1_conv3x3_dilation,branch2_conv3x3_dilation], axis=3)
    x = tf.transpose(x, [0, 1, 2, 4, 3])
    x = tf.reshape(x, [batch_size, height, width, 6*inc])
    x=slim.conv2d(x, in_channels, (1, 1), stride=1, scope='output',activation_fn=None)
    # NOTE(review): scale is a constant 1.0 tensor, so this multiply is a
    # no-op; in the RFB paper this position holds a small scaling factor --
    # confirm whether a learnable/fixed scale was intended.
    scale=tf.fill([batch_size,1,1,in_channels],1.0)
    x=x*scale+shortcut
    x=tf.nn.relu(x)
    return x
def RFBModuleB2(x, in_channels):
    """Lightweight RFB variant: split the input into four channel groups,
    give each group a branch with a different receptive field (identity-like
    1x1, 3x3, depthwise + dilated rate 2, two depthwise + dilated rate 5),
    concatenate and fuse back to in_channels with a 1x1 convolution."""
    inc=in_channels//8
    # Each split group carries in_channels // 4 channels.
    x, y, z, w = tf.split(x, num_or_size_splits=4, axis=3)
    with tf.variable_scope('branch0'):
        # Plain 3x3 (dilation 1).
        branch0_conv3x3=slim.conv2d(y, 2*inc, (3, 3), stride=1, padding='SAME', activation_fn=None)
    with tf.variable_scope('branch1'):
        # Depthwise 3x3 then dilated 3x3 (rate 2).
        branch1_conv3x3=depthwise_conv(z, kernel=3, stride=1, padding='SAME',activation_fn=tf.nn.relu)
        branch1_conv3x3_dilation=slim.conv2d(branch1_conv3x3, 2*inc, (3, 3), stride=1, padding='SAME',rate=2, activation_fn=None)
    with tf.variable_scope('branch2'):
        # Two depthwise 3x3s (5x5-equivalent field) then dilated 3x3 (rate 5).
        branch2_conv5x5_1=depthwise_conv(w, kernel=3, stride=1, padding='SAME',activation_fn=tf.nn.relu)
        branch2_conv5x5_2=depthwise_conv(branch2_conv5x5_1, kernel=3, stride=1, padding='SAME',activation_fn=tf.nn.relu,scope='depthwise_conv2')
        branch2_conv3x3_dilation=slim.conv2d(branch2_conv5x5_2, 2*inc, (3, 3), stride=1, padding='SAME',rate=5,activation_fn=None)
    # First group passes through a 1x1 projection only.
    shortcut=slim.conv2d(x, 2*inc, (1, 1), stride=1, scope='shortcut',activation_fn=None)
    x = tf.concat([shortcut,branch0_conv3x3,branch1_conv3x3_dilation,branch2_conv3x3_dilation], axis=3)
    x=slim.conv2d(x, in_channels, (1, 1), stride=1, scope='output')
    return x
def block(x, num_units, out_channels=None, scope='stage'):
    """One ShuffleNet v2 stage: a downsampling unit, then basic units
    connected through channel shuffles; the two branches are concatenated
    at the end."""
    with tf.variable_scope(scope):
        with tf.variable_scope('unit_1'):
            x, y = basic_unit_with_downsampling(x, out_channels)
        for unit_index in range(2, num_units + 1):
            with tf.variable_scope('unit_%d' % unit_index):
                x, y = concat_shuffle_split(x, y)
                x = basic_unit(x)
        return tf.concat([x, y], axis=3)
def concat_shuffle_split(x, y):
    """Channel shuffle for two equal-width halves.

    Interleaves the channels of x and y (stack -> transpose -> reshape)
    and splits the result back into two halves, so information is
    exchanged between the branches between units.
    """
    with tf.name_scope('concat_shuffle_split'):
        shape = tf.shape(x)
        batch_size = shape[0]
        height, width = shape[1], shape[2]
        # Static channel count (both inputs have the same depth).
        depth = x.shape[3].value
        z = tf.stack([x, y], axis=3)  # shape [batch_size, height, width, 2, depth]
        # Move the pair axis next to channels, then flatten: channels
        # become x0, y0, x1, y1, ... (perfect interleave).
        z = tf.transpose(z, [0, 1, 2, 4, 3])
        z = tf.reshape(z, [batch_size, height, width, 2*depth])
        x, y = tf.split(z, num_or_size_splits=2, axis=3)
        return x, y
def basic_unit(x):
    """ShuffleNet v2 basic unit applied to one branch half.

    1x1 conv -> 3x3 depthwise conv (stride 1, no activation) -> 1x1 conv;
    the channel count is unchanged. (A dead `if False:` squeeze-and-
    excitation block from the original has been removed.)
    """
    in_channels = x.shape[3].value
    x = slim.conv2d(x, in_channels, (1, 1), stride=1, scope='conv1x1_before')
    x = depthwise_conv(x, kernel=3, stride=1, activation_fn=None, scope='depthwise')
    x = slim.conv2d(x, in_channels, (1, 1), stride=1, scope='conv1x1_after')
    return x
def basic_unit_with_downsampling(x, out_channels=None):
    """ShuffleNet v2 downsampling unit (spatial stride 2).

    Both branches see the full input:
      * main branch: 1x1 conv -> 3x3 depthwise (stride 2) -> 1x1 conv,
      * second branch: 3x3 depthwise (stride 2) -> 1x1 conv.
    Each branch emits out_channels // 2 channels; the caller concatenates
    them. (Dead `if False:` squeeze-and-excitation code and the unused
    `SEch` local from the original have been removed.)

    Parameters
    ----------
    x: NHWC feature map.
    out_channels: total channels after concatenation; defaults to twice
        the input channel count.

    Returns
    -------
    (x, y): the two branch outputs, each with out_channels // 2 channels
        and halved spatial resolution.
    """
    in_channels = x.shape[3].value
    out_channels = 2 * in_channels if out_channels is None else out_channels

    y = slim.conv2d(x, in_channels, (1, 1), stride=1, scope='conv1x1_before')
    y = depthwise_conv(y, kernel=3, stride=2, activation_fn=None, scope='depthwise')
    y = slim.conv2d(y, out_channels // 2, (1, 1), stride=1, scope='conv1x1_after')

    with tf.variable_scope('second_branch'):
        x = depthwise_conv(x, kernel=3, stride=2, activation_fn=None, scope='depthwise')
        x = slim.conv2d(x, out_channels // 2, (1, 1), stride=1, scope='conv1x1_after')
        return x, y
@tf.contrib.framework.add_arg_scope
def depthwise_conv(
        x, kernel=3, stride=1, padding='SAME',
        activation_fn=None, normalizer_fn=None,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        data_format='NHWC', scope='depthwise_conv'):
    """Depthwise 2-D convolution (one filter per input channel).

    Registered with slim's arg_scope so `normalizer_fn`/`activation_fn`
    can be injected (see the arg_scope in `shufflenet`). NHWC only.
    """
    with tf.variable_scope(scope):
        assert data_format == 'NHWC'
        in_channels = x.shape[3].value
        # One [kernel, kernel] filter per channel (channel multiplier 1).
        W = tf.get_variable(
            'depthwise_weights',
            [kernel, kernel, in_channels, 1], dtype=tf.float32,
            initializer=weights_initializer
        )
        x = tf.nn.depthwise_conv2d(x, W, [1, stride, stride, 1], padding, data_format='NHWC')
        x = normalizer_fn(x) if normalizer_fn is not None else x  # batch normalization
        x = activation_fn(x) if activation_fn is not None else x  # nonlinearity
        return x
| 43.993103 | 144 | 0.617417 |
88f30f2f31f9b77fc4097c0d9afcf620669099b3 | 6,399 | py | Python | NCube/NCubeImageOnTopographyBlockSource.py | mobigroup/ParaView-plugins | f7cf829f858dbb91f176d45b17df45cc3fe6cb99 | [
"MIT"
] | 41 | 2020-01-09T16:45:53.000Z | 2022-03-16T07:04:37.000Z | NCube/NCubeImageOnTopographyBlockSource.py | echinoids/ParaView-plugins | f7cf829f858dbb91f176d45b17df45cc3fe6cb99 | [
"MIT"
] | 1 | 2021-06-04T14:09:23.000Z | 2021-06-05T11:52:27.000Z | NCube/NCubeImageOnTopographyBlockSource.py | echinoids/ParaView-plugins | f7cf829f858dbb91f176d45b17df45cc3fe6cb99 | [
"MIT"
] | 6 | 2020-03-15T14:35:52.000Z | 2021-07-31T16:44:07.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020 Alexey Pechnikov. All rights reserved.
# https://orcid.org/0000-0001-9626-8615 (ORCID)
# pechnikov@mobigroup.ru (email)
# License: http://opensource.org/licenses/MIT
from paraview.util.vtkAlgorithm import *
# load error fix for paraView 5.8.1rc1 Python3
try:
import xarray
except:
import sys
print (sys.exc_info()[0])
def _NCubeImageOnTopographyToGrid(dem, image, mask_magic=False):
    """Build a VTK grid from a DEM and a co-registered image raster.

    dem: 2-D xarray.DataArray of elevations with 'x'/'y' coordinates.
    image: 3-D xarray.DataArray (band, y, x) on the same grid as `dem`.
    mask_magic: when truthy, additionally drop pixels where ANY image band
        is zero (empirically gives cleaner borders than requiring all
        bands to be zero).

    Returns the thresholded output (points with NaN elevation removed)
    carrying point arrays "z" plus "colors" (3 bands) or "band" (1 band).
    """
    from vtk import vtkPoints, vtkStructuredGrid, vtkThreshold, vtkDataObject, VTK_FLOAT, VTK_UNSIGNED_CHAR
    from vtk.util import numpy_support as vn
    import numpy as np

    # mask NaN areas: pixels with NaN in any band are excluded
    nanmask = (~np.any(np.isnan(image.values),axis=0)).astype(float)
    # mask single channel zeroes if needed
    if mask_magic:
        # that's more correct way, only black pixels ignored
        #zeromask = (~np.all(image.values==0,axis=0)).astype(float)
        # that's magic for better borders
        zeromask = (~np.any(image.values==0,axis=0)).astype(float)
        mask = nanmask*zeromask
    else:
        mask = nanmask
    # Masked-out pixels become NaN elevations and are thresholded away below.
    mask[mask==0] = np.nan

    xs = dem.x.values
    ys = dem.y.values
    # create raster mask by geometry and for NaNs
    values = mask * dem.values

    # Build the point set: one (x, y, z) triple per DEM cell.
    # NOTE(review): x/y are raveled in Fortran order while z uses C order --
    # presumably intentional to match the (ys, xs) meshgrid layout; confirm.
    (yy,xx) = np.meshgrid(ys, xs)
    vtk_points = vtkPoints()
    points = np.column_stack((xx.ravel('F'),yy.ravel('F'),values.ravel('C')))
    _points = vn.numpy_to_vtk(points, deep=True)
    vtk_points.SetData(_points)

    sgrid = vtkStructuredGrid()
    sgrid.SetDimensions(len(xs), len(ys), 1)
    sgrid.SetPoints(vtk_points)

    # Elevation as a point-data array; also drives NaN thresholding below.
    array = vn.numpy_to_vtk(values.ravel(), deep=True, array_type=VTK_FLOAT)
    array.SetName("z")
    sgrid.GetPointData().AddArray(array)

    bands = image.band.shape[0]
    print ("bands", bands)
    if bands == 3:
        # RGB: store as unsigned chars in a 3-component "colors" array.
        colors = np.round(image.values)
        array = vn.numpy_to_vtk(colors.reshape(3,-1).T, deep=True, array_type=VTK_UNSIGNED_CHAR)
        array.SetName("colors")
        sgrid.GetPointData().AddArray(array)
    elif bands == 1:
        # Single band: store raw values as a float "band" array.
        arr = image.values
        array = vn.numpy_to_vtk(arr.reshape(1,-1).T, deep=True, array_type=VTK_FLOAT)
        array.SetName("band")
        sgrid.GetPointData().AddArray(array)
    else:
        print ("Unsupported bands count (should be 1 or 3)", bands)

    # Remove points whose "z" fell outside [-1e30, 1e30] (i.e. NaN).
    thresh = vtkThreshold()
    thresh.SetInputData(sgrid)
    thresh.SetInputArrayToProcess(0, 0, 0, vtkDataObject.FIELD_ASSOCIATION_POINTS, "z")
    thresh.ThresholdBetween(-1e30, 1e30)
    thresh.Update()
#    return sgrid
    return thresh.GetOutput()
#------------------------------------------------------------------------------
# N-Cube Image On Topography Source
#------------------------------------------------------------------------------
@smproxy.source(name="NCubeImageOnTopographySource",
       label="N-Cube Image On Topography Source")
class NCubeImageOnTopographySource(VTKPythonAlgorithmBase):
    """ParaView source that drapes a georeferenced image over a DEM.

    Reads a topography raster and an image raster (GeoTIFF/NetCDF),
    resamples the image onto the DEM grid and emits a
    vtkUnstructuredGrid via _NCubeImageOnTopographyToGrid.
    """
    def __init__(self):
        VTKPythonAlgorithmBase.__init__(self,
                nInputPorts=0,
                nOutputPorts=1,
                outputType='vtkUnstructuredGrid')
        # File names selected from the ParaView UI (None until set).
        self._imagename = None
        self._toponame = None
        # When non-zero, clamp negative elevations to sea level (0).
        self._usesealevel = 0
        # When non-zero, apply the zero-pixel "magic" mask (see helper).
        self._mask_magic = 1

    def RequestData(self, request, inInfo, outInfo):
        """Pipeline execution: build the draped grid on the output port."""
        from vtk import vtkUnstructuredGrid
        import xarray as xr
        import numpy as np
        import time

        # Nothing to do until both files are selected.
        if self._toponame is None or self._imagename is None:
            return 1

        t0 = time.time()

        # load the full topography raster
        dem = xr.open_rasterio(self._toponame).squeeze()
        # Ensure a float dtype so nodata can be represented as NaN.
        if dem.values.dtype not in [np.dtype('float16'),np.dtype('float32'),np.dtype('float64'),np.dtype('float128')]:
            dem.values = dem.values.astype("float32")
        dem.values[dem.values == dem.nodatavals[0]] = np.nan
        if self._usesealevel:
            dem.values[dem.values <= 0] = 0

        # load the full image raster and resample it onto the DEM grid
        image = xr.open_rasterio(self._imagename)
        image = image.interp_like(dem)
        #dem = dem.interp_like(image)

        vtk_ugrid = _NCubeImageOnTopographyToGrid(dem, image, self._mask_magic)

        output = vtkUnstructuredGrid.GetData(outInfo, 0)
        output.ShallowCopy(vtk_ugrid)

        t1 = time.time()
        print ("t1-t0", t1-t0)

        return 1

    # NOTE(review): method name says "Shape" but it sets the IMAGE file
    # (likely copied from a shapefile reader); kept as-is since renaming
    # could affect proxy introspection -- confirm before renaming.
    @smproperty.stringvector(name="Image File Name")
    @smdomain.filelist()
    @smhint.filechooser(extensions=["tif", "TIF", "nc"], file_description="GeoTIFF, NetCDF")
    def SetShapeFileName(self, name):
        """Specify filename for the image to read."""
        print ("SetImageFileName", name)
        # ParaView passes the literal string 'None' when nothing is chosen.
        name = name if name != 'None' else None
        if self._imagename != name:
            self._imagename = name
            self.Modified()

    @smproperty.stringvector(name="Topography File Name")
    @smdomain.filelist()
    @smhint.filechooser(extensions=["tif", "TIF", "nc"], file_description="GeoTIFF, NetCDF")
    def SetTopographyFileName(self, name):
        """Specify filename for the topography file to read."""
        print ("SetTopographyFileName", name)
        name = name if name != 'None' else None
        if self._toponame != name:
            self._toponame = name
            self.Modified()

    @smproperty.xml("""
        <IntVectorProperty name="Use Sea Level For Negative Topography"
            command="SetTopographySeaLevel"
            number_of_elements="1"
            default_values="0">
            <BooleanDomain name="bool" />
            <Documentation>
                Use this checkbox to replace negative topography by sea level.
            </Documentation>
        </IntVectorProperty>
    """)
    def SetTopographySeaLevel(self, value):
        """Toggle clamping of negative elevations to sea level."""
        print ("TopographySeaLevel", value)
        self._usesealevel = value
        self.Modified()

    @smproperty.xml("""
        <IntVectorProperty name="Use Magic Image Mask"
            command="SetUseImageMagicMask"
            number_of_elements="1"
            default_values="1">
            <BooleanDomain name="bool" />
            <Documentation>
                Unset this checkbox when you see some missed pixels.
            </Documentation>
        </IntVectorProperty>
    """)
    def SetUseImageMagicMask(self, value):
        """Toggle the zero-pixel 'magic' mask used during grid building."""
        print ("SetImageMagicMask", value)
        self._mask_magic = value
        self.Modified()
| 34.967213 | 118 | 0.616346 |
a3522a402ac17344dc75f0899f42ec973a2579a6 | 5,297 | py | Python | docs/conf.py | lucas7bm/pychord | eb179289919ab9fec0dd27e10fa52d3e395082d1 | [
"MIT"
] | 1 | 2018-11-18T22:44:40.000Z | 2018-11-18T22:44:40.000Z | docs/conf.py | lucas7bm/pychord | eb179289919ab9fec0dd27e10fa52d3e395082d1 | [
"MIT"
] | null | null | null | docs/conf.py | lucas7bm/pychord | eb179289919ab9fec0dd27e10fa52d3e395082d1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# pychord documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 31 14:51:42 2016.
#
# Executed by Sphinx with the current directory set to its containing dir.
# Every value has a default; commented-out entries show those defaults.

# If autodoc targets live outside sys.path, insert their directory here:
# import os
# import sys
# sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '../pychord'))

# -- General configuration ------------------------------------------------

# Sphinx extension modules (builtin 'sphinx.ext.*' or custom ones).
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.todo',
              'sphinx.ext.viewcode']

# Paths that contain templates, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames (may be a list, e.g. ['.rst', '.md']).
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'pychord'
copyright = u'2016, Yuma Mihira'
author = u'Yuma Mihira'

# Short X.Y version and full release string; left empty (unversioned docs).
version = u''
release = u''

# Language for content autogenerated by Sphinx.
language = 'en'

# Patterns, relative to the source directory, to ignore when looking for
# source files. Also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.
html_theme = 'sphinx_rtd_theme'

# Custom static files (copied after builtin static files, so a file named
# "default.css" will overwrite the builtin "default.css").
html_static_path = ['_static']

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'pychorddoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'pychord.tex', u'pychord Documentation',
     u'Author', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pychord', u'pychord Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category).
texinfo_documents = [
    (master_doc, 'pychord', u'pychord Documentation',
     author, 'pychord', 'One line description of project.',
     'Miscellaneous'),
]

# -- Options for Epub output ----------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright

# epub_identifier = ''  # unique identifier (ISBN or project homepage)
# epub_uid = ''         # unique identification for the text

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| 29.427778 | 92 | 0.678497 |
6dc603f2b760141d2254c0cbbbe095cc2cc7da80 | 9,167 | py | Python | docs/conf.py | timothyb0912/genvi | b6382f488ffcea89355876f04ffdc2b122c09509 | [
"MIT"
] | 1 | 2021-01-22T07:50:30.000Z | 2021-01-22T07:50:30.000Z | docs/conf.py | timothyb0912/genvi | b6382f488ffcea89355876f04ffdc2b122c09509 | [
"MIT"
] | null | null | null | docs/conf.py | timothyb0912/genvi | b6382f488ffcea89355876f04ffdc2b122c09509 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import inspect
import shutil
# Absolute directory of this conf.py (robust to how Sphinx is invoked).
__location__ = os.path.join(os.getcwd(), os.path.dirname(
    inspect.getfile(inspect.currentframe())))

# Make the package importable for autodoc: prepend ../src to sys.path.
sys.path.insert(0, os.path.join(__location__, '../src'))

# -- Run sphinx-apidoc ------------------------------------------------------
# Workaround: Read the Docs does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually

try:  # for Sphinx >= 1.7
    from sphinx.ext import apidoc
except ImportError:
    from sphinx import apidoc

output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/genvi")
try:
    # Remove stale generated API docs; a missing directory is fine.
    shutil.rmtree(output_dir)
except FileNotFoundError:
    pass

try:
    import sphinx
    from pkg_resources import parse_version

    cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
    cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
    args = cmd_line.split(" ")
    if parse_version(sphinx.__version__) >= parse_version('1.7'):
        # Sphinx >= 1.7 expects argv without the program name.
        args = args[1:]
    apidoc.main(args)
except Exception as e:
    # Best effort: a failed apidoc run should not abort the whole build.
    print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
              'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
              'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.mathjax',
              'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'genvi'
copyright = u'2020, Timothy Brathwaite'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''  # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = ''  # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'sidebar_width': '300px',
    'page_width': '1200px'
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# NOTE: if the package is importable, its real version overrides the empty
# `version`/`release` placeholders set above.
try:
    from genvi import __version__ as version
except ImportError:
    pass
else:
    release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'genvi-doc'

# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'user_guide.tex', u'genvi Documentation',
     u'Timothy Brathwaite', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True

# -- External mapping ------------------------------------------------------------
# Map to the docs of the Python minor version we are building under.
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
    'sphinx': ('http://www.sphinx-doc.org/en/stable', None),
    'python': ('https://docs.python.org/' + python_version, None),
    'matplotlib': ('https://matplotlib.org', None),
    'numpy': ('https://docs.scipy.org/doc/numpy', None),
    'sklearn': ('http://scikit-learn.org/stable', None),
    'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
    'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
}
| 33.578755 | 85 | 0.703284 |
3598bd430da64b169889803820f0ad8cdb82662e | 5,717 | py | Python | enroll/tests/model_tests.py | maciektr/enrollXchange | 1b579a6e4b92360bade28836686c733581f68c37 | [
"MIT"
] | null | null | null | enroll/tests/model_tests.py | maciektr/enrollXchange | 1b579a6e4b92360bade28836686c733581f68c37 | [
"MIT"
] | 10 | 2021-04-08T11:36:41.000Z | 2021-06-05T21:09:31.000Z | enroll/tests/model_tests.py | maciektr/enrollXchange | 1b579a6e4b92360bade28836686c733581f68c37 | [
"MIT"
] | 1 | 2021-05-29T20:33:12.000Z | 2021-05-29T20:33:12.000Z | import datetime as dt
from django.test import TestCase
from django.core.exceptions import ValidationError
from django.db import transaction
from django.db.utils import DataError, IntegrityError
from enroll.models import User, Student, Lecturer, Enrollment, ClassTime
from enroll.types import UserType
from enroll.utils import time_plus_minutes
class UtilsTestCase(TestCase):
    """Unit tests for helpers in enroll.utils."""

    def test_time_plus_minutes(self):
        """time_plus_minutes adds minutes correctly and wraps past midnight."""
        midnight = dt.time()
        # Adding zero minutes is an identity and must not mutate the input.
        self.assertEqual(midnight, time_plus_minutes(midnight, 0))
        self.assertEqual(midnight, dt.time())
        almost_midnight = dt.time(hour=23, minute=59)
        # 23:59 + 30 minutes wraps around to 00:29.
        self.assertEqual(time_plus_minutes(almost_midnight, 30),
                         dt.time(hour=0, minute=29))
        self.assertNotEqual(time_plus_minutes(almost_midnight, 0),
                            dt.time(hour=12, minute=0))
class UserTestCase(TestCase):
    """Base test case that provisions one account per user type.

    Each account is exposed as an attribute named after its role:
    self.new_user, self.student, self.teacher, self.moderator.
    """

    def setUp(self):
        role_usernames = [
            ("new_user", "testuser1"),
            ("student", "testuser2"),
            ("teacher", "testuser3"),
            ("moderator", "testuser4"),
        ]
        for role, username in role_usernames:
            account = User.objects.create(
                user_type=UserType.get_by_name(role),
                username=username,
                password="12345",
            )
            setattr(self, role, account)
class StudentTestCase(UserTestCase):
    """Validation tests for the Student profile model."""

    def setUp(self):
        super().setUp()

    def test_student_id_validation(self):
        """
        Assert that ValidationError is raised when student has incorrect student id number set.
        """
        # Too short (4 digits): model validation rejects it.
        with self.assertRaises(ValidationError):
            student = Student.objects.create(
                account=self.student,
                student_id="1234",
            )
            student.clean()
        with self.assertRaises(ValidationError):
            student.delete()
            # Non-numeric character in the id: also rejected by clean().
            student = Student.objects.create(
                account=self.student,
                student_id="123a56",
            )
            student.clean()
        with self.assertRaises(DataError):
            student.delete()
            # Eight digits exceed the column width; the database raises
            # DataError, so wrap in an atomic block to keep the
            # connection usable afterwards.
            with transaction.atomic():
                student = Student.objects.create(
                    account=self.student,
                    student_id="12345678",
                )
                student.clean()
        # A well-formed six-digit id passes validation.
        student = Student.objects.create(
            account=self.student,
            student_id="123456",
        )
        student.clean()
        with self.assertRaises(IntegrityError):
            # sid has to be unique
            student = Student.objects.create(
                account=self.student,
                student_id="123456",
            )
            student.clean()

    def test_student_user_type_validation(self):
        """
        Assert that ValidationError is raised when Student account
        has user_type different than student.
        """
        # A student-typed account is accepted.
        Student.objects.create(
            account=self.student,
            student_id="123456",
        ).clean()
        # Teacher and moderator accounts must be rejected.
        with self.assertRaises(ValidationError):
            Student.objects.create(
                account=self.teacher,
                student_id="123457",
            ).clean()
        with self.assertRaises(ValidationError):
            Student.objects.create(
                account=self.moderator,
                student_id="123458",
            ).clean()
class LecturerTestCase(UserTestCase):
    """Validation tests for the Lecturer profile model."""

    def setUp(self):
        super().setUp()

    def test_lecturer_account_validator(self):
        """A Lecturer profile is only valid when linked to a teacher account."""
        # A teacher-typed account passes validation.
        Lecturer.objects.create(account=self.teacher).clean()
        # Every other user type must be rejected.
        for invalid_account in (self.new_user, self.student):
            with self.assertRaises(ValidationError):
                Lecturer.objects.create(account=invalid_account).clean()
class EnrollmentTestCase(UserTestCase):
    """Tests for the Enrollment model's student typing."""

    def setUp(self):
        super().setUp()
        # A minimal ClassTime slot to enroll into.
        self.time = ClassTime.objects.create(
            day="1",
            frequency=ClassTime.FrequencyType.EVERY_WEEK,
            start=dt.time(),
            duration_minutes=0,
            seats=0,
        )

    def test_student_typing(self):
        """
        Assert that ValueError is raised when Enrollment is linked to a user
        other than a Student instance.
        """
        # A proper Student profile is accepted.
        Enrollment.objects.create(
            student=Student.objects.create(account=self.student, student_id="123456"),
            class_time=self.time,
        ).clean()
        # Assigning a raw User to the student FK raises ValueError.
        with self.assertRaises(ValueError):
            Enrollment.objects.create(student=self.new_user, class_time=self.time).clean()
        with self.assertRaises(ValueError):
            Enrollment.objects.create(student=self.teacher, class_time=self.time).clean()
class ClassTimeCase(TestCase):
    """Tests for computed attributes of the ClassTime model."""

    def test_end_property(self):
        """ClassTime.end equals start shifted by duration_minutes (wraps midnight)."""
        start_time = dt.time(hour=23, minute=59)
        duration_minutes = 10
        class_time = ClassTime.objects.create(
            day="1",
            frequency=ClassTime.FrequencyType.EVERY_WEEK,
            start=start_time,
            duration_minutes=duration_minutes,
            seats=0,
        )
        expected_end = time_plus_minutes(start_time, duration_minutes)
        self.assertEqual(expected_end, class_time.end)
| 33.629412 | 99 | 0.60014 |
ef40f62e694e83a4eb749c613a7593c495035d2c | 7,946 | py | Python | tensor2tensor/data_generators/mnist.py | sivaramakrishna7/tensor2tensor | eb0118d3f459913133e3d68a96944480a928bff1 | [
"Apache-2.0"
] | 5 | 2019-03-28T03:52:32.000Z | 2021-02-24T07:09:26.000Z | tensor2tensor/data_generators/mnist.py | sivaramakrishna7/tensor2tensor | eb0118d3f459913133e3d68a96944480a928bff1 | [
"Apache-2.0"
] | null | null | null | tensor2tensor/data_generators/mnist.py | sivaramakrishna7/tensor2tensor | eb0118d3f459913133e3d68a96944480a928bff1 | [
"Apache-2.0"
] | 2 | 2018-08-07T03:43:09.000Z | 2019-12-09T06:41:40.000Z | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MNIST."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import random
# Dependency imports
import numpy as np
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import image_utils
from tensor2tensor.utils import registry
import tensorflow as tf
# URLs and filenames for MNIST data.
_MNIST_URL = "http://yann.lecun.com/exdb/mnist/"
_MNIST_TRAIN_DATA_FILENAME = "train-images-idx3-ubyte.gz"
_MNIST_TRAIN_LABELS_FILENAME = "train-labels-idx1-ubyte.gz"
_MNIST_TEST_DATA_FILENAME = "t10k-images-idx3-ubyte.gz"
_MNIST_TEST_LABELS_FILENAME = "t10k-labels-idx1-ubyte.gz"
# Side length of each (square, single-channel) MNIST image in pixels.
_MNIST_IMAGE_SIZE = 28
def _get_mnist(directory):
  """Fetch the four MNIST archive files into `directory` if not present."""
  filenames = (_MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME,
               _MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME)
  for filename in filenames:
    # maybe_download is a no-op when the file already exists locally.
    generator_utils.maybe_download(directory, filename, _MNIST_URL + filename)
def _extract_mnist_images(filename, num_images):
  """Decode images from a gzipped MNIST (IDX) image file.

  Args:
    filename: The path to an MNIST images file.
    num_images: The number of images in the file.

  Returns:
    A uint8 numpy array of shape [number_of_images, height, width, channels].
  """
  pixels_per_image = _MNIST_IMAGE_SIZE * _MNIST_IMAGE_SIZE
  with gzip.open(filename) as stream:
    stream.read(16)  # Skip the fixed 16-byte IDX image-file header.
    raw = stream.read(pixels_per_image * num_images)
  images = np.frombuffer(raw, dtype=np.uint8)
  return images.reshape(num_images, _MNIST_IMAGE_SIZE, _MNIST_IMAGE_SIZE, 1)
def _extract_mnist_labels(filename, num_labels):
  """Decode class labels from a gzipped MNIST (IDX) label file.

  Args:
    filename: The path to an MNIST labels file.
    num_labels: The number of labels in the file.

  Returns:
    A int64 numpy array of shape [num_labels]
  """
  with gzip.open(filename) as stream:
    stream.read(8)  # Skip the fixed 8-byte IDX label-file header.
    raw = stream.read(num_labels)
  return np.frombuffer(raw, dtype=np.uint8).astype(np.int64)
def mnist_common_generator(tmp_dir,
                           training,
                           how_many,
                           data_filename,
                           label_filename,
                           start_from=0):
  """Image generator for MNIST.

  Args:
    tmp_dir: path to temporary storage directory.
    training: a Boolean; if true, we use the train set, otherwise the test set.
    how_many: how many images and labels to generate.
    data_filename: file that contains features data.
    label_filename: file that contains labels.
    start_from: from which image to start.

  Returns:
    An instance of image_generator that produces MNIST images.
  """
  data_path = os.path.join(tmp_dir, data_filename)
  labels_path = os.path.join(tmp_dir, label_filename)
  # Train split has 60000 examples, test split 10000.
  images = _extract_mnist_images(data_path, 60000 if training else 10000)
  labels = _extract_mnist_labels(labels_path, 60000 if training else 10000)
  # Shuffle the data to make sure classes are well distributed.
  # NOTE(review): the shuffle is unseeded, so slicing with start_from yields a
  # different subset on every call — presumably acceptable here; verify if a
  # reproducible train/tune split is required.
  data = list(zip(images, labels))
  random.shuffle(data)
  images, labels = list(zip(*data))
  return image_utils.image_generator(images[start_from:start_from + how_many],
                                     labels[start_from:start_from + how_many])
def mnist_generator(tmp_dir, training, how_many, start_from=0):
  """Image generator for MNIST.

  Args:
    tmp_dir: path to temporary storage directory.
    training: a Boolean; if true, we use the train set, otherwise the test set.
    how_many: how many images and labels to generate.
    start_from: from which image to start.

  Returns:
    An instance of image_generator that produces MNIST images.
  """
  _get_mnist(tmp_dir)  # Ensure the archives have been downloaded.
  if training:
    data_file = _MNIST_TRAIN_DATA_FILENAME
    labels_file = _MNIST_TRAIN_LABELS_FILENAME
  else:
    data_file = _MNIST_TEST_DATA_FILENAME
    labels_file = _MNIST_TEST_LABELS_FILENAME
  return mnist_common_generator(tmp_dir, training, how_many, data_file,
                                labels_file, start_from)
@registry.register_problem
class ImageMnistTune(image_utils.Image2ClassProblem):
  """MNIST, tuning data: first 55k train images for training, last 5k for eval."""

  @property
  def num_channels(self):
    # Grayscale images.
    return 1

  @property
  def is_small(self):
    return True

  @property
  def num_classes(self):
    return 10

  @property
  def class_labels(self):
    # Digit classes are labeled by their string representation, "0".."9".
    return [str(c) for c in range(self.num_classes)]

  @property
  def train_shards(self):
    return 10

  def preprocess_example(self, example, mode, unused_hparams):
    """Fix the static image shape and standardize pixel values."""
    image = example["inputs"]
    image.set_shape([_MNIST_IMAGE_SIZE, _MNIST_IMAGE_SIZE, 1])
    if not self._was_reversed:
      # Per-image mean/variance normalization (skipped for reversed problems).
      image = tf.image.per_image_standardization(image)
    example["inputs"] = image
    return example

  def generator(self, data_dir, tmp_dir, is_training):
    # Both splits come from the MNIST *train* set: the first 55000 examples
    # for training, the remaining 5000 held out for tuning/eval.
    if is_training:
      return mnist_generator(tmp_dir, True, 55000)
    else:
      return mnist_generator(tmp_dir, True, 5000, 55000)
@registry.register_problem
class ImageMnist(ImageMnistTune):
  """Full MNIST: 60k training images, 10k test images."""

  def generator(self, data_dir, tmp_dir, is_training):
    """Yield examples from the full train or test split."""
    how_many = 60000 if is_training else 10000
    return mnist_generator(tmp_dir, is_training, how_many)
# URLs and filenames for Fashion-MNIST data.
_FASHION_MNIST_URL = ("http://fashion-mnist.s3-website.eu-central-1"
                      ".amazonaws.com/")
# Prefix added to local copies so they don't collide with plain MNIST files.
_FASHION_MNIST_LOCAL_FILE_PREFIX = "fashion-"
_FASHION_MNIST_IMAGE_SIZE = 28
def _get_fashion_mnist(directory):
  """Fetch the four Fashion-MNIST archives into `directory` if not present."""
  filenames = (_MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME,
               _MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME)
  for filename in filenames:
    # Fashion-MNIST reuses the MNIST file names, so the local copy gets a
    # 'fashion-' prefix to keep the two datasets apart inside tmp_dir.
    local_name = _FASHION_MNIST_LOCAL_FILE_PREFIX + filename
    generator_utils.maybe_download(directory, local_name,
                                   _FASHION_MNIST_URL + filename)
def fashion_mnist_generator(tmp_dir, training, how_many, start_from=0):
  """Image generator for FashionMNIST.

  Args:
    tmp_dir: path to temporary storage directory.
    training: a Boolean; if true, we use the train set, otherwise the test set.
    how_many: how many images and labels to generate.
    start_from: from which image to start.

  Returns:
    An instance of image_generator that produces MNIST images.
  """
  _get_fashion_mnist(tmp_dir)
  if training:
    data_file = _MNIST_TRAIN_DATA_FILENAME
    labels_file = _MNIST_TRAIN_LABELS_FILENAME
  else:
    data_file = _MNIST_TEST_DATA_FILENAME
    labels_file = _MNIST_TEST_LABELS_FILENAME
  # Local copies carry the 'fashion-' prefix added by _get_fashion_mnist.
  return mnist_common_generator(
      tmp_dir, training, how_many,
      _FASHION_MNIST_LOCAL_FILE_PREFIX + data_file,
      _FASHION_MNIST_LOCAL_FILE_PREFIX + labels_file,
      start_from)
@registry.register_problem
class ImageFashionMnist(image_utils.Image2ClassProblem):
  """Fashion-MNIST: 10-class clothing-image classification."""

  @property
  def is_small(self):
    return True

  @property
  def num_classes(self):
    return 10

  @property
  def class_labels(self):
    # Classes are labeled by their index strings, "0".."9".
    return list(map(str, range(self.num_classes)))

  @property
  def train_shards(self):
    return 10

  def generator(self, data_dir, tmp_dir, is_training):
    """Yield examples from the full train (60k) or test (10k) split."""
    how_many = 60000 if is_training else 10000
    return fashion_mnist_generator(tmp_dir, is_training, how_many)
| 31.531746 | 80 | 0.73106 |
b80ee527cc2ee351644cf4a81440f300f26551c1 | 1,709 | py | Python | kingfisher_scrapy/commands/crawlall.py | open-contracting/kingfisher-collect | 2fbbd6361a0ec959e0603343a4b363f97fae3815 | [
"BSD-3-Clause"
] | 7 | 2020-07-24T13:15:37.000Z | 2021-12-11T22:40:07.000Z | kingfisher_scrapy/commands/crawlall.py | open-contracting/kingfisher-collect | 2fbbd6361a0ec959e0603343a4b363f97fae3815 | [
"BSD-3-Clause"
] | 418 | 2020-04-27T22:15:27.000Z | 2022-03-31T23:49:34.000Z | kingfisher_scrapy/commands/crawlall.py | open-contracting/kingfisher-collect | 2fbbd6361a0ec959e0603343a4b363f97fae3815 | [
"BSD-3-Clause"
] | 6 | 2020-05-28T16:06:53.000Z | 2021-03-16T02:54:15.000Z | from scrapy.commands import ScrapyCommand
from scrapy.exceptions import UsageError
class CrawlAll(ScrapyCommand):
    """Scrapy command that runs every registered spider (or a given subset).

    Exactly one of --dry-run (write nothing) or --sample N (write N files)
    must be supplied.
    """

    def syntax(self):
        return '[options] [spider ...]'

    def short_desc(self):
        return 'Run all spiders'

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        parser.add_option('--dry-run', action='store_true', help='Runs the spiders without writing any files')
        parser.add_option('--sample', type=int, help='The number of files to write')

    def run(self, args, opts):
        # The two modes are mutually exclusive and one is required (XOR).
        if not (bool(opts.dry_run) ^ bool(opts.sample)):
            raise UsageError('Exactly one of --dry-run or --sample must be set.')
        if opts.sample is not None and opts.sample <= 0:
            raise UsageError('--sample must be a positive integer.')

        kwargs = {}
        # Telnet console is always disabled for batch runs.
        extensions = {'scrapy.extensions.telnet.TelnetConsole': None}
        if opts.dry_run:
            kwargs['sample'] = 1
        else:
            # Sampling mode still writes files, so enable the store extension.
            extensions['kingfisher_scrapy.extensions.FilesStore'] = 100
            if opts.sample:
                kwargs['sample'] = opts.sample
        # Stop after one item or error.
        self.settings.set('CLOSESPIDER_ERRORCOUNT', 1)
        # Disable LogStats extension.
        self.settings.set('LOGSTATS_INTERVAL', None)
        # Disable custom and Telnet extensions.
        self.settings.set('EXTENSIONS', extensions)

        # Crawl every known spider, or only those named on the command line.
        for spider_name in self.crawler_process.spider_loader.list():
            if not args or spider_name in args:
                spidercls = self.crawler_process.spider_loader.load(spider_name)
                self.crawler_process.crawl(spidercls, **kwargs)
        self.crawler_process.start()
437e20163c585ebc942948a499eaf5e79e6b5257 | 2,454 | py | Python | {{cookiecutter.github_repository}}/tests/conftest.py | ricardoesc25/django-init | e48e6e238d967cca191db122ae753e1c0bcaad50 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.github_repository}}/tests/conftest.py | ricardoesc25/django-init | e48e6e238d967cca191db122ae753e1c0bcaad50 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.github_repository}}/tests/conftest.py | ricardoesc25/django-init | e48e6e238d967cca191db122ae753e1c0bcaad50 | [
"BSD-3-Clause"
] | null | null | null | """
This module is used to provide configuration, fixtures, and plugins for pytest.
It may be also used for extending doctest's context:
1. https://docs.python.org/3/library/doctest.html
2. https://docs.pytest.org/en/latest/doctest.html
"""
# Standard Library
import functools
from unittest import mock
# Third Party Stuff
import pytest
class PartialMethodCaller:
    """Proxy that pre-binds keyword arguments onto every method of `obj`.

    Attribute access returns the named method of the wrapped object with
    `partial_params` already applied as keyword arguments.
    """

    def __init__(self, obj, **partial_params):
        self.obj = obj
        self.partial_params = partial_params

    def __getattr__(self, name):
        method = getattr(self.obj, name)
        return functools.partial(method, **self.partial_params)
@pytest.fixture(autouse=True, scope="function")
def cleared_cache():
    """Expose the Django cache, guaranteed to be empty at test start.

    Autouse: the cache is wiped before every single test runs.
    """
    # Imported lazily so collection works before Django settings are loaded.
    from django.core.cache import cache

    cache.clear()
    return cache
@pytest.fixture(autouse=True, scope="function")
def media_root(settings, tmpdir_factory):
    """Redirect Django media uploads into a per-test temporary directory."""
    temp_media_dir = tmpdir_factory.mktemp("media", numbered=True)
    settings.MEDIA_ROOT = temp_media_dir
    return settings.MEDIA_ROOT
@pytest.fixture
def client():
    """Django test client with two convenient overridden behaviors:

    - ``login(user=...)`` authenticates a session directly from a user object.
    - ``client.json`` proxies requests with a JSON content type pre-set.
    """
    from django.test import Client

    class _Client(Client):
        def login(
            self,
            user=None,
            backend="django.contrib.auth.backends.ModelBackend",
            **credentials
        ):
            """Log in, optionally from a user object without credentials.

            When ``user`` is given, ``django.contrib.auth.authenticate`` is
            patched to return it, so no password check is performed.
            """
            if user is None:
                return super().login(**credentials)

            with mock.patch("django.contrib.auth.authenticate") as authenticate:
                # Django's session login requires the backend attribute.
                user.backend = backend
                authenticate.return_value = user
                return super().login(**credentials)

        @property
        def json(self):
            """Proxy that sends requests with a JSON content type.

            Usages:
            >>> import json
            >>> url = reverse("api-login")
            >>> client.json.get(url)
            >>> client.json.post(url, data=json.dumps(payload))
            """
            return PartialMethodCaller(
                obj=self, content_type='application/json;charset="utf-8"'
            )

    return _Client()
dbb09eace40f317230211ec2d07e918da543525a | 1,568 | py | Python | pytorch/resume_replicate_model_genesis.py | mistermoutan/ModelsGenesis | 98af7075b93311fe655e9692773eb1ce015b8bd0 | [
"MIT"
] | null | null | null | pytorch/resume_replicate_model_genesis.py | mistermoutan/ModelsGenesis | 98af7075b93311fe655e9692773eb1ce015b8bd0 | [
"MIT"
] | null | null | null | pytorch/resume_replicate_model_genesis.py | mistermoutan/ModelsGenesis | 98af7075b93311fe655e9692773eb1ce015b8bd0 | [
"MIT"
] | null | null | null | from finetune_config import FineTuneConfig
from config import models_genesis_config
from dataset import Dataset
from finetune import Trainer
from utils import make_dir
def resume_replication_of_results_pretrain(run_nr: int):
    """Resume a Models Genesis self-supervised pretraining run.

    `run_nr` identifies the run whose latest checkpoint is picked up.
    """
    config = models_genesis_config()
    config.override_dirs(run_nr)  # Point output/checkpoint dirs at this run.
    config.resume_ss = True
    config.scheduler_ss = "ReduceLROnPlateau"
    config.display()

    def _fold_filenames(fold):
        # Each fold index maps to one pre-extracted cube batch file.
        return ["bat_32_s_64x64x32_" + str(i) + ".npy" for i in fold]

    files = [
        _fold_filenames(config.train_fold),
        _fold_filenames(config.valid_fold),
        # Unclear how the test fold is consumed downstream (kept as-is).
        _fold_filenames(config.test_fold),
    ]
    # The train_val_test ratios are irrelevant: explicit file_names override them.
    dataset = Dataset(config.data_dir, train_val_test=(0.8, 0.2, 0), file_names=files)

    trainer_mg_replication = Trainer(config, dataset)
    # override_dirs above is what lets the trainer find the checkpoint to resume.
    trainer_mg_replication.load_model(from_latest_checkpoint=True)
    trainer_mg_replication.finetune_self_supervised()
    trainer_mg_replication.add_hparams_to_writer()
    trainer_mg_replication.get_stats()
# CLI entry point: `python resume_replicate_model_genesis.py --run N`
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--run", required=True, dest="run", type=int)
    args = parser.parse_args()
    print("RESUMING RUN {}".format(args.run))
    resume_replication_of_results_pretrain(args.run)
871891319498320b06b63d5daee75e3e4f0a4f5a | 3,790 | py | Python | bigbench/models/human_model.py | dimmollo/BIG-bench | f0dffeb4f16ef5489686a81e2d63362d251cda3e | [
"Apache-2.0"
] | null | null | null | bigbench/models/human_model.py | dimmollo/BIG-bench | f0dffeb4f16ef5489686a81e2d63362d251cda3e | [
"Apache-2.0"
] | null | null | null | bigbench/models/human_model.py | dimmollo/BIG-bench | f0dffeb4f16ef5489686a81e2d63362d251cda3e | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bigbench.api.model as model
import bigbench.api.util as util
import numpy as np
import scipy
class HumanModel(model.Model):
    """Interactive BIG-bench model in which a human supplies every response.

    Every call is appended to ``self.queries`` as a transcript for later
    inspection.
    """

    def __init__(self):
        # Transcript of all queries made to the human evaluator.
        self.queries = []

    def generate_text(
        self, inputs, max_length=1000, stop_string=None, output_regex=None
    ):
        """Prompt the human to type a continuation for each input string.

        Returns a single string if `inputs` is a string, else a list of
        strings (one continuation per input).
        """
        if isinstance(inputs, str):
            inputs = [inputs]
        outputs = []
        print(
            f"Please write a continuation of each of the following {len(inputs)} input strings."
        )
        for i, context in enumerate(inputs):
            print(f"Input {i+1} of {len(inputs)}")
            print(context)
            output = input()
            # Apply the same truncation/stop-string/regex post-processing
            # that automated models receive.
            output = util.postprocess_output(
                output, max_length, stop_string, output_regex
            )
            outputs.append(output)
        samples = [
            {"input": inputs[i], "output": outputs[i]} for i in range(len(inputs))
        ]
        self.queries.append(
            {
                "function": "human_text_generation_fn",
                "max_length": max_length,
                "stop_string": stop_string,
                "output_regex": output_regex,
                "samples": samples,
            }
        )
        # Mirror the input arity: a scalar input yields a scalar output.
        if len(inputs) == 1:
            outputs = outputs[0]
        return outputs

    def cond_log_prob(self, inputs, targets, absolute_normalization=False):
        """Ask the human to pick the most natural continuation per input.

        The chosen option receives log-probability 0; all others -inf.
        ``absolute_normalization`` is accepted for interface compatibility
        but is not used by this implementation.
        """
        if isinstance(inputs, str):
            inputs = [inputs]
            targets = [targets]
        outputs = []
        print(
            f"Please provide the most natural continuation of each of the following {len(inputs)} text inputs from a multiple choice list, by entering the number that corresponds to your choice."
        )
        for i, context in enumerate(inputs):
            num_choices = len(targets[i])
            print(f"Input {i+1} of {len(inputs)}")
            print(context)
            for j, target in enumerate(targets[i]):
                print(f"Option {j+1}: {target}")
            user_choice = input("The best option is:")
            # Re-prompt until a valid 1-based option number is entered.
            while user_choice not in [str(i) for i in range(1, 1 + num_choices)]:
                print(
                    f"Your answer {user_choice} is not valid, please try again with a number between 1 and {num_choices}."
                )
                user_choice = input("The best option is:")
            output = [-np.inf] * num_choices
            output[int(user_choice) - 1] = 0
            outputs.append(output)
        samples = [
            {"input": inputs[i], "targets": targets[i], "outputs": outputs[i]}
            for i in range(len(inputs))
        ]
        self.queries.append(
            {"function": "human_conditional_prob_fn", "samples": samples}
        )
        # Mirror the input arity: a scalar input yields a single score list.
        if len(inputs) == 1:
            outputs = outputs[0]
        return outputs

    def model_data(self):
        """Return static metadata describing this 'model'."""
        return model.ModelData(model_family='Human', model_name='Human',
                               total_params=2, non_embedding_params=1,
                               flop_matched_non_embedding_params=1,
                               training_batch_size=1,
                               training_steps=1,
                               description='Human evaluation')
405861eeb752e999df788ac666d6ebbd3579f2e9 | 5,608 | py | Python | homeassistant/helpers/discovery.py | MoshonkaKita/Golovastik | df2ab62ce9b245b9b0f976af8c9868d9b416733b | [
"Apache-2.0"
] | 3 | 2019-01-31T13:41:37.000Z | 2020-05-20T14:22:18.000Z | homeassistant/helpers/discovery.py | MoshonkaKita/Golovastik | df2ab62ce9b245b9b0f976af8c9868d9b416733b | [
"Apache-2.0"
] | 5 | 2021-02-08T20:32:11.000Z | 2022-01-13T01:19:23.000Z | homeassistant/helpers/discovery.py | MoshonkaKita/Golovastik | df2ab62ce9b245b9b0f976af8c9868d9b416733b | [
"Apache-2.0"
] | 1 | 2021-05-31T08:13:56.000Z | 2021-05-31T08:13:56.000Z | """Helper methods to help with platform discovery.
There are two different types of discoveries that can be fired/listened for.
- listen/discover is for services. These are targeted at a component.
- listen_platform/discover_platform is for platforms. These are used by
components to allow discovery of their platforms.
"""
from homeassistant import setup, core
from homeassistant.loader import bind_hass
from homeassistant.const import (
ATTR_DISCOVERED, ATTR_SERVICE, EVENT_PLATFORM_DISCOVERED)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.loader import DEPENDENCY_BLACKLIST
from homeassistant.util.async_ import run_callback_threadsafe
# Event-service name template for platform-load requests ('load_platform.<component>').
EVENT_LOAD_PLATFORM = 'load_platform.{}'
ATTR_PLATFORM = 'platform'
@bind_hass
def listen(hass, service, callback):
    """Set up listener for discovery of specific service.

    Service can be a string or a list/tuple.
    """
    # Thread-safe bridge onto the event loop; block until registered.
    future = run_callback_threadsafe(
        hass.loop, async_listen, hass, service, callback)
    future.result()
@core.callback
@bind_hass
def async_listen(hass, service, callback):
    """Set up listener for discovery of specific service.

    Service can be a string or a list/tuple.
    """
    # Normalize to an immutable tuple so membership tests are uniform.
    services = (service,) if isinstance(service, str) else tuple(service)

    @core.callback
    def discovery_event_listener(event):
        """Forward matching discovery events to the callback."""
        if ATTR_SERVICE in event.data and event.data[ATTR_SERVICE] in services:
            hass.async_add_job(callback, event.data[ATTR_SERVICE],
                               event.data.get(ATTR_DISCOVERED))

    hass.bus.async_listen(EVENT_PLATFORM_DISCOVERED, discovery_event_listener)
@bind_hass
def discover(hass, service, discovered=None, component=None, hass_config=None):
    """Fire discovery event. Can ensure a component is loaded."""
    coroutine = async_discover(hass, service, discovered, component, hass_config)
    hass.add_job(coroutine)
@bind_hass
async def async_discover(hass, service, discovered=None, component=None,
                         hass_config=None):
    """Fire discovery event. Can ensure a component is loaded."""
    # Refuse components that must never be loaded via discovery.
    if component in DEPENDENCY_BLACKLIST:
        raise HomeAssistantError(
            'Cannot discover the {} component.'.format(component))

    # Lazily set up the target component before announcing the service.
    if component is not None and component not in hass.config.components:
        await setup.async_setup_component(
            hass, component, hass_config)

    data = {
        ATTR_SERVICE: service
    }

    if discovered is not None:
        data[ATTR_DISCOVERED] = discovered

    hass.bus.async_fire(EVENT_PLATFORM_DISCOVERED, data)
@bind_hass
def listen_platform(hass, component, callback):
    """Register a platform loader listener.

    Thread-safe wrapper around async_listen_platform.
    """
    future = run_callback_threadsafe(
        hass.loop, async_listen_platform, hass, component, callback)
    future.result()
@bind_hass
def async_listen_platform(hass, component, callback):
    """Register a platform loader listener.

    This method must be run in the event loop.
    """
    expected_service = EVENT_LOAD_PLATFORM.format(component)

    @core.callback
    def discovery_platform_listener(event):
        """Dispatch platform discovery events for this component."""
        if event.data.get(ATTR_SERVICE) != expected_service:
            return

        platform = event.data.get(ATTR_PLATFORM)
        if not platform:
            return

        hass.async_run_job(
            callback, platform, event.data.get(ATTR_DISCOVERED))

    hass.bus.async_listen(
        EVENT_PLATFORM_DISCOVERED, discovery_platform_listener)
@bind_hass
def load_platform(hass, component, platform, discovered, hass_config):
    """Load a component and platform dynamically.

    Target components will be loaded and an EVENT_PLATFORM_DISCOVERED will be
    fired to load the platform. The event will contain:
        { ATTR_SERVICE = LOAD_PLATFORM + '.' + <<component>>
          ATTR_PLATFORM = <<platform>>
          ATTR_DISCOVERED = <<discovery info>> }

    Use `listen_platform` to register a callback for these events.
    """
    coroutine = async_load_platform(hass, component, platform, discovered,
                                    hass_config)
    hass.add_job(coroutine)
@bind_hass
async def async_load_platform(hass, component, platform, discovered,
                              hass_config):
    """Load a component and platform dynamically.

    Target components will be loaded and an EVENT_PLATFORM_DISCOVERED will be
    fired to load the platform. The event will contain:
        { ATTR_SERVICE = LOAD_PLATFORM + '.' + <<component>>
          ATTR_PLATFORM = <<platform>>
          ATTR_DISCOVERED = <<discovery info>> }

    Use `listen_platform` to register a callback for these events.

    Warning: Do not await this inside a setup method to avoid a dead lock.
    Use `hass.async_create_task(async_load_platform(..))` instead.

    This method is a coroutine.
    """
    assert hass_config, 'You need to pass in the real hass config'

    if component in DEPENDENCY_BLACKLIST:
        raise HomeAssistantError(
            'Cannot discover the {} component.'.format(component))

    if component not in hass.config.components:
        # No need to fire the event if we could not set up the component.
        if not await setup.async_setup_component(hass, component, hass_config):
            return

    payload = {
        ATTR_SERVICE: EVENT_LOAD_PLATFORM.format(component),
        ATTR_PLATFORM: platform,
    }
    if discovered is not None:
        payload[ATTR_DISCOVERED] = discovered

    hass.bus.async_fire(EVENT_PLATFORM_DISCOVERED, payload)
| 31.863636 | 79 | 0.696327 |
82a6292ed2190ec0f075399ecb6fb0bd5af1c7a1 | 728 | py | Python | example/urls.py | callowayproject/django-stories | ea0398d69ea597819d0a6c75d4a3f65820321e13 | [
"Apache-2.0"
] | 10 | 2015-06-25T23:35:29.000Z | 2021-08-20T04:22:00.000Z | example/urls.py | callowayproject/django-stories | ea0398d69ea597819d0a6c75d4a3f65820321e13 | [
"Apache-2.0"
] | null | null | null | example/urls.py | callowayproject/django-stories | ea0398d69ea597819d0a6c75d4a3f65820321e13 | [
"Apache-2.0"
] | 2 | 2017-03-21T04:10:29.000Z | 2020-04-06T12:38:12.000Z | from django.conf.urls.defaults import patterns, include
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routing table for the example project (legacy Django patterns() style).
urlpatterns = patterns('',
    # Example:
    # News stories app.
    (r'^news/', include('stories.urls')),
    # Simple user profile pages.
    (r'^people/', include('simpleprofile.urls')),
    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    (r'^admin/', include(admin.site.urls)),
    # Serve media through Django's static view (development use only).
    (r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
653d77228864bce74662507330838416facd1ce3 | 1,898 | py | Python | docs/source/conf.py | joelegner/leglib | 5f7f4cc48112302bb48857d85435c42fb8c72169 | [
"MIT"
] | null | null | null | docs/source/conf.py | joelegner/leglib | 5f7f4cc48112302bb48857d85435c42fb8c72169 | [
"MIT"
] | null | null | null | docs/source/conf.py | joelegner/leglib | 5f7f4cc48112302bb48857d85435c42fb8c72169 | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the package importable so autodoc (if enabled later) can find it.
sys.path.insert(0, os.path.abspath('../../leglib/'))
# -- Project information -----------------------------------------------------
project = 'leglib'
copyright = '2020, Joe Legner'
author = 'Joe Legner'
# The full version, including alpha/beta/rc tags
release = '0.0.5'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
4030cced5ed7949568eaac0501baffd721fcdf3f | 1,975 | py | Python | Python Programs/zero-arrow-pattern-printing.py | muhammad-masood-ur-rehman/Skillrack | 71a25417c89d0efab40ee6229ccd758b26ae4312 | [
"CC0-1.0"
] | 2 | 2021-06-26T21:50:59.000Z | 2021-09-18T04:55:51.000Z | Python Programs/zero-arrow-pattern-printing.py | muhammad-masood-ur-rehman/Skillrack | 71a25417c89d0efab40ee6229ccd758b26ae4312 | [
"CC0-1.0"
] | null | null | null | Python Programs/zero-arrow-pattern-printing.py | muhammad-masood-ur-rehman/Skillrack | 71a25417c89d0efab40ee6229ccd758b26ae4312 | [
"CC0-1.0"
] | null | null | null | Zero Arrow Pattern Printing
The program must accept an integer N as the input. The program must print the pattern of (2*N)-1 lines based on the following conditions.
- Each row of the pattern contains N characters.
- In the 1st row, the Nth character is 0 and the remaining characters are asterisks.
- In the 2nd row, the Nth, (N-1)th characters are 0 and the remaining characters are asterisks.
- In the 3rd row, the Nth, (N-2)th characters are 0 and the remaining characters are asterisks.
- Similarly, the first N lines of the pattern are printed.
- In the (N+1)th row, the Nth, 2nd characters are 0 and the remaining characters are asterisks.
- In the (N+2)th row, the Nth, 3rd characters are 0 and the remaining characters are asterisks.
- Similarly, the remaining lines of the pattern are printed.
Note: All characters in each row must be separated by a space.
Boundary Condition(s):
3 <= N <= 100
Input Format:
The first line contains N.
Output Format:
The first (2*N)-1 lines containing the desired pattern as per the given conditions.
Example Input/Output 1:
Input:
3
Output:
* * 0
* 0 0
0 * 0
* 0 0
* * 0
Explanation:
Here N = 3, so the pattern contains 5 lines ((2*3)-1).
In the 1st row, the 3rd character is 0 and the remaining characters are asterisks.
* * 0
In the 2nd row, the 3rd, 2nd characters are 0 and the remaining character is an asterisk.
* 0 0
In the 3rd row, the 3rd, 1st characters are 0 and the remaining character is an asterisk.
0 * 0
In the 4th row, the 3rd, 2nd characters are 0 and the remaining character is an asterisk.
* 0 0
In the 5th row, the 3rd character is 0 and the remaining characters are asterisks.
* * 0
Example Input/Output 2:
Input:
5
Output:
* * * * 0
* * * 0 0
* * 0 * 0
* 0 * * 0
0 * * * 0
* 0 * * 0
* * 0 * 0
* * * 0 0
* * * * 0
# Read N, then print the (2*N)-1 line "zero arrow" pattern.
n=int(input())
for i in range((2*n)-1):
    for j in range(n):
        # A cell is "0" when it is the last column ((j+1)%n==0) or the
        # column mirroring this row's distance from the middle row
        # (abs(n-1-i)); every other cell is "*".
        if((j+1)%n==0 or j==abs(n-1-i)%n):
            print("0",end=" ")
        else:
            print("*",end=" ")
    # End the row (each printed cell already carries a trailing space).
    print()
| 32.916667 | 137 | 0.687595 |
8d8aba894c8a0fda7798c50e2e3519ecf14badd0 | 2,465 | py | Python | sdk/gcc_arm_embedded_4_9_mac/arm-none-eabi/lib/armv7-m/libstdc++.a-gdb.py | Bardo91/fruitymesh_grvc | 872d3cb5e92a7fa6d4823b7295d20f459058fb19 | [
"OLDAP-2.4"
] | null | null | null | sdk/gcc_arm_embedded_4_9_mac/arm-none-eabi/lib/armv7-m/libstdc++.a-gdb.py | Bardo91/fruitymesh_grvc | 872d3cb5e92a7fa6d4823b7295d20f459058fb19 | [
"OLDAP-2.4"
] | 2 | 2017-09-19T11:46:02.000Z | 2017-09-19T11:49:14.000Z | sdk/gcc_arm_embedded_4_9_mac/arm-none-eabi/lib/armv7-m/libstdc++.a-gdb.py | Bardo91/fruitymesh_grvc | 872d3cb5e92a7fa6d4823b7295d20f459058fb19 | [
"OLDAP-2.4"
] | null | null | null | # -*- python -*-
# Copyright (C) 2009-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
# Install-time locations baked in by the GCC build; rewritten below into a
# path relative to the loaded objfile so the toolchain tree is relocatable.
pythondir = '/Users/build/GCC-4-9-build/gcc-arm-none-eabi-4_9-2015q2-20150609/install-native/share/gcc-arm-none-eabi'
libdir = '/Users/build/GCC-4-9-build/gcc-arm-none-eabi-4_9-2015q2-20150609/install-native/arm-none-eabi/lib/armv7-m'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
    # Update module path. We want to find the relative path from libdir
    # to pythondir, and then we want to apply that relative path to the
    # directory holding the objfile with which this file is associated.
    # This preserves relocatability of the gcc tree.
    # Do a simple normalization that removes duplicate separators.
    pythondir = os.path.normpath (pythondir)
    libdir = os.path.normpath (libdir)
    prefix = os.path.commonprefix ([libdir, pythondir])
    # In some bizarre configuration we might have found a match in the
    # middle of a directory name.
    # NOTE(review): prefix[-1] would raise IndexError if commonprefix
    # returned '' — presumably impossible here since both baked-in paths
    # share the install root; confirm if these constants ever change.
    if prefix[-1] != '/':
        prefix = os.path.dirname (prefix) + '/'
    # Strip off the prefix.
    pythondir = pythondir[len (prefix):]
    libdir = libdir[len (prefix):]
    # Compute the ".."s needed to get from libdir to the prefix.
    dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
    objfile = gdb.current_objfile ().filename
    dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
    if not dir_ in sys.path:
        sys.path.insert(0, dir_)
# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
| 40.409836 | 117 | 0.723732 |
fdf79b5389882e2e0a8870933819f79a5bb0a977 | 1,164 | py | Python | tests/unit/test_lists.py | staticdev/human-readable | 1c3328560f9b8097e1bc3ec6fceefa486c264fd5 | [
"MIT"
] | 5 | 2021-03-10T21:22:31.000Z | 2022-03-23T04:38:07.000Z | tests/unit/test_lists.py | staticdev/human-readable | 1c3328560f9b8097e1bc3ec6fceefa486c264fd5 | [
"MIT"
] | 59 | 2021-02-13T10:08:23.000Z | 2022-03-14T19:43:55.000Z | tests/unit/test_lists.py | staticdev/human-readable | 1c3328560f9b8097e1bc3ec6fceefa486c264fd5 | [
"MIT"
] | null | null | null | """Tests for listing humanization."""
from __future__ import annotations
import pytest
import human_readable.lists as lists
@pytest.mark.parametrize(
    "params, expected",
    [
        (([], ","), ""),  # empty list
        ((["jorbas"], ","), "jorbas"),  # one element
        ((["jorbas", "maria"], ","), "jorbas, maria"),  # two elements
        ((["jorbas", "maria"], ""), "jorbas maria"),  # empty separator
    ],
)
def test_listing(params: tuple[list[str], str], expected: str) -> None:
    """Listing with separator."""
    # params is (items, separator); unpacked positionally into listing().
    assert lists.listing(*params) == expected
@pytest.mark.parametrize(
    "params, expected",
    [
        (([], ";", "or"), ""),  # empty list
        ((["jorbas"], ";", "or"), "jorbas"),  # one element
        ((["jorbas", "maria"], ";", "or"), "jorbas or maria"),  # two elements
        (
            (["jorbas", "maria", "gustavo"], ";", "or"),
            "jorbas; maria or gustavo",
        ),  # three elements: conjunction only before the last item
    ],
)
def test_listing_with_conjunction(
    params: tuple[list[str], str, str], expected: str
) -> None:
    """Listing with separator and conjunction."""
    # params is (items, separator, conjunction).
    assert lists.listing(*params) == expected
| 29.1 | 78 | 0.547251 |
416b0993bdf4655333c7f7f7dcf494c0ebd4c043 | 13,694 | py | Python | assignments/assignment4/starter_code/main.py | mebusy/cs234_RL_2019_stanford | 6ca051294f8af5257a051d2933fcc6a39177f24d | [
"MIT"
] | null | null | null | assignments/assignment4/starter_code/main.py | mebusy/cs234_RL_2019_stanford | 6ca051294f8af5257a051d2933fcc6a39177f24d | [
"MIT"
] | null | null | null | assignments/assignment4/starter_code/main.py | mebusy/cs234_RL_2019_stanford | 6ca051294f8af5257a051d2933fcc6a39177f24d | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
import numpy as np
import csv
import os
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from data import load_data, LABEL_KEY
import pdb
def dose_class(weekly_dose):
    """Bucket a weekly dose into a discrete dose class.

    Args:
        weekly_dose: numeric weekly dose.

    Returns:
        'low' for doses below 21, 'medium' for 21-49 inclusive,
        'high' for anything above 49.
    """
    if weekly_dose < 21:
        return 'low'
    # Chained comparison replaces the redundant `21 <= x and x <= 49`;
    # reaching here already implies weekly_dose >= 21.
    if weekly_dose <= 49:
        return 'medium'
    return 'high'
# Base classes
class BanditPolicy(ABC):
    """Abstract interface every dosing policy implements."""
    @abstractmethod
    def choose(self, x): pass  # pick an action for patient features x
    @abstractmethod
    def update(self, x, a, r): pass  # observe reward r for action a in context x
class StaticPolicy(BanditPolicy):
    """Policy that never learns: update() is a no-op."""
    def update(self, x, a, r): pass
class RandomPolicy(StaticPolicy):
    """Baseline that picks an arm at random, ignoring the features."""

    def __init__(self, probs=None):
        # Default to a uniform distribution over the three arms.
        if probs is None:
            probs = [1. / 3., 1. / 3., 1. / 3.]
        self.probs = probs

    def choose(self, x):
        """Draw one of the dose classes according to self.probs."""
        return np.random.choice(('low', 'medium', 'high'), p=self.probs)
# Baselines
class FixedDosePolicy(StaticPolicy):
    """Assignment skeleton: always prescribe the same fixed dose class."""
    def choose(self, x):
        """
        Args:
            x: Dictionary containing the possible patient features.
        Returns:
            output: string containing one of ('low', 'medium', 'high')
        TODO:
            Please implement the fixed dose algorithm.
        """
        #######################################################
        #########   YOUR CODE HERE - ~1 lines.   #############
        # Placeholder return until the TODO above is implemented.
        return ""
        #######################################################
        #########
class ClinicalDosingPolicy(StaticPolicy):
    """Assignment skeleton: dose from patient features via the clinical formula."""
    def choose(self, x):
        """
        Args:
            x: Dictionary containing the possible patient features.
        Returns:
            output: string containing one of ('low', 'medium', 'high')
        TODO:
            Please implement the Clinical Dosing algorithm.
        Hint:
            - You may need to do a little data processing here.
            - Look at the "main" function to see the key values of the features you can use. The
              age in decades is implemented for you as an example.
            - You can treat Unknown race as missing or mixed race.
            - Use dose_class() implemented for you.
        """
        # Example feature extraction (unused until the TODO is implemented).
        age_in_decades = x['Age in decades']
        #######################################################
        #########   YOUR CODE HERE - ~2-10 lines.   #############
        # Placeholder return until the TODO above is implemented.
        return ""
        #######################################################
        #########
# Upper Confidence Bound Linear Bandit
class LinUCB(BanditPolicy):
    """Assignment skeleton: Disjoint Linear UCB contextual bandit."""
    def __init__(self, n_arms, features, alpha=1.):
        """
        See Algorithm 1 from paper:
            "A Contextual-Bandit Approach to Personalized News Article Recommendation"
        Args:
            n_arms: int, the number of different arms/ actions the algorithm can take
            features: list of strings, contains the patient features to use
            alpha: float, hyperparameter for step size.
        TODO:
            Please initialize the following internal variables for the Disjoint Linear Upper Confidence Bound Bandit algorithm.
            Please refer to the paper to understand what they are.
            Please feel free to add additional internal variables if you need them, but they are not necessary.
        Hints:
            Keep track of a separate A, b for each action (this is what the Disjoint in the algorithm name means)
        """
        #######################################################
        #########   YOUR CODE HERE - ~5 lines.   #############
        # All None until the TODO above is implemented.
        self.n_arms = None
        self.features = None
        self.alpha = None
        self.A = None
        self.b = None
        #######################################################
        #########  END YOUR CODE.  ############
    def choose(self, x):
        """
        See Algorithm 1 from paper:
            "A Contextual-Bandit Approach to Personalized News Article Recommendation"
        Args:
            x: Dictionary containing the possible patient features.
        Returns:
            output: string containing one of ('low', 'medium', 'high')
        TODO:
            Please implement the "forward pass" for Disjoint Linear Upper Confidence Bound Bandit algorithm.
        """
        #######################################################
        #########   YOUR CODE HERE - ~7 lines.   #############
        # Placeholder return until the TODO above is implemented.
        return ""
        #######################################################
        #########
    def update(self, x, a, r):
        """
        See Algorithm 1 from paper:
            "A Contextual-Bandit Approach to Personalized News Article Recommendation"
        Args:
            x: Dictionary containing the possible patient features.
            a: string, indicating the action your algorithm chose ('low', 'medium', 'high')
            r: the reward you received for that action
        Returns:
            Nothing
        TODO:
            Please implement the update step for Disjoint Linear Upper Confidence Bound Bandit algorithm.
            Hint: Which parameters should you update?
        """
        #######################################################
        #########   YOUR CODE HERE - ~4 lines.   #############
        #######################################################
        #########  END YOUR CODE.  ############
# eGreedy Linear bandit
class eGreedyLinB(LinUCB):
    """Assignment skeleton: epsilon-greedy linear bandit built on LinUCB state."""
    def __init__(self, n_arms, features, alpha=1.):
        # Bug fix: forward the caller's alpha instead of hard-coding 1.,
        # otherwise the --ep command-line hyperparameter was silently ignored.
        super(eGreedyLinB, self).__init__(n_arms, features, alpha=alpha)
        self.time = 0  # number of choose() calls; drives the epsilon decay
    def choose(self, x):
        """
        Args:
            x: Dictionary containing the possible patient features.
        Returns:
            output: string containing one of ('low', 'medium', 'high')
        TODO:
            Instead of using the Upper Confidence Bound to find which action to take,
            compute the probability of each action using a simple dot product between Theta & the input features.
            Then use an epsilon greedy algorithm to choose the action.
            Use the value of epsilon provided
        """
        self.time += 1
        # Epsilon decays as alpha / t.
        epsilon = float(1./self.time)* self.alpha
        #######################################################
        #########   YOUR CODE HERE - ~7 lines.   #############
        # Placeholder return until the TODO above is implemented.
        return ""
        #######################################################
        #########
# Thompson Sampling
class ThomSampB(BanditPolicy):
    """Assignment skeleton: Disjoint Thompson Sampling linear bandit."""
    def __init__(self, n_arms, features, alpha=1.):
        """
        See Algorithm 1 and section 2.2 from paper:
            "Thompson Sampling for Contextual Bandits with Linear Payoffs"
        Args:
            n_arms: int, the number of different arms/ actions the algorithm can take
            features: list of strings, contains the patient features to use
            alpha: float, hyperparameter for step size.
        TODO:
            Please initialize the following internal variables for the Disjoint Thompson Sampling Bandit algorithm.
            Please refer to the paper to understand what they are.
            Please feel free to add additional internal variables if you need them, but they are not necessary.
        Hints:
            - Keep track of a separate B, mu, f for each action (this is what the Disjoint in the algorithm name means)
            - Unlike in section 2.2 in the paper where they sample a single mu_tilde, we'll sample a mu_tilde for each arm
              based on our saved B, f, and mu values for each arm. Also, when we update, we only update the B, f, and mu
              values for the arm that we selected
            - What the paper refers to as b in our case is the medical features vector
            - The paper uses a summation (from time =0, .., t-1) to compute the model parameters at time step (t),
              however if you can't access prior data how might one store the result from the prior time steps.
        """
        #######################################################
        #########   YOUR CODE HERE - ~6 lines.   #############
        self.n_arms = None
        self.features = None
        # Simply use alpha for the v mentioned in the paper
        self.v2 = alpha
        self.B = []
        # Variable used to keep track of data needed to compute mu
        self.f = []
        # You can actually compute mu from B and f at each time step. So you don't have to use this.
        self.mu = []
        #######################################################
        #########  END YOUR CODE.  ############
    def choose(self, x):
        """
        See Algorithm 1 and section 2.2 from paper:
            "Thompson Sampling for Contextual Bandits with Linear Payoffs"
        Args:
            x: Dictionary containing the possible patient features.
        Returns:
            output: string containing one of ('low', 'medium', 'high')
        TODO:
            Please implement the "forward pass" for Disjoint Thompson Sampling Bandit algorithm.
            Please use the gaussian distribution like they do in the paper
        """
        #######################################################
        #########   YOUR CODE HERE - ~8 lines.   #############
        # Placeholder return until the TODO above is implemented.
        return ""
        #######################################################
        #########  END YOUR CODE.  ############
    def update(self, x, a, r):
        """
        See Algorithm 1 and section 2.2 from paper:
            "Thompson Sampling for Contextual Bandits with Linear Payoffs"
        Args:
            x: Dictionary containing the possible patient features.
            a: string, indicating the action your algorithm chose ('low', 'medium', 'high')
            r: the reward you received for that action
        Returns:
            Nothing
        TODO:
            Please implement the update step for Disjoint Thompson Sampling Bandit algorithm.
            Please use the gaussian distribution like they do in the paper
            Hint: Which parameters should you update?
        """
        #######################################################
        #########   YOUR CODE HERE - ~6 lines.   #############
        #######################################################
        #########  END YOUR CODE.  ############
def run(data, learner, large_error_penalty=False):
    """Run one online pass of `learner` over a shuffled copy of `data`.

    For each patient the learner chooses a dose class, receives a reward
    (0 if correct, -1 if wrong, `large_error_penalty` for a low<->high
    mix-up) and is updated immediately. Returns summary statistics,
    including the running fraction-incorrect curve.
    """
    # Shuffle
    data = data.sample(frac=1)
    T = len(data)
    n_egregious = 0  # count of low<->high ("egregious") mistakes
    correct = np.zeros(T, dtype=bool)
    for t in range(T):
        x = dict(data.iloc[t])
        # Ground-truth weekly dose; popped so the learner never sees it.
        label = x.pop(LABEL_KEY)
        action = learner.choose(x)
        correct[t] = (action == dose_class(label))
        reward = int(correct[t]) - 1  # 0 if correct, -1 otherwise
        if (action == 'low' and dose_class(label) == 'high') or (action == 'high' and dose_class(label) == 'low'):
            n_egregious += 1
            reward = large_error_penalty
        learner.update(x, action, reward)
    return {
        'total_fraction_correct': np.mean(correct),
        'average_fraction_incorrect': np.mean([
            np.mean(~correct[:t]) for t in range(1,T) ]),
        'fraction_incorrect_per_time': [
            np.mean(~correct[:t]) for t in range(1,T)],
        'fraction_egregious': float(n_egregious) / T
    }
def main(args):
    """Evaluate the selected policies, persist their curves and plot them.

    Each requested policy is run `args.runs` times; the averaged
    fraction-incorrect-over-time curves are written to results/<name>.csv
    and plotted to results/fraction_incorrect.png.
    """
    data = load_data()
    frac_incorrect = []
    features = [
        'Age in decades',
        'Height (cm)', 'Weight (kg)',
        'Male', 'Female',
        'Asian', 'Black', 'White', 'Unknown race',
        'Carbamazepine (Tegretol)',
        'Phenytoin (Dilantin)',
        'Rifampin or Rifampicin',
        'Amiodarone (Cordarone)'
    ]
    extra_features = [
        'VKORC1AG', 'VKORC1AA', 'VKORC1UN',
        'CYP2C912', 'CYP2C913', 'CYP2C922',
        'CYP2C923', 'CYP2C933', 'CYP2C9UN'
    ]
    features = features + extra_features

    def evaluate(label, message, make_learner, penalty=False):
        """Average the fraction-incorrect curve over args.runs fresh learners."""
        curves = []
        for _ in range(args.runs):
            print(message)
            results = run(data, make_learner(), large_error_penalty=penalty)
            curves.append(results["fraction_incorrect_per_time"])
            print([(x, results[x]) for x in results
                   if x != "fraction_incorrect_per_time"])
        frac_incorrect.append((label, np.mean(np.asarray(curves), 0)))

    # One call per policy replaces five copy-pasted loops; also fixes the
    # 'Runnining clinical' typo in the progress message.
    if args.run_fixed:
        evaluate("Fixed", 'Running fixed', FixedDosePolicy)
    if args.run_clinical:
        evaluate("Clinical", 'Running clinical', ClinicalDosingPolicy)
    if args.run_linucb:
        evaluate("LinUCB", 'Running LinUCB bandit',
                 lambda: LinUCB(3, features, alpha=args.alpha),
                 penalty=args.large_error_penalty)
    if args.run_egreedy:
        evaluate("eGreedy", 'Running eGreedy bandit',
                 lambda: eGreedyLinB(3, features, alpha=args.ep),
                 penalty=args.large_error_penalty)
    if args.run_thompson:
        evaluate("Thompson", 'Running Thompson Sampling bandit',
                 lambda: ThomSampB(3, features, alpha=args.v2),
                 penalty=args.large_error_penalty)

    os.makedirs('results', exist_ok=True)
    # Persist this invocation's curves as one-column CSVs.
    if frac_incorrect:
        for algorithm, results in frac_incorrect:
            with open(f'results/{algorithm}.csv', 'w') as f:
                csv.writer(f).writerows(results.reshape(-1, 1).tolist())
    # Re-read every stored curve (including ones from earlier invocations)
    # so the plot always shows all available algorithms.
    frac_incorrect = []
    for filename in os.listdir('results'):
        if filename.endswith('.csv'):
            algorithm = filename.split('.')[0]
            with open(os.path.join('results', filename), 'r') as f:
                frac_incorrect.append(
                    (algorithm,
                     np.array(list(csv.reader(f))).astype('float64').squeeze()))
    plt.xlabel("examples seen")
    plt.ylabel("fraction_incorrect")
    legend = []
    for name, values in frac_incorrect:
        legend.append(name)
        plt.plot(values[10:])  # skip the noisy first 10 points
    plt.ylim(0.0, 1.0)
    plt.legend(legend)
    plt.savefig(os.path.join('results', 'fraction_incorrect.png'))
if __name__ == '__main__':
    from argparse import ArgumentParser
    parser = ArgumentParser()
    # Which baselines / bandit algorithms to evaluate.
    parser.add_argument('--run-fixed', action='store_true')
    parser.add_argument('--run-clinical', action='store_true')
    parser.add_argument('--run-linucb', action='store_true')
    parser.add_argument('--run-egreedy', action='store_true')
    parser.add_argument('--run-thompson', action='store_true')
    # Hyperparameters: LinUCB exploration, epsilon-greedy scale, Thompson v^2.
    parser.add_argument('--alpha', type=float, default=1.)
    parser.add_argument('--ep', type=float, default=1)
    parser.add_argument('--v2', type=float, default=0.001)
    parser.add_argument('--runs', type=int, default=5)
    parser.add_argument('--large-error-penalty', type=float, default=-1)
    args = parser.parse_args()
    main(args)
2c8b3ebe7a08746e737d92e83663aae432be124f | 6,936 | py | Python | improver/cube_combiner.py | LaurenceBeard/improver | b7cfe44f3a802d2a3d65f76a325215033c9de074 | [
"BSD-3-Clause"
] | null | null | null | improver/cube_combiner.py | LaurenceBeard/improver | b7cfe44f3a802d2a3d65f76a325215033c9de074 | [
"BSD-3-Clause"
] | 2 | 2020-03-30T17:25:18.000Z | 2021-06-25T15:30:29.000Z | improver/cube_combiner.py | LaurenceBeard/improver | b7cfe44f3a802d2a3d65f76a325215033c9de074 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module containing plugin for CubeCombiner."""
import numpy as np
from improver import BasePlugin
from improver.utilities.cube_manipulation import expand_bounds
class CubeCombiner(BasePlugin):
"""Plugin for combining cubes.
"""
COMBINE_OPERATORS = {
"+": np.add,
"add": np.add,
"-": np.subtract,
"subtract": np.subtract,
"*": np.multiply,
"multiply": np.multiply,
"max": np.maximum,
"min": np.minimum,
"mean": np.add} # mean is calculated in two steps: sum and normalise
def __init__(self, operation, warnings_on=False):
"""
Create a CubeCombiner plugin
Args:
operation (str):
Operation (+, - etc) to apply to the incoming cubes.
warnings_on (bool):
If True output warnings for mismatching metadata.
Raises:
ValueError: Unknown operation.
"""
try:
self.operator = self.COMBINE_OPERATORS[operation]
except KeyError:
msg = 'Unknown operation {}'.format(operation)
raise ValueError(msg)
self.operation = operation
self.warnings_on = warnings_on
def __repr__(self):
"""Represent the configured plugin instance as a string."""
desc = ('<CubeCombiner: operation=' +
'{}, warnings_on = {}>'.format(self.operation,
self.warnings_on))
return desc
@staticmethod
def _check_dimensions_match(cube_list):
"""
Check all coordinate dimensions on the input cubes are equal
Args:
cube_list (iris.cube.CubeList or list):
List of cubes to compare
Raises:
ValueError: If dimension coordinates do not match
"""
ref_coords = cube_list[0].coords(dim_coords=True)
for cube in cube_list[1:]:
coords = cube.coords(dim_coords=True)
compare = [a == b for a, b in zip(coords, ref_coords)]
if not np.all(compare):
msg = ("Cannot combine cubes with different dimensions:\n"
"{} and {}".format(repr(cube_list[0]), repr(cube)))
raise ValueError(msg)
@staticmethod
def _get_expanded_coord_names(cube_list):
"""
Get names of coordinates whose bounds need expanding and points
recalculating after combining cubes. These are the scalar coordinates
that are present on all input cubes, but have different values.
Args:
cube_list (iris.cube.CubeList or list):
List of cubes to that will be combined
Returns:
list of str:
List of coordinate names to expand
"""
shared_scalar_coords = {
coord.name() for coord in cube_list[0].coords(dim_coords=False)}
for cube in cube_list[1:]:
cube_scalar_coords = {
coord.name() for coord in cube.coords(dim_coords=False)}
shared_scalar_coords = shared_scalar_coords & cube_scalar_coords
expanded_coords = []
for cube in cube_list[1:]:
for coord in shared_scalar_coords:
if (cube.coord(coord) != cube_list[0].coord(coord) and
coord not in expanded_coords):
expanded_coords.append(coord)
return expanded_coords
    def process(self, cube_list, new_diagnostic_name, use_midpoint=False):
        """
        Combine data and metadata from a list of input cubes into a single
        cube, using the specified operation to combine the cube data.
        Args:
            cube_list (iris.cube.CubeList or list):
                List of cubes to combine.
            new_diagnostic_name (str):
                New name for the combined diagnostic.
            use_midpoint (bool):
                Determines the nature of the points and bounds for expanded
                coordinates. If False, the upper bound of the coordinate is
                used as the point values. If True, the midpoint is used.
        Returns:
            iris.cube.Cube:
                Cube containing the combined data.
        Raises:
            ValueError: If the cubelist contains only one cube.
        """
        if len(cube_list) < 2:
            msg = 'Expecting 2 or more cubes in cube_list'
            raise ValueError(msg)
        # All cubes must share dimension coordinates before data can be combined.
        self._check_dimensions_match(cube_list)
        # perform operation (add, subtract, min, max, multiply) cumulatively
        # The first cube provides the metadata template for the result.
        result = cube_list[0].copy()
        for cube in cube_list[1:]:
            result.data = self.operator(result.data, cube.data)
        # normalise mean (for which self.operator is np.add)
        if self.operation == 'mean':
            result.data = result.data / len(cube_list)
        # update any coordinates that have been expanded, and rename output
        expanded_coord_names = self._get_expanded_coord_names(cube_list)
        if expanded_coord_names:
            result = expand_bounds(result, cube_list, expanded_coord_names,
                                   use_midpoint=use_midpoint)
        result.rename(new_diagnostic_name)
        return result
| 38.320442 | 79 | 0.625433 |
2efa448db51d675785b4d69ee659a2f2618cef73 | 360 | py | Python | lib/utils/utils.py | bugcrowd/methodology-taxonomy | 428503ae17f83de56e17762c9dd8daeb6f14dd6a | [
"Apache-2.0"
] | 4 | 2021-09-28T18:17:12.000Z | 2022-02-14T04:47:12.000Z | lib/utils/utils.py | bugcrowd/methodology-taxonomy | 428503ae17f83de56e17762c9dd8daeb6f14dd6a | [
"Apache-2.0"
] | 4 | 2021-09-21T10:05:30.000Z | 2022-01-28T04:21:45.000Z | lib/utils/utils.py | bugcrowd/methodology-taxonomy | 428503ae17f83de56e17762c9dd8daeb6f14dd6a | [
"Apache-2.0"
] | 2 | 2021-11-15T21:13:04.000Z | 2022-02-27T04:52:00.000Z | import json
import git
# Shared file and directory names used by the scripts in this package.
SCHEMA_FILENAME = 'schema.json'
METHODOLOGIES_DIR = 'methodologies'
MAPPING_DIR = 'mappings'
TEMPLATE_FILENAME = 'templates.json'
TEMPLATE_SCHEMA = 'templates.schema.json'
# Base URL for linking entries back to the bugcrowd/templates repository.
TEMPLATE_BASE_URL = 'https://github.com/bugcrowd/templates/tree/master/'
def get_json(filename):
    """Parse *filename* as JSON and return the resulting object.

    Args:
        filename: path of the JSON file to read.

    Returns:
        The deserialized JSON content (dict, list, etc.).
    """
    # json.load streams straight from the file object instead of reading the
    # whole file into a string first; UTF-8 is pinned explicitly since JSON
    # files are UTF-8 regardless of the platform's locale default.
    with open(filename, encoding='utf-8') as f:
        return json.load(f)
f0de2a13040231a89f303923dd22650b976fab94 | 3,324 | py | Python | contrib/zmq/zmq_sub3.4.py | pavhash5/bitcoinroyale | 74711b2767e1a64cd4af172d40fada969e03505c | [
"MIT"
] | 5 | 2019-09-19T22:24:28.000Z | 2020-08-26T00:07:59.000Z | contrib/zmq/zmq_sub3.4.py | pavhash5/bitcoinroyale | 74711b2767e1a64cd4af172d40fada969e03505c | [
"MIT"
] | 6 | 2019-10-01T00:00:54.000Z | 2021-07-26T12:57:40.000Z | contrib/zmq/zmq_sub3.4.py | pavhash5/bitcoinroyale | 74711b2767e1a64cd4af172d40fada969e03505c | [
"MIT"
] | 3 | 2019-09-30T15:03:26.000Z | 2019-12-09T18:47:52.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
Bitcoin should be started with the command line arguments:
broyaled -testnet -daemon \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
-zmqpubrawblock=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubhashblock=tcp://127.0.0.1:28332
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
The `@asyncio.coroutine` decorator and the `yield from` syntax found here
was introduced in python 3.4 and has been deprecated in favor of the `async`
and `await` keywords respectively.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
# Bail out early on interpreters that lack the asyncio features used below.
if (sys.version_info.major, sys.version_info.minor) < (3, 4):
    print("This example only works with Python 3.4 and greater")
    sys.exit(1)
# TCP port the bitcoind ZMQ publishers bind to (matches the command line
# arguments shown in the module docstring).
port = 28332
class ZMQHandler():
    """Subscribes to all four bitcoind ZMQ topics and prints each message.

    Uses the pre-3.5 ``@asyncio.coroutine`` / ``yield from`` style on
    purpose: the module is a Python 3.4 compatibility example (see the
    module docstring).
    """
    def __init__(self):
        self.loop = asyncio.get_event_loop()
        self.zmqContext = zmq.asyncio.Context()
        self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
        # No receive high-water mark: never drop messages on the subscriber side.
        self.zmqSubSocket.setsockopt(zmq.RCVHWM, 0)
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
        self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
    @asyncio.coroutine
    def handle(self) :
        """Receive one multipart message, print it, then reschedule itself."""
        msg = yield from self.zmqSubSocket.recv_multipart()
        topic = msg[0]
        body = msg[1]
        sequence = "Unknown"
        # The optional third frame is a little-endian uint32 sequence number.
        if len(msg[-1]) == 4:
            msgSequence = struct.unpack('<I', msg[-1])[-1]
            sequence = str(msgSequence)
        if topic == b"hashblock":
            print('- HASH BLOCK ('+sequence+') -')
            print(binascii.hexlify(body))
        elif topic == b"hashtx":
            print('- HASH TX  ('+sequence+') -')
            print(binascii.hexlify(body))
        elif topic == b"rawblock":
            print('- RAW BLOCK HEADER ('+sequence+') -')
            # Only the 80-byte block header is printed, not the whole block.
            print(binascii.hexlify(body[:80]))
        elif topic == b"rawtx":
            print('- RAW TX ('+sequence+') -')
            print(binascii.hexlify(body))
        # schedule ourselves to receive the next message
        asyncio.ensure_future(self.handle())
    def start(self):
        """Install the SIGINT handler and run the event loop forever."""
        self.loop.add_signal_handler(signal.SIGINT, self.stop)
        self.loop.create_task(self.handle())
        self.loop.run_forever()
    def stop(self):
        """Stop the event loop and tear down the ZMQ context."""
        self.loop.stop()
        self.zmqContext.destroy()
# Run the subscriber until interrupted (Ctrl-C triggers the SIGINT handler).
daemon = ZMQHandler()
daemon.start()
| 36.527473 | 107 | 0.649819 |
9cee43be4808b943423dfccb358a70ef341cfea6 | 11,243 | py | Python | vhdl_interface.py | Remillard/VHDL-Mode | 35d2b6ad022b30bdd23e4e89779f9b8b0486fee2 | [
"MIT"
] | 40 | 2017-07-11T20:11:35.000Z | 2021-12-01T06:28:29.000Z | vhdl_interface.py | Remillard/VHDL-Mode | 35d2b6ad022b30bdd23e4e89779f9b8b0486fee2 | [
"MIT"
] | 120 | 2017-07-13T18:22:44.000Z | 2021-11-04T19:39:05.000Z | vhdl_interface.py | Remillard/VHDL-Mode | 35d2b6ad022b30bdd23e4e89779f9b8b0486fee2 | [
"MIT"
] | 12 | 2018-01-03T20:01:25.000Z | 2021-09-05T16:03:49.000Z | """
Port Copying Module -- Contains the editor commands related to
copying and pasting an interface declaration into various forms.
"""
import time
import re
import sublime
import sublime_plugin
from . import vhdl_lang as vhdl
from . import vhdl_util as util
# Module-level holder for the most recently copied interface; all of the
# copy/paste commands below read from and write to this shared object.
_interface = vhdl.Interface()
#----------------------------------------------------------------
class vhdlModeCopyPortsCommand(sublime_plugin.TextCommand):
    """
    The copy ports command requires the user to have placed the
    point somewhere in the interface to be extracted.  The
    routine then scans upwards to find a known interface beginning
    and then down to find the end point.  If a good interface
    can be determined, then it uses the VHDL language classes to
    parse the text from the editor and store the structural
    elements for later pasting in other forms.
    """
    def find_start(self, point, interface):
        # Abstracting the loop for finding the beginning
        # of the declaration.
        # Moving point to beginning of line which avoids
        # checking a line twice due to line lengths.
        next_point = util.move_to_bol(self, point)
        while True:
            # interface_start returns a column offset on a match, else None.
            check = interface.interface_start(util.line_at_point(self, next_point))
            if check is None:
                if util.is_top_line(self, next_point):
                    print('vhdl-mode: Interface not found.')
                    return None
                else:
                    next_point = util.move_up(self, next_point)
            else:
                print('vhdl-mode: Interface beginning found.')
                # Convert (row, matched column) back into a buffer point.
                return self.view.text_point(self.view.rowcol(next_point)[0], check)
    def find_end(self, point, interface):
        # Stepping forward to find the end of the interface.
        next_point = util.move_to_bol(self, point)
        while True:
            check = interface.interface_end(util.line_at_point(self, next_point))
            if check is None:
                if util.is_end_line(self, next_point):
                    print('vhdl-mode: End of interface not found.')
                    return None
                else:
                    next_point = util.move_down(self, next_point)
            else:
                print('vhdl-mode: Interface end found.')
                return self.view.text_point(self.view.rowcol(next_point)[0], check)
    def is_visible(self):
        # Only offer the command when editing a VHDL source file.
        return self.view.match_selector(0, "source.vhdl")
    def run(self, edit):
        global _interface
        # Save the starting point location.  In the case of a
        # multi-selection, save point A of the first region.
        # This command does not have any meaning for a multi-
        # selection.
        region = self.view.sel()[0]
        original_point = region.begin()
        # Search for the starting entity string.
        startpoint = self.find_start(original_point, _interface)
        if startpoint is None:
            util.set_cursor(self, original_point)
            return
        # Search for the endpoint based on the start point.
        endpoint = self.find_end(startpoint, _interface)
        if endpoint is None:
            util.set_cursor(self, original_point)
            return
        # At this point, we should have a start and end point.  Extract
        # the string that contains the interface by creating a region
        # with the points.  At this point, all the processing should be
        # in the interface class.
        block = sublime.Region(startpoint, endpoint)
        _interface.if_string = self.view.substr(block)
        _interface.parse_block()
        # At the very end, move the point back to where we
        # started
        util.set_cursor(self, original_point)
#----------------------------------------------------------------
class vhdlModePasteAsSignalCommand(sublime_plugin.TextCommand):
    """
    Once we've copied an interface, we can paste the data back as
    signals (ports only, not generics.)
    """
    def description(self):
        # Menu caption for the command.
        return "Paste {} as Signals".format(_interface.name)
    def is_visible(self):
        # Only offered in VHDL files once an interface has been copied.
        return self.view.match_selector(0, "source.vhdl") and bool(_interface.name)
    def run(self, edit):
        """Insert the copied interface's ports as signal declarations."""
        # ``_interface`` is only read here, so the original ``global``
        # declaration and the unused ``lines = []`` scratch list were removed.
        # Get the current point location.
        region = self.view.sel()[0]
        original_point = region.begin()
        # Move to the beginning of the line the point is on.
        next_point = util.move_to_bol(self, original_point)
        # Construct structure and insert
        block_str = _interface.signals()
        if block_str is not None:
            num_chars = self.view.insert(edit, next_point, block_str)
            print('vhdl-mode: Inserted interface as signal(s).')
            util.set_cursor(self, next_point+num_chars)
        else:
            print('vhdl-mode: No valid ports in interface for signal(s).')
        # Set the point to original location
        # NOTE(review): this overrides the post-insert cursor position set
        # above, unlike the other paste commands which leave the cursor after
        # the insertion.  Preserved as-is to avoid a behavior change.
        util.set_cursor(self, original_point)
#----------------------------------------------------------------
class vhdlModePasteAsComponentCommand(sublime_plugin.TextCommand):
    """
    Pasting the current written interface as a component
    """
    def description(self):
        # Menu caption for the command.
        return "Paste {} as Component".format(_interface.name)
    def is_visible(self):
        # Only offered in VHDL files once an interface has been copied.
        return self.view.match_selector(0, "source.vhdl") and bool(_interface.name)
    def run(self, edit):
        # Get the current point location.
        region = self.view.sel()[0]
        original_point = region.begin()
        # Move to the beginning of the line the point is on.
        next_point = util.move_to_bol(self, original_point)
        block_str = _interface.component()
        num_chars = self.view.insert(edit, next_point, block_str)
        print('vhdl-mode: Inserted interface as component.')
        # Set point to the end of insertion.
        util.set_cursor(self, next_point+num_chars)
#----------------------------------------------------------------
class vhdlModePasteAsEntityCommand(sublime_plugin.TextCommand):
    """
    Pasting the currently copied interface as an entity.
    """
    def description(self):
        # Menu caption for the command.
        return "Paste {} as Entity".format(_interface.name)
    def is_visible(self):
        # Only offered in VHDL files once an interface has been copied.
        return self.view.match_selector(0, "source.vhdl") and bool(_interface.name)
    def run(self, edit):
        # Get the current point location.
        region = self.view.sel()[0]
        original_point = region.begin()
        # Move to the beginning of the line the point is on.
        next_point = util.move_to_bol(self, original_point)
        block_str = _interface.entity()
        num_chars = self.view.insert(edit, next_point, block_str)
        print('vhdl-mode: Inserted interface as entity.')
        # Set the point to end of insertion
        util.set_cursor(self, next_point+num_chars)
#----------------------------------------------------------------
class vhdlModePasteAsInstanceCommand(sublime_plugin.TextCommand):
    """
    Pastes the currently copied interface into the source as
    an instantiation.  Currently does not keep track of other
    instances of the same interface in the source.
    """
    def description(self):
        # Menu caption for the command.
        return "Paste {} as Instance".format(_interface.name)
    def is_visible(self):
        # Only offered in VHDL files once an interface has been copied.
        return self.view.match_selector(0, "source.vhdl") and bool(_interface.name)
    def run(self, edit):
        # Get the current point location.
        region = self.view.sel()[0]
        original_point = region.begin()
        # Move to the beginning of the line the point is on.
        next_point = util.move_to_bol(self, original_point)
        # Construct structure.  Get the file structure.
        # Existing instantiations are scanned so the new label can be unique.
        instances = util.scan_instantiations(self)
        block_str = _interface.instance(instances=instances)
        num_chars = self.view.insert(edit, next_point, block_str)
        print('vhdl-mode: Inserted interface as instance.')
#----------------------------------------------------------------
class vhdlModePasteAsTestbenchCommand(sublime_plugin.WindowCommand):
    """
    After copying a port, this will open a new window and
    inject the skeleton of a testbench.  Note, this isn't a
    TextCommand, but rather a WindowCommand so the run method
    has slightly different parameters.
    """
    def description(self):
        # Menu caption for the command.
        return "Paste {} as Testbench".format(_interface.name)
    def is_visible(self):
        # I can't do the usual source file check because this is a
        # WindowCommand and not a TextCommand which has an associated view.
        # At the moment, simply checking to see if there is a valid interface
        # that's been copied.
        return self.window.active_view().match_selector(0, 'source.vhdl') and bool(_interface.name)
    def run(self):
        """Sublime WindowCommand run method."""
        # Assigning this to a string to keep command shorter later.
        template = "Packages/VHDL Mode/Snippets/vhdl-testbench.sublime-snippet"
        # Open a fresh buffer, set it up as a VHDL file named <entity>_tb.vhd.
        tb_view = self.window.new_file()
        tb_view.assign_syntax('Packages/VHDL Mode/Syntax/VHDL.sublime-syntax')
        tb_view.set_name('{}_tb.vhd'.format(_interface.name))
        entity_name = '{}_tb'.format(_interface.name)
        signals_str = _interface.signals()
        constants_str = _interface.constants()
        instance_str = _interface.instance(name="DUT")
        # Inserting template/snippet
        tb_view.run_command("insert_snippet",
                            {
                                "name"     : template,
                                "ENAME"    : entity_name,
                                "CONSTANTS": constants_str,
                                "SIGNALS"  : signals_str,
                                "INSTANCE" : instance_str
                            })
        tb_view.run_command("vhdl_mode_insert_header")
        print('vhdl-mode: Created testbench from interface.')
#----------------------------------------------------------------
class vhdlModeFlattenPortsCommand(sublime_plugin.TextCommand):
    """
    This command scans over the internal data structure
    for the interface and wherever there is a port or generic
    that has multiple items on the same line, it'll separate them
    onto their own lines.
    """
    def is_visible(self):
        # Only offered in VHDL files once an interface has been copied.
        return self.view.match_selector(0, "source.vhdl") and bool(_interface.name)
    def run(self, edit):
        """Flatten multi-item port/generic lines in the copied interface."""
        # ``_interface`` is mutated in place and never rebound, so the
        # original (unnecessary) ``global`` declaration was removed.
        _interface.flatten()
        print('vhdl-mode: Flattening ports for next paste.')
#----------------------------------------------------------------
class vhdlModeReversePortsCommand(sublime_plugin.TextCommand):
    """
    This command scans over the internal data structure
    for the interface and flips in and out/buffer modes on
    the ports.
    """
    def is_visible(self):
        # Only offered in VHDL files once an interface has been copied.
        return self.view.match_selector(0, "source.vhdl") and bool(_interface.name)
    def run(self, edit):
        """Flip in/out (and buffer) modes on the copied interface's ports."""
        # ``_interface`` is mutated in place and never rebound, so the
        # original (unnecessary) ``global`` declaration was removed.
        _interface.reverse()
        print('vhdl-mode: Reversing ports for next paste.')
| 39.868794 | 100 | 0.606511 |
a73998f0e28e2aa66b20fb086da70dcd06a029b0 | 10,679 | py | Python | test/unit/controllers/request_api_test.py | hazmat345/brew-view | effd67819f7e995595471e0dc1c4e03a63942b96 | [
"MIT"
] | null | null | null | test/unit/controllers/request_api_test.py | hazmat345/brew-view | effd67819f7e995595471e0dc1c4e03a63942b96 | [
"MIT"
] | null | null | null | test/unit/controllers/request_api_test.py | hazmat345/brew-view | effd67819f7e995595471e0dc1c4e03a63942b96 | [
"MIT"
] | null | null | null | import copy
import datetime
import json
from mock import Mock
from bg_utils.models import Request, Job, RequestTemplate, DateTrigger
from . import TestHandlerBase
class RequestAPITest(TestHandlerBase):
    """HTTP-level tests for the /api/v1/requests/<id> endpoint.

    Each test builds a Request (and sometimes a Job) document, saves it,
    issues a GET or JSON-PATCH over the tornado test client provided by
    TestHandlerBase, and checks both the HTTP response and the persisted
    document state.
    """
    def setUp(self):
        self.request_mock = Mock()
        # 2016-01-01 expressed both as epoch milliseconds (wire format)
        # and as a datetime (database format).
        self.ts_epoch = 1451606400000
        self.ts_dt = datetime.datetime(2016, 1, 1)
        # Expected serialized form of the request, as returned by the API.
        self.request_dict = {
            'children': [],
            'parent': None,
            'system': 'system_name',
            'system_version': '0.0.1',
            'instance_name': 'default',
            'command': 'say',
            'id': '58542eb571afd47ead90d25f',
            'parameters': {},
            'comment': 'bye!',
            'output': 'nested output',
            'output_type': 'STRING',
            'status': 'IN_PROGRESS',
            'command_type': 'ACTION',
            'created_at': self.ts_epoch,
            'updated_at': self.ts_epoch,
            'error_class': None,
            'metadata': {},
            'has_parent': True,
            'requester': None
        }
        # Serialized form of a scheduled job that can spawn requests.
        self.job_dict = {
            'name': 'job_name',
            'trigger_type': 'date',
            'trigger': {
                'run_date': self.ts_epoch,
                'timezone': 'utc',
            },
            'request_template': {
                'system': 'system',
                'system_version': '1.0.0',
                'instance_name': 'default',
                'command': 'speak',
                'parameters': {'message': 'hey!'},
                'comment': 'hi!',
                'metadata': {'request': 'stuff'},
            },
            'misfire_grace_time': 3,
            'coalesce': True,
            'next_run_time': self.ts_epoch,
            'success_count': 0,
            'error_count': 0,
        }
        # Build the database documents from the dicts, swapping epoch
        # timestamps for datetimes and nesting the embedded documents.
        db_dict = copy.deepcopy(self.job_dict)
        db_dict['request_template'] = RequestTemplate(**db_dict['request_template'])
        db_dict['trigger']['run_date'] = self.ts_dt
        db_dict['trigger'] = DateTrigger(**db_dict['trigger'])
        db_dict['next_run_time'] = self.ts_dt
        self.job = Job(**db_dict)
        db_dict = copy.deepcopy(self.request_dict)
        db_dict['created_at'] = self.ts_dt
        db_dict['updated_at'] = self.ts_dt
        self.request = Request(**db_dict)
        super(RequestAPITest, self).setUp()
    def tearDown(self):
        # Remove any documents the test persisted.
        Request.objects.delete()
        Job.objects.delete()
    def test_get(self):
        self.request.save()
        response = self.fetch('/api/v1/requests/' + str(self.request.id))
        self.assertEqual(200, response.code)
        data = json.loads(response.body.decode('utf-8'))
        # updated_at changes on save, so it is excluded from the comparison.
        data.pop('updated_at')
        self.request_dict.pop('updated_at')
        self.assertEqual(self.request_dict, data)
    def test_patch_replace_duplicate(self):
        # Patching fields to the values they already hold should still succeed.
        self.request.status = 'SUCCESS'
        self.request.output = 'output'
        self.request.save()
        body = json.dumps({
            "operations": [
                {
                    "operation": "replace",
                    "path": "/output",
                    "value": "output"
                },
                {
                    "operation": "replace",
                    "path": "/status",
                    "value": "SUCCESS"
                },
            ]
        })
        response = self.fetch(
            '/api/v1/requests/' + str(self.request.id),
            method='PATCH',
            body=body,
            headers={'content-type': 'application/json'}
        )
        self.assertEqual(200, response.code)
        self.request.reload()
        self.assertEqual("SUCCESS", self.request.status)
        self.assertEqual("output", self.request.output)
    def test_patch_replace_status(self):
        self.request.save()
        body = json.dumps({"operations": [{"operation": "replace", "path": "/status",
                                           "value": "SUCCESS"}]})
        response = self.fetch(
            '/api/v1/requests/' + str(self.request.id),
            method='PATCH',
            body=body,
            headers={'content-type': 'application/json'}
        )
        self.assertEqual(200, response.code)
        self.request.reload()
        self.assertEqual("SUCCESS", self.request.status)
    def test_patch_replace_output(self):
        # Output may be replaced while the request is still in progress.
        self.request.output = 'old_output_but_not_done_with_progress'
        self.request.save()
        body = json.dumps({"operations": [{"operation": "replace", "path": "/output",
                                           "value": "output"}]})
        response = self.fetch(
            '/api/v1/requests/' + str(self.request.id),
            method='PATCH',
            body=body,
            headers={'content-type': 'application/json'}
        )
        self.assertEqual(200, response.code)
        self.request.reload()
        self.assertEqual("output", self.request.output)
    def test_patch_replace_error_class(self):
        self.request.error_class = 'Klazz1'
        body = json.dumps({"operations": [{"operation": "replace", "path": "/error_class",
                                           "value": "error"}]})
        self.request.save()
        response = self.fetch(
            '/api/v1/requests/' + str(self.request.id),
            method='PATCH',
            body=body,
            headers={'content-type': 'application/json'}
        )
        self.request.reload()
        self.assertEqual(200, response.code)
        self.assertEqual("error", self.request.error_class)
    def test_patch_replace_bad_status(self):
        # An unknown status value must be rejected with a 4xx.
        self.request.save()
        body = json.dumps({"operations": [{"operation": "replace", "path": "/status",
                                           "value": "bad"}]})
        response = self.fetch(
            '/api/v1/requests/' + str(self.request.id),
            method='PATCH',
            body=body,
            headers={'content-type': 'application/json'}
        )
        self.assertGreaterEqual(response.code, 400)
    def test_patch_update_output_for_complete_request(self):
        # Output of an already-completed request is immutable.
        self.request.status = 'SUCCESS'
        self.request.output = 'old_value'
        self.request.save()
        body = json.dumps({"operations": [{"operation": "replace", "path": "/output",
                                           "value": "shouldnt work"}]})
        response = self.fetch(
            '/api/v1/requests/' + str(self.request.id),
            method='PATCH',
            body=body,
            headers={'content-type': 'application/json'}
        )
        self.request.reload()
        self.assertGreaterEqual(response.code, 400)
        self.assertEqual(self.request.output, 'old_value')
    def test_patch_no_system(self):
        # A well-formed but nonexistent id must produce a 404.
        good_id_does_not_exist = ''.join('1' for _ in range(24))
        response = self.fetch(
            '/api/v1/requests/' + good_id_does_not_exist,
            method='PATCH',
            body='{"operations": [{"operation": "fake"}]}',
            headers={'content-type': 'application/json'}
        )
        self.assertEqual(response.code, 404)
    def test_patch_replace_bad_path(self):
        self.request.save()
        body = json.dumps({"operations": [{"operation": "replace", "path": "/bad",
                                           "value": "error"}]})
        response = self.fetch(
            '/api/v1/requests/' + str(self.request.id),
            method='PATCH',
            body=body,
            headers={'content-type': 'application/json'}
        )
        self.assertGreaterEqual(response.code, 400)
    def test_patch_bad_operation(self):
        self.request.save()
        response = self.fetch(
            '/api/v1/requests/' + str(self.request.id),
            method='PATCH',
            body='{"operations": [{"operation": "fake"}]}',
            headers={'content-type': 'application/json'}
        )
        self.assertGreaterEqual(response.code, 400)
    def test_prometheus_endpoint(self):
        # The metrics label should collapse the concrete id to '<ID>'.
        handler = self.app.find_handler(request=Mock(path='/api/v1/requests'))
        c = handler.handler_class(
            self.app,
            Mock(path='/api/v1/requests/111111111111111111111111')
        )
        assert c.prometheus_endpoint == '/api/v1/requests/<ID>'
    def test_update_job_numbers(self):
        # Completing a job-spawned request with SUCCESS bumps success_count.
        self.job.save()
        self.request.metadata['_bg_job_id'] = str(self.job.id)
        self.request.save()
        body = json.dumps(
            {
                "operations": [
                    {
                        "operation": "replace",
                        "path": "/status",
                        "value": "SUCCESS"
                    }
                ]
            }
        )
        response = self.fetch(
            '/api/v1/requests/' + str(self.request.id),
            method='PATCH',
            body=body,
            headers={'content-type': 'application/json'},
        )
        self.assertEqual(response.code, 200)
        self.job.reload()
        self.assertEqual(self.job.success_count, 1)
        self.assertEqual(self.job.error_count, 0)
    def test_update_job_numbers_error(self):
        # Completing a job-spawned request with ERROR bumps error_count.
        self.job.save()
        self.request.metadata['_bg_job_id'] = str(self.job.id)
        self.request.save()
        body = json.dumps(
            {
                "operations": [
                    {
                        "operation": "replace",
                        "path": "/status",
                        "value": "ERROR"
                    }
                ]
            }
        )
        response = self.fetch(
            '/api/v1/requests/' + str(self.request.id),
            method='PATCH',
            body=body,
            headers={'content-type': 'application/json'},
        )
        self.assertEqual(response.code, 200)
        self.job.reload()
        self.assertEqual(self.job.success_count, 0)
        self.assertEqual(self.job.error_count, 1)
    def test_update_job_invalid_id(self):
        # A dangling job reference must not break the request update itself.
        self.request.metadata['_bg_job_id'] = ''.join(['1' for _ in range(24)])
        self.request.save()
        body = json.dumps(
            {
                "operations": [
                    {
                        "operation": "replace",
                        "path": "/status",
                        "value": "ERROR"
                    }
                ]
            }
        )
        response = self.fetch(
            '/api/v1/requests/' + str(self.request.id),
            method='PATCH',
            body=body,
            headers={'content-type': 'application/json'},
        )
        self.assertEqual(response.code, 200)
| 34.785016 | 90 | 0.50192 |
a87addeb204daaad47dac5917d1a24ac6238fde1 | 5,765 | py | Python | fresh_tomatoes.py | gurugithub/movie-trailer-website | e997b031dd0691b9af9e7c3bff9f982f23419573 | [
"Unlicense"
] | null | null | null | fresh_tomatoes.py | gurugithub/movie-trailer-website | e997b031dd0691b9af9e7c3bff9f982f23419573 | [
"Unlicense"
] | null | null | null | fresh_tomatoes.py | gurugithub/movie-trailer-website | e997b031dd0691b9af9e7c3bff9f982f23419573 | [
"Unlicense"
] | null | null | null | # Modified Guru Shetti 3/25/2015 Included additional fields Storyline and Rating
import webbrowser
import os
import re
# Styles and scripting for the page
main_page_head = '''
<head>
<meta charset="utf-8">
<title>Gurus Flix!</title>
<!-- Bootstrap 3 -->
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap.min.css">
<link rel="stylesheet" href="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/css/bootstrap-theme.min.css">
<script src="http://code.jquery.com/jquery-1.10.1.min.js"></script>
<script src="https://netdna.bootstrapcdn.com/bootstrap/3.1.0/js/bootstrap.min.js"></script>
<style type="text/css" media="screen">
body {
padding-top: 80px;
background: url("https://secure.netflix.com/us/layout/ecweb/login/login_lifestyle_tall_close_crop.jpg");
}
#trailer .modal-dialog {
margin-top: 200px;
width: 640px;
height: 480px;
}
.hanging-close {
position: absolute;
top: -12px;
right: -12px;
z-index: 9001;
}
#trailer-video {
width: 100%;
height: 100%;
}
.movie-tile {
margin-bottom: 20px;
padding-top: 20px;
}
.movie-tile:hover {
background-color: #EEE;
cursor: pointer;
}
.scale-media {
padding-bottom: 56.25%;
position: relative;
}
.scale-media iframe {
border: none;
height: 100%;
position: absolute;
width: 100%;
left: 0;
top: 0;
background-color: white;
}
</style>
<script type="text/javascript" charset="utf-8">
// Pause the video when the modal is closed
$(document).on('click', '.hanging-close, .modal-backdrop, .modal', function (event) {
// Remove the src so the player itself gets removed, as this is the only
// reliable way to ensure the video stops playing in IE
$("#trailer-video-container").empty();
});
// Start playing the video whenever the trailer modal is opened
$(document).on('click', '.movie-tile', function (event) {
var trailerYouTubeId = $(this).attr('data-trailer-youtube-id')
var sourceUrl = 'http://www.youtube.com/embed/' + trailerYouTubeId + '?autoplay=1&html5=1';
$("#trailer-video-container").empty().append($("<iframe></iframe>", {
'id': 'trailer-video',
'type': 'text-html',
'src': sourceUrl,
'frameborder': 0
}));
});
// Animate in the movies when the page loads
$(document).ready(function () {
$('.movie-tile').hide().first().show("fast", function showNext() {
$(this).next("div").show("fast", showNext);
});
});
</script>
</head>
'''
# The main page layout and title bar
main_page_content = '''
<!DOCTYPE html>
<html lang="en">
<body>
<!-- Trailer Video Modal -->
<div class="modal" id="trailer">
<div class="modal-dialog">
<div class="modal-content">
<a href="#" class="hanging-close" data-dismiss="modal" aria-hidden="true">
<img src="https://lh5.ggpht.com/v4-628SilF0HtHuHdu5EzxD7WRqOrrTIDi_MhEG6_qkNtUK5Wg7KPkofp_VJoF7RS2LhxwEFCO1ICHZlc-o_=s0#w=24&h=24"/>
</a>
<div class="scale-media" id="trailer-video-container">
</div>
</div>
</div>
</div>
<!-- Main Page Content -->
<div class="container">
<div class="navbar navbar-inverse navbar-fixed-top" role="navigation">
<div class="container">
<div class="navbar-header">
<a class="navbar-brand" href="#">Fresh Tomatoes Movie Trailers</a>
</div>
</div>
</div>
</div>
<div class="container">
{movie_tiles}
</div>
</body>
</html>
'''
# A single movie entry html template
movie_tile_content = '''
<div class="col-md-6 col-lg-4 movie-tile text-center" data-trailer-youtube-id="{trailer_youtube_id}" data-toggle="modal" data-target="#trailer">
    <img src="{poster_image_url}" width="220" height="342">
    <h2>{movie_title} - {rating}</h2> <p>{storyline}</p>
</div>
'''
def create_movie_tiles_content(movies):
    """Build the HTML for one tile per movie.

    Args:
        movies: iterable of objects with ``title``, ``poster_image``,
            ``youtube_trailer``, ``storyline`` and ``rating`` attributes.

    Returns:
        str: concatenated tile markup for every movie (empty string for an
        empty iterable).
    """
    # The HTML content for this section of the page
    content = ''
    for movie in movies:
        # Extract the youtube ID from either a "watch?v=<id>" or a
        # "youtu.be/<id>" style url.
        youtube_id_match = re.search(r'(?<=v=)[^&#]+', movie.youtube_trailer)
        youtube_id_match = youtube_id_match or re.search(r'(?<=be/)[^&#]+', movie.youtube_trailer)
        trailer_youtube_id = youtube_id_match.group(0) if youtube_id_match else None
        # Append the tile for the movie with its content filled in.
        # Bug fix: the original passed the full trailer URL instead of the
        # extracted id, which broke the page's embed URL
        # ("http://www.youtube.com/embed/<id>") built in main_page_head.
        content += movie_tile_content.format(
            movie_title=movie.title,
            poster_image_url=movie.poster_image,
            trailer_youtube_id=trailer_youtube_id,
            storyline=movie.storyline,
            rating=movie.rating
        )
    return content
def open_movies_page(movies):
    """Render the movie page to fresh_tomatoes.html and open it in a browser."""
    # Create or overwrite the output file
    output_file = open('fresh_tomatoes.html', 'w')
    # Replace the placeholder for the movie tiles with the actual dynamically generated content
    rendered_content = main_page_content.format(movie_tiles=create_movie_tiles_content(movies))
    # Output the file
    output_file.write(main_page_head + rendered_content)
    output_file.close()
    # open the output file in the browser
    url = os.path.abspath(output_file.name)
    webbrowser.open('file://' + url, new=2)  # open in a new tab, if possible
ac6fa392ca29d7042c7466c6f26610497be9999d | 13,084 | py | Python | main.py | zyzkevin/PKUAutoSubmit | ef81367c4d81bd32dd15038bd8808f8a895049bc | [
"Apache-2.0"
] | null | null | null | main.py | zyzkevin/PKUAutoSubmit | ef81367c4d81bd32dd15038bd8808f8a895049bc | [
"Apache-2.0"
] | null | null | null | main.py | zyzkevin/PKUAutoSubmit | ef81367c4d81bd32dd15038bd8808f8a895049bc | [
"Apache-2.0"
] | null | null | null | from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver import Firefox, Chrome
from selenium import webdriver
from argparse import ArgumentParser
from urllib.parse import quote
import time
import copy
import sys
import os
import smtplib
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
TIMEOUT = 20  # seconds to wait for page elements to appear
TIMESLP = 3   # generic pause between UI actions, in seconds
# SECURITY NOTE(review): real SMTP credentials are hard-coded in source;
# they should be moved to environment variables or a config file.
my_sender = '1692484707@qq.com'  # sender e-mail account
my_pass = 'fujkixpkjiyhcaji'  # sender e-mail password (SMTP auth code)
my_user = 'antonchen@outlook.com'  # recipient e-mail account
def mail():
    """Email the submission screenshot (result.png) to the recipient.

    Returns:
        bool: True if the message was sent, False on any failure.
    """
    ret = True
    try:
        cur_time = time.strftime("%d/%m/%Y")
        msgRoot = MIMEMultipart('related')
        msgRoot['From'] = Header('PKU-AutoSubmit', 'utf-8')
        msgRoot['To'] = Header('student', 'utf-8')
        subject = cur_time + ' 报备成功!'
        msgRoot['Subject'] = Header(subject, 'utf-8')
        msgAlternative = MIMEMultipart('alternative')
        msgRoot.attach(msgAlternative)
        mail_msg = """
        <p>自动报备成功!</p>
        <p>截图:</p>
        <p><img src="cid:image1"></p>
        """
        msgAlternative.attach(MIMEText(mail_msg, 'html', 'utf-8'))
        # The screenshot is expected in the current working directory.
        fp = open('result.png', 'rb')
        msgImage = MIMEImage(fp.read())
        fp.close()
        # Content-ID referenced by the <img> tag in the HTML body above.
        msgImage.add_header('Content-ID', '<image1>')
        msgRoot.attach(msgImage)
        server = smtplib.SMTP_SSL("smtp.qq.com", 465)  # sender's SMTP server, SSL port 465
        server.login(my_sender, my_pass)  # sender account name and password
        server.sendmail(my_sender, [my_user, ], msgRoot.as_string())  # sender, recipient list, message
        server.quit()  # close the connection
    except Exception:  # any failure (missing screenshot, SMTP error) is reported as False
        ret = False
    return ret
def login(driver, username, password, retry=0):
    """Log into the PKU portal through IAAA, retrying up to three times.

    Args:
        driver: selenium WebDriver driving the browser session.
        username: IAAA account name.
        password: IAAA account password.
        retry (int): current attempt number; internal, callers keep the default.

    Raises:
        Exception: after three consecutive failed attempts.
    """
    if retry == 3:
        raise Exception('门户登录失败')
    print('门户登陆中...')
    appID = 'portal2017'
    iaaaUrl = 'https://iaaa.pku.edu.cn/iaaa/oauth.jsp'
    appName = quote('北京大学校内信息门户新版')
    redirectUrl = 'https://portal.pku.edu.cn/portal2017/ssoLogin.do'
    driver.get('https://portal.pku.edu.cn/portal2017/')
    driver.get(
        f'{iaaaUrl}?appID={appID}&appName={appName}&redirectUrl={redirectUrl}')
    WebDriverWait(driver, 5).until(
        EC.visibility_of_element_located((By.ID, 'logon_button')))
    driver.find_element_by_id('user_name').send_keys(username)
    time.sleep(0.1)
    driver.find_element_by_id('password').send_keys(password)
    time.sleep(0.1)
    driver.find_element_by_id('logon_button').click()
    driver.get('https://portal.pku.edu.cn/portal2017/#/bizCenter')
    try:
        # The campus entry/exit card appearing in the biz center is the
        # signal that the login round-trip succeeded.
        WebDriverWait(driver,
                      5).until(EC.visibility_of_element_located((By.ID, 'stuCampusExEnReq')))
        print('门户登录成功!')
    except Exception:
        print('Retrying...')
        login(driver, username, password, retry + 1)
    # Bug fix: the original function ended with ``if failed == 3: raise`` --
    # ``failed`` was never defined, so any retried login raised NameError on
    # the way back out even after the retry eventually succeeded.  The
    # ``retry == 3`` guard at the top already enforces the limit, so that
    # check, a stray ``print(driver.page_source)`` debug dump, and a large
    # block of commented-out legacy code have been removed.
def go_to_application_out(driver):
    """Open the leaving-campus application form in its newly opened window."""
    driver.find_element_by_id('stuCampusExEnReq').click()
    time.sleep(TIMESLP)
    # The application opens in a fresh window; move focus there.
    newest_window = driver.window_handles[-1]
    driver.switch_to.window(newest_window)
    WebDriverWait(driver, TIMEOUT).until(
        EC.visibility_of_element_located((By.CLASS_NAME, 'el-card__body')))
    time.sleep(TIMESLP)
    entry_card = driver.find_element_by_class_name('el-card__body')
    entry_card.click()
    time.sleep(TIMESLP)
    # Wait until the form inputs are rendered before returning.
    WebDriverWait(driver, TIMEOUT).until(
        EC.visibility_of_element_located((By.CLASS_NAME, 'el-input__inner')))
def go_to_application_in(driver):
    """Open the entering-campus application form.

    NOTE(review): unlike go_to_application_out, the 'stuCampusExEnReq'
    element is only waited for, never clicked; this relies on the window
    opened earlier in the session — confirm against the caller flow.
    """
    driver.get('https://portal.pku.edu.cn/portal2017/#/bizCenter')
    WebDriverWait(driver, TIMEOUT).until(
        EC.visibility_of_element_located((By.ID, 'stuCampusExEnReq')))
    time.sleep(TIMESLP)
    newest_window = driver.window_handles[-1]
    driver.switch_to.window(newest_window)
    WebDriverWait(driver, TIMEOUT).until(
        EC.visibility_of_element_located((By.CLASS_NAME, 'el-card__body')))
    time.sleep(TIMESLP)
    entry_card = driver.find_element_by_class_name('el-card__body')
    entry_card.click()
    time.sleep(TIMESLP)
    # Wait until the form's drop-down is rendered before returning.
    WebDriverWait(driver, TIMEOUT).until(
        EC.visibility_of_element_located((By.CLASS_NAME, 'el-select')))
def select_past_out(driver):
    """Reuse the previous leaving-campus record via its shortcut button."""
    button = driver.find_element_by_xpath(
        '//button/span[contains(text(),"出校")]')
    button.click()
    time.sleep(TIMESLP)
def select_past_in(driver):
    """Reuse the previous entering-campus record via its shortcut button."""
    button = driver.find_element_by_xpath(
        '//button/span[contains(text(),"入校")]')
    button.click()
    time.sleep(TIMESLP)
def select_in_out(driver, way):
    """Choose leaving/entering ('出校'/'入校') in the first drop-down."""
    dropdown = driver.find_element_by_class_name('el-select')
    dropdown.click()
    time.sleep(TIMESLP)
    option = driver.find_element_by_xpath(f'//li/span[text()="{way}"]')
    option.click()
def select_reason(driver, choice):
    """Pick the application reason category from the first drop-down."""
    dropdown = driver.find_element_by_class_name('el-select')
    dropdown.click()
    time.sleep(TIMESLP)
    option = driver.find_element_by_xpath(f'//li/span[text()="{choice}"]')
    option.click()
def select_campus(driver, campus):
    """Pick the campus from the second drop-down selector."""
    dropdown = driver.find_elements_by_class_name('el-select')[1]
    dropdown.click()
    time.sleep(TIMESLP)
    option = driver.find_element_by_xpath(f'//li/span[text()="{campus}"]')
    option.click()
def select_destination(driver, destination):
    """Pick the leaving-campus destination from the third drop-down."""
    dropdown = driver.find_elements_by_class_name('el-select')[2]
    dropdown.click()
    time.sleep(TIMESLP)
    option = driver.find_element_by_xpath(
        f'//li/span[text()="{destination}"]')
    option.click()
def select_district(driver, district):
    """Pick the residence district from the fourth drop-down."""
    dropdown = driver.find_elements_by_class_name('el-select')[3]
    dropdown.click()
    time.sleep(TIMESLP)
    option = driver.find_element_by_xpath(f'//li/span[text()="{district}"]')
    option.click()
def write_reason(driver, reason):
    """Type the application reason into the first textarea."""
    textarea = driver.find_element_by_class_name('el-textarea__inner')
    textarea.send_keys(f'{reason}')
    time.sleep(TIMESLP)
def write_track(driver, track):
    """Type the planned movement track into the second textarea."""
    textarea = driver.find_elements_by_class_name('el-textarea__inner')[1]
    textarea.send_keys(f'{track}')
    time.sleep(TIMESLP)
def write_street(driver, street):
    """Type the residence street into the second textarea.

    NOTE(review): identical element lookup to write_track (index [1]);
    presumably the entering-campus form also has two textareas — confirm.
    """
    textarea = driver.find_elements_by_class_name('el-textarea__inner')[1]
    textarea.send_keys(f'{street}')
    time.sleep(TIMESLP)
def click_check(driver):
    """Tick the agreement checkbox."""
    checkbox = driver.find_element_by_class_name('el-checkbox__label')
    checkbox.click()
    time.sleep(TIMESLP)
def click_inPeking(driver):
    """Select the currently-in-Beijing radio button."""
    radio = driver.find_element_by_class_name('el-radio__inner')
    radio.click()
    time.sleep(TIMESLP)
def submit(driver):
    """Save the form, wait for the confirm dialog, then submit it."""
    save_xpath = '//button/span[contains(text(),"保存")]'
    confirm_xpath = '(//button/span[contains(text(),"提交")])[3]'
    driver.find_element_by_xpath(save_xpath).click()
    WebDriverWait(driver, TIMEOUT).until(
        EC.visibility_of_element_located((By.XPATH, confirm_xpath)))
    driver.find_element_by_xpath(confirm_xpath).click()
    time.sleep(TIMESLP)
def screen_capture(driver):
    """Go back to the record-history page and save a screenshot as result.png."""
    for _ in range(2):
        driver.back()
    WebDriverWait(driver, 5).until(
        EC.visibility_of_element_located((By.CLASS_NAME, 'el-card__body')))
    history_card = driver.find_elements_by_class_name('el-card__body')[1]
    history_card.click()
    WebDriverWait(driver, 5).until(
        EC.visibility_of_element_located(
            (By.XPATH, '//button/span[contains(text(),"加载更多")]')))
    driver.maximize_window()
    time.sleep(0.1)
    driver.save_screenshot('result.png')
    print('备案历史截图已保存')
def fill_out(driver, campus, reason, destination, track):
    """Fill and submit the leaving-campus application form."""
    print('开始填报出校备案')
    # Each step is (progress label, action); labels keep the original
    # trailing space so 'Done' follows on the same line.
    steps = [
        ('选择出校/入校 ', lambda: select_in_out(driver, '出校')),
        ('选择校区 ', lambda: select_campus(driver, campus)),
        ('选择出入校理由 ', lambda: select_reason(driver, '学业')),
        ('填写出入校事由 ', lambda: write_reason(driver, reason)),
        ('选择出校目的地 ', lambda: select_destination(driver, destination)),
        ('填写出校行动轨迹 ', lambda: write_track(driver, track)),
    ]
    for label, action in steps:
        print(label, end='')
        action()
        print('Done')
    click_check(driver)
    submit(driver)
    print('出校备案填报完毕!')
def fill_in(driver, campus, reason, habitation, district, street):
    """Fill and submit the entering-campus form (Beijing residents only)."""
    print('开始填报入校备案')
    first_steps = [
        ('选择出校/入校 ', lambda: select_in_out(driver, '入校')),
        ('选择出入校事由 ', lambda: select_reason(driver, '学业')),
        ('填写出入校事由 ', lambda: write_reason(driver, reason)),
    ]
    for label, action in first_steps:
        print(label, end='')
        action()
        print('Done')
    if habitation != '北京':
        # Out-of-Beijing applications are not automated.
        raise Exception('暂不支持京外入校备案,请手动填写')
    rest_steps = [
        ('选择居住地所在区 ', lambda: select_district(driver, district)),
        ('填写居住地所在街道 ', lambda: write_street(driver, street)),
    ]
    for label, action in rest_steps:
        print(label, end='')
        action()
        print('Done')
    click_inPeking(driver)
    click_check(driver)
    submit(driver)
    print('入校备案填报完毕!')
def new_run(driver, username, password):
    """Login and re-submit the previous out/in records, then screenshot."""
    sep = '================================='
    login(driver, username, password)
    print(sep)
    go_to_application_out(driver)
    select_past_out(driver)
    click_check(driver)
    submit(driver)
    print('出校备案完成')
    print(sep)
    go_to_application_in(driver)
    select_past_in(driver)
    click_inPeking(driver)
    click_check(driver)
    submit(driver)
    print('入校备案完成')
    print(sep)
    screen_capture(driver)
    print(sep)
    print('可以愉快的玩耍啦!')
def run(driver, username, password, campus, reason, destination, track,
        habitation, district, street):
    """Login and file fresh out/in campus applications, then screenshot."""
    sep = '================================='
    login(driver, username, password)
    print(sep)
    go_to_application_out(driver)
    fill_out(driver, campus, reason, destination, track)
    print(sep)
    go_to_application_in(driver)
    fill_in(driver, campus, reason, habitation, district, street)
    print(sep)
    screen_capture(driver)
    print(sep)
    print('可以愉快的玩耍啦!')
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--username', '-u', type=str, help='用户名')
    parser.add_argument('--password', '-p', type=str, help='密码')
    parser.add_argument('--campus', type=str, help='所在校区, 燕园、万柳、畅春园、圆明园、中关新园', default='燕园')
    parser.add_argument('--reason', type=str, help='出校原因, eg. 吃饭', default='上课 返回宿舍')
    parser.add_argument('--destination', type=str, help='出校目的地, eg. 北京', default='北京')
    parser.add_argument('--track', type=str, help='出校轨迹, eg. 畅春园食堂', default='东南门-理教-勺园—东南门')
    parser.add_argument('--habitation', type=str, help='入校前居住地, eg. 北京', default='北京')
    parser.add_argument('--district', type=str, help='入校前居住所在区, eg. 海淀区', default='海淀区')
    parser.add_argument('--street', type=str, help='入校前居住所在街道, eg. 燕园街道', default='燕园街道')
    args = parser.parse_args()

    # Echo the arguments with the password masked.
    args_public = copy.deepcopy(args)
    args_public.password = 'xxxxxxxx'
    print('Arguments: {}'.format(args_public))

    print('Driver Launching...')
    # Select the bundled PhantomJS binary for the current platform.
    if sys.platform == 'darwin':  # macOS
        phantomjs_path = os.path.join('phantomjs', 'phantomjs-darwin')
    elif sys.platform == 'linux':  # linux
        phantomjs_path = os.path.join('phantomjs', 'phantomjs-linux-x86_64')
    else:  # windows
        phantomjs_path = os.path.join('phantomjs', 'phantomjs-windows.exe')
    driver = webdriver.PhantomJS(executable_path=phantomjs_path)
    try:
        new_run(driver, args.username, args.password)
    finally:
        # quit() terminates the whole PhantomJS process; the previous
        # close() only closed the window and could leak the headless
        # browser process when new_run() raised.
        driver.quit()
| 31.83455 | 99 | 0.651559 |
68b17349f891b74ced3e53b628b2f8826118546b | 35,535 | py | Python | amulog/eval/maketpl.py | cpflat/amulog | b7a8c7478d2e5253158f0bce3a7f7109d23e40cb | [
"BSD-3-Clause"
] | 5 | 2019-07-03T09:57:30.000Z | 2021-02-13T13:15:47.000Z | amulog/eval/maketpl.py | cpflat/amulog | b7a8c7478d2e5253158f0bce3a7f7109d23e40cb | [
"BSD-3-Clause"
] | null | null | null | amulog/eval/maketpl.py | cpflat/amulog | b7a8c7478d2e5253158f0bce3a7f7109d23e40cb | [
"BSD-3-Clause"
] | 1 | 2021-09-09T02:21:42.000Z | 2021-09-09T02:21:42.000Z | #!/usr/bin/env python
# coding: utf-8
import logging
import json
import numpy as np
from collections import defaultdict
import amulog.manager
from amulog import common
from amulog import lt_common
from amulog.eval import cluster_metrics
from amulog.eval import structure_metrics
_logger = logging.getLogger(__package__.partition(".")[0])
class MeasureLTGen:
    """Record and load measurement data of template generation.

    This is implemented in a memory-saving architecture;
    the calculated data for each line is appended into a text file.

    Ground-truth ("answer") data and estimation ("trial") data are kept
    separately: per-line word labels go to flat text files, and aggregated
    counters go to JSON files.
    """

    # separator between words in the dumped original-word file
    SPLITTER = "@@"
    # per-word label characters used in the label files
    LABEL_NONE = "N"         # line has no answer template
    LABEL_DESCRIPTION = "D"  # description (fixed) word
    LABEL_VARIABLE = "V"     # variable (wildcard) word
    # number of digits for zero-padded trial ids in filenames
    FILEPATH_DIGIT_LENGTH = 2

    def __init__(self, conf, n_trial):
        """
        Args:
            conf: amulog configuration object.
            n_trial: number of trials to record (None for answer-only use).
        """
        self._conf = conf
        self._n_trial = n_trial
        self._current_trial = None
        self._d_answer = None
        self._d_trial = None

    def _init_answer_info(self):
        # counters for the ground-truth data
        common.mkdir(self._output_dir_answer(self._conf))
        self._d_answer = {"l_tid": list(),
                          "n_lines": int(),
                          "d_n_lines": defaultdict(int),
                          "n_words": int(),
                          "d_n_words": defaultdict(int),
                          }

    def _init_trial_info(self):
        # counters for one trial result
        common.mkdir(self._output_dir_trial(self._conf))
        if self._n_trial is None:
            return
        # trial id must fit in the zero-padded filename suffix
        assert self._n_trial < 10 ** self.FILEPATH_DIGIT_LENGTH
        self._d_trial = {"l_tid": list(),
                         "n_c_lines": int(),
                         "d_n_c_lines": defaultdict(int),
                         "n_c_words": int(),
                         "d_n_c_words": defaultdict(int),
                         }

    # file IO methods
    @staticmethod
    def _output_dir_answer(conf):
        return conf["eval"]["ltgen_answer_dir"]

    @staticmethod
    def _output_dir_trial(conf):
        return conf["eval"]["ltgen_trial_dir"]

    def _org_word_path(self):
        return "{0}/word".format(self._output_dir_answer(self._conf))

    def _answer_label_path(self):
        return "{0}/label_answer".format(self._output_dir_answer(self._conf))

    def _trial_label_path(self, trial_id):
        str_trial_id = str(trial_id).zfill(self.FILEPATH_DIGIT_LENGTH)
        return "{0}/label_trial{1}".format(self._output_dir_trial(self._conf),
                                           str_trial_id)

    def _answer_info_path(self):
        return "{0}/info_answer".format(self._output_dir_answer(self._conf))

    def _trial_info_path(self, trial_id):
        str_trial_id = str(trial_id).zfill(self.FILEPATH_DIGIT_LENGTH)
        return "{0}/info_trial{1}".format(self._output_dir_trial(self._conf),
                                          str_trial_id)

    def init_answer(self):
        """Remove previous answer files and reset answer counters."""
        common.rm(self._org_word_path())
        common.rm(self._answer_label_path())
        self._init_answer_info()

    def init_trial(self, trial_id):
        """Remove previous files of the given trial and reset its counters."""
        common.rm(self._trial_label_path(trial_id))
        self._current_trial = trial_id
        self._init_trial_info()

    def _load_answer_info(self):
        with open(self._answer_info_path(), 'r', encoding='utf-8') as f:
            obj = json.load(f)
        self._d_answer = obj

    def _load_trial_info(self):
        trial_id = self._current_trial
        with open(self._trial_info_path(trial_id), 'r', encoding='utf-8') as f:
            obj = json.load(f)
        self._d_trial = obj

    def _dump_answer_info(self):
        obj = self._d_answer
        with open(self._answer_info_path(), 'w', encoding='utf-8') as f:
            json.dump(obj, f)

    def _dump_trial_info(self):
        trial_id = self._current_trial
        obj = self._d_trial
        with open(self._trial_info_path(trial_id), 'w', encoding='utf-8') as f:
            json.dump(obj, f)

    def load(self, trial_id=None):
        """Load answer counters (and trial counters if trial_id is given)."""
        self._load_answer_info()
        if trial_id is not None:
            self.load_trial(trial_id)

    def load_trial(self, trial_id):
        """Load the counters of the given trial."""
        self._current_trial = trial_id
        self._load_trial_info()

    def dump_answer(self):
        self._dump_answer_info()

    def dump_trial(self):
        self._dump_trial_info()

    # data APIs
    @classmethod
    def _tpl2dump(cls, tpl, l_w):
        # Serialize a template as one label character per word.
        if tpl is None:
            return cls.LABEL_NONE + "\n"
        else:
            l_label = [cls.LABEL_VARIABLE if w_tpl == lt_common.REPLACER
                       else cls.LABEL_DESCRIPTION
                       for w_tpl, w_org in zip(tpl, l_w)]
            return "".join(l_label) + "\n"

    @classmethod
    def _labels_isnone(cls, labels):
        return cls.LABEL_NONE in labels

    @classmethod
    def restore_tpl(cls, labels, l_w):
        """Rebuild a template (with REPLACER wildcards) from labels and words."""
        if labels is None or labels == cls.LABEL_NONE:
            return None
        else:
            return [lt_common.REPLACER if label == cls.LABEL_VARIABLE
                    else w
                    for label, w in zip(labels, l_w)]

    @classmethod
    def restore_result(cls, labels, l_w):
        """Rebuild a display form: variable words wrapped by REPLACER marks."""
        if labels is None or labels == cls.LABEL_NONE:
            return None
        else:
            return [lt_common.REPLACER_HEAD + w + lt_common.REPLACER_TAIL
                    if label == cls.LABEL_VARIABLE else w
                    for label, w in zip(labels, l_w)]

    def add_org(self, l_w):
        """Append the original word sequence of one line."""
        added_line = self.SPLITTER.join(l_w) + "\n"
        with open(self._org_word_path(), 'a') as f:
            f.write(added_line)

    def add_answer(self, tid, tpl, l_w):
        """Append the answer labels of one line and update counters."""
        self._update_stat_answer(tid, tpl)
        added_line = self._tpl2dump(tpl, l_w)
        with open(self._answer_label_path(), 'a') as f:
            f.write(added_line)

    def add_trial(self, tid_trial, tpl_trial,
                  tid_answer, tpl_answer, l_w):
        """Append the trial labels of one line and update counters."""
        self._update_stat_trial(tid_trial, tpl_trial,
                                tid_answer, tpl_answer)
        added_line = self._tpl2dump(tpl_trial, l_w)
        with open(self._trial_label_path(self._current_trial), 'a') as f:
            f.write(added_line)

    def _update_stat_answer(self, tid, tpl):
        self._d_answer["l_tid"].append(tid)
        if tid is not None:
            self._d_answer["n_lines"] += 1
            # JSON keys must be strings, hence str(tid)
            self._d_answer["d_n_lines"][str(tid)] += 1
            n_words = len(tpl)
            self._d_answer["n_words"] += n_words
            self._d_answer["d_n_words"][str(tid)] += n_words

    def _update_stat_trial(self, tid_trial, tpl_trial,
                           tid_answer, tpl_answer):
        self._d_trial["l_tid"].append(tid_trial)
        if tid_answer is not None:
            if tpl_trial == tpl_answer:
                self._d_trial["n_c_lines"] += 1
                self._d_trial["d_n_c_lines"][str(tid_answer)] += 1
            assert len(tpl_trial) == len(tpl_answer)
            for w_trial, w_answer in zip(tpl_trial, tpl_answer):
                if w_trial == w_answer:
                    self._d_trial["n_c_words"] += 1
                    self._d_trial["d_n_c_words"][str(tid_answer)] += 1

    def iter_org(self):
        """Iterate the recorded original word sequences."""
        with open(self._org_word_path(), 'r') as f:
            for line in f:
                yield line.strip().split(self.SPLITTER)

    def iter_label_answer(self, pass_none=False):
        """Iterate answer label strings (None for unmatched lines)."""
        with open(self._answer_label_path(), 'r') as f:
            for line in f:
                labels_str = line.strip()
                if self._labels_isnone(labels_str):
                    if pass_none:
                        pass
                    else:
                        yield None
                else:
                    yield labels_str

    def iter_label_trial(self, pass_none=False):
        """Iterate trial label strings (None for unmatched lines)."""
        with open(self._trial_label_path(self._current_trial), 'r') as f:
            for line in f:
                labels_str = line.strip()
                if self._labels_isnone(labels_str):
                    if pass_none:
                        pass
                    else:
                        yield None
                else:
                    yield labels_str

    def iter_tpl_answer(self, pass_none=False, fill_wildcard=False):
        """Iterate answer templates restored from labels and words."""
        for l_w, labels in zip(self.iter_org(), self.iter_label_answer()):
            if labels is None:
                if pass_none:
                    pass
                else:
                    yield None
            elif fill_wildcard:
                yield self.restore_result(labels, l_w)
            else:
                yield self.restore_tpl(labels, l_w)

    def iter_tpl_trial(self, pass_none=False, fill_wildcard=False):
        """Iterate trial templates restored from labels and words."""
        for l_w, labels in zip(self.iter_org(), self.iter_label_trial()):
            if labels is None:
                if pass_none:
                    pass
                else:
                    yield None
            elif fill_wildcard:
                yield self.restore_result(labels, l_w)
            else:
                yield self.restore_tpl(labels, l_w)

    def tid_list_answer(self, pass_none=False):
        if pass_none:
            return [tid for tid in self._d_answer["l_tid"]
                    if tid is not None]
        else:
            return self._d_answer["l_tid"]

    def tid_list_trial(self, pass_none=False):
        if pass_none:
            return [tid for tid in self._d_trial["l_tid"]
                    if tid is not None]
        else:
            return self._d_trial["l_tid"]

    def valid_tid_list_answer(self):
        return self.tid_list_answer(pass_none=True)

    def valid_tid_list_trial(self):
        return self.tid_list_trial(pass_none=True)

    def iter_cluster_answer(self):
        """Iterate cluster (template) ids appearing in the answer as int.

        Bugfix: previously read self._d_answer["n_lines"], which is an int
        counter and has no .keys() (AttributeError); the per-cluster line
        counts live in "d_n_lines". Ids are converted to int for
        consistency with iter_cluster_trial.
        """
        return (int(key) for key in self._d_answer["d_n_lines"])

    def iter_cluster_trial(self):
        return np.unique(self.tid_list_trial(pass_none=True))

    def number_of_trials(self):
        return self._n_trial

    def number_of_messages(self):
        return self._d_answer["n_lines"]

    def number_of_answer_clusters(self):
        return len(self._d_answer["d_n_lines"])

    def number_of_answer_cluster_members(self):
        # JSON stores keys as str; convert back to int tids.
        d = {}
        for key, val in self._d_answer["d_n_lines"].items():
            tid = int(key)
            d[tid] = val
        return d

    def number_of_trial_clusters(self):
        return np.unique(self.valid_tid_list_trial()).shape[0]

    # accuracy methods
    def word_accuracy(self, recalculation=False):
        """Ratio of correctly labeled words over all words."""
        if recalculation:
            iterable_tpl_answer = self.iter_tpl_answer(pass_none=True)
            iterable_tpl_trial = self.iter_tpl_trial(pass_none=True)
            return structure_metrics.word_accuracy(
                iterable_tpl_answer, iterable_tpl_trial)
        else:
            # n_words: Number of all words in dataset
            n_words = self._d_answer["n_words"]
            # n_c_words: Number of words correctly labeled in dataset
            n_c_words = self._d_trial["n_c_words"]
            return 1.0 * n_c_words / n_words

    def line_accuracy(self, recalculation=False):
        """Ratio of completely correct lines over all lines."""
        if recalculation:
            iterable_tpl_answer = self.iter_tpl_answer(pass_none=True)
            iterable_tpl_trial = self.iter_tpl_trial(pass_none=True)
            return structure_metrics.line_accuracy(
                iterable_tpl_answer, iterable_tpl_trial)
        else:
            # n_lines: Number of all lines in dataset
            n_lines = self._d_answer["n_lines"]
            # n_c_lines: Number of lines correctly labeled in dataset
            n_c_lines = self._d_trial["n_c_lines"]
            return 1.0 * n_c_lines / n_lines

    def tpl_word_accuracy(self, recalculation=False):
        """Word accuracy averaged per answer template cluster."""
        if recalculation:
            iterable_tpl_answer = self.iter_tpl_answer(pass_none=True)
            iterable_tpl_trial = self.iter_tpl_trial(pass_none=True)
            l_tid_answer = self.tid_list_answer(pass_none=True)
            return structure_metrics.tpl_word_accuracy(
                iterable_tpl_answer, iterable_tpl_trial, l_tid_answer)
        else:
            # d_n_words: Number of words in a template cluster
            d_n_words = self._d_answer["d_n_words"]
            # d_n_c_words: Number of words correctly labeled in a template cluster
            d_n_c_words = self._d_trial["d_n_c_words"]
            l_acc = []
            for key in d_n_words:  # key: str(tid)
                l_acc.append(d_n_c_words.get(key, 0) / d_n_words.get(key, 0))
            return np.average(l_acc)

    def tpl_accuracy(self, recalculation=False):
        """Line accuracy averaged per answer template cluster."""
        if recalculation:
            iterable_tpl_answer = self.iter_tpl_answer(pass_none=True)
            iterable_tpl_trial = self.iter_tpl_trial(pass_none=True)
            l_tid_answer = self.tid_list_answer(pass_none=True)
            return structure_metrics.tpl_accuracy(
                iterable_tpl_answer, iterable_tpl_trial, l_tid_answer)
        else:
            # d_n_lines: Number of lines in a template cluster
            d_n_lines = self._d_answer["d_n_lines"]
            # d_n_c_lines: Number of lines correctly labeled in a template cluster
            d_n_c_lines = self._d_trial["d_n_c_lines"]
            l_acc = []
            for key in d_n_lines:
                l_acc.append(d_n_c_lines.get(key, 0) / d_n_lines.get(key, 0))
            return np.average(l_acc)

    def tpl_word_accuracy_dist(self):
        """Per-cluster word accuracy, keyed by int tid."""
        # d_n_words: Number of words in a template cluster
        d_n_words = self._d_answer["d_n_words"]
        # d_n_c_words: Number of words correctly labeled in a template cluster
        d_n_c_words = self._d_trial["d_n_c_words"]
        ret = {}
        for key in d_n_words:  # key: str(tid)
            tid = int(key)
            ret[tid] = d_n_c_words.get(key, 0) / d_n_words.get(key, 0)
        return ret

    def tpl_line_accuracy_dist(self):
        """Per-cluster line accuracy, keyed by int tid."""
        # d_n_lines: Number of lines in a template cluster
        d_n_lines = self._d_answer["d_n_lines"]
        # d_n_c_lines: Number of lines correctly labeled in a template cluster
        d_n_c_lines = self._d_trial["d_n_c_lines"]
        ret = {}
        for key in d_n_lines:  # key: str(tid)
            tid = int(key)
            ret[tid] = d_n_c_lines.get(key, 0) / d_n_lines.get(key, 0)
        return ret

    def tpl_description_accuracy(self):
        iterable_tpl_answer = self.iter_tpl_answer(pass_none=True)
        iterable_tpl_trial = self.iter_tpl_trial(pass_none=True)
        l_tid_answer = self.tid_list_answer(pass_none=True)
        return structure_metrics.tpl_desc_accuracy(
            iterable_tpl_answer, iterable_tpl_trial, l_tid_answer)

    def tpl_variable_accuracy(self):
        iterable_tpl_answer = self.iter_tpl_answer(pass_none=True)
        iterable_tpl_trial = self.iter_tpl_trial(pass_none=True)
        l_tid_answer = self.tid_list_answer(pass_none=True)
        return structure_metrics.tpl_var_accuracy(
            iterable_tpl_answer, iterable_tpl_trial, l_tid_answer)

    def rand_score(self):
        l_tid_answer = self.valid_tid_list_answer()
        l_tid_trial = self.valid_tid_list_trial()
        return cluster_metrics.rand_score(l_tid_answer, l_tid_trial)

    def adjusted_rand_score(self):
        from sklearn.metrics import adjusted_rand_score as score
        l_tid_answer = self.valid_tid_list_answer()
        l_tid_trial = self.valid_tid_list_trial()
        return score(l_tid_answer, l_tid_trial)

    def f1_score(self):
        l_tid_answer = self.valid_tid_list_answer()
        l_tid_trial = self.valid_tid_list_trial()
        return cluster_metrics.precision_recall_fscore(
            l_tid_answer, l_tid_trial)[2]

    def parsing_accuracy(self):
        l_tid_answer = self.valid_tid_list_answer()
        l_tid_trial = self.valid_tid_list_trial()
        return cluster_metrics.parsing_accuracy(l_tid_answer, l_tid_trial)

    def cluster_accuracy(self):
        l_tid_answer = self.valid_tid_list_answer()
        l_tid_trial = self.valid_tid_list_trial()
        return cluster_metrics.cluster_accuracy(l_tid_answer, l_tid_trial)

    def overdiv_ratio(self):
        l_tid_answer = self.valid_tid_list_answer()
        l_tid_trial = self.valid_tid_list_trial()
        return cluster_metrics.over_division_cluster_ratio(l_tid_answer,
                                                          l_tid_trial)

    def overagg_ratio(self):
        l_tid_answer = self.valid_tid_list_answer()
        l_tid_trial = self.valid_tid_list_trial()
        return cluster_metrics.over_aggregation_cluster_ratio(
            l_tid_answer, l_tid_trial)

    def homogeneity_score(self):
        from sklearn.metrics import homogeneity_score as score
        l_tid_answer = self.valid_tid_list_answer()
        l_tid_trial = self.valid_tid_list_trial()
        return score(l_tid_answer, l_tid_trial)

    def completeness_score(self):
        from sklearn.metrics import completeness_score as score
        l_tid_answer = self.valid_tid_list_answer()
        l_tid_trial = self.valid_tid_list_trial()
        return score(l_tid_answer, l_tid_trial)

    def v_measure_score(self, beta=1.0):
        from sklearn.metrics import v_measure_score as score
        l_tid_answer = self.valid_tid_list_answer()
        l_tid_trial = self.valid_tid_list_trial()
        return score(l_tid_answer, l_tid_trial, beta=beta)
def measure_accuracy_answer(conf, targets, n_trial=None):
    """Build and persist the ground-truth (answer) labels for all targets."""
    timer = common.Timer("measure-accuracy answer", output=_logger)
    timer.start()
    mlt = MeasureLTGen(conf, n_trial)
    mlt.init_answer()

    from amulog import lt_import
    table_answer = lt_common.TemplateTable()
    ltgen_answer = lt_import.init_ltgen_import(conf, table_answer)

    for pline in amulog.manager.iter_plines(conf, targets):
        words = pline["words"]
        tid, _ = ltgen_answer.process_line(pline)
        tpl = None if tid is None else ltgen_answer.get_tpl(tid)
        mlt.add_org(words)
        mlt.add_answer(tid, tpl, words)
    mlt.dump_answer()
    timer.stop()
    return mlt
def measure_accuracy_trial_offline(conf, targets, n_trial=None, mlt=None):
    """Run offline template generation n_trial times and record each result.

    For every trial, all target lines are fed to the generator at once
    (process_offline), and the per-line results are recorded against the
    previously dumped answer data.
    """
    if n_trial is None:
        n_trial = int(conf["eval"]["n_trial"])
    if mlt is None:
        mlt = MeasureLTGen(conf, n_trial)
        mlt.load()

    for trial_id in range(n_trial):
        timer = common.Timer("measure-accuracy-offline trial{0}".format(
            trial_id), output=_logger)
        timer.start()
        mlt.init_trial(trial_id)
        table = lt_common.TemplateTable()
        ltgen = amulog.manager.init_ltgen_methods(conf, table)

        # message id (mid) -> parsed line, for batch processing
        input_lines = list(amulog.manager.iter_plines(conf, targets))
        d_plines = {mid: pline for mid, pline in enumerate(input_lines)}
        d_tid = ltgen.process_offline(d_plines)

        iterobj = zip(input_lines,
                      mlt.tid_list_answer(),
                      mlt.iter_tpl_answer())
        for mid, (pline, tid_answer, tpl_answer) in enumerate(iterobj):
            if tid_answer is None:
                # line without ground truth: recorded as unmatched
                tid_trial = None
                tpl_trial = None
            else:
                tid_trial = d_tid[mid]
                if tid_trial is None:
                    tpl_trial = None
                else:
                    tpl_trial = ltgen.get_tpl(tid_trial)
            mlt.add_trial(tid_trial, tpl_trial,
                          tid_answer, tpl_answer, pline["words"])
        mlt.dump_trial()
        timer.stop()

    return mlt
def measure_accuracy_trial_online(conf, targets_train, targets_test,
                                  n_trial=None, mlt=None):
    """Run online template generation n_trial times and record each result.

    Optionally pre-trains the generator on targets_train (batch), then
    processes targets_test line by line, recording each estimation
    against the previously dumped answer data.
    """
    if n_trial is None:
        n_trial = int(conf["eval"]["n_trial"])
    if mlt is None:
        mlt = MeasureLTGen(conf, n_trial)
        mlt.load()

    from amulog import log_db
    for trial_id in range(n_trial):
        timer = common.Timer("measure-accuracy-online trial{0}".format(
            trial_id), output=_logger)
        timer.start()
        mlt.init_trial(trial_id)
        table = lt_common.TemplateTable()
        ltgen = amulog.manager.init_ltgen_methods(conf, table)

        if targets_train is not None:
            # warm up the generator with the training targets
            iterobj = amulog.manager.iter_plines(conf, targets_train)
            d_plines = {mid: pline for mid, pline in enumerate(iterobj)}
            ltgen.process_offline(d_plines)

        iterobj = zip(amulog.manager.iter_plines(conf, targets_test),
                      mlt.tid_list_answer(),
                      mlt.iter_tpl_answer())
        for pline, tid_answer, tpl_answer in iterobj:
            if tid_answer is None:
                # line without ground truth: recorded as unmatched
                tid_trial = None
                tpl_trial = None
            else:
                tid_trial, _ = ltgen.process_line(pline)
                tpl_trial = ltgen.get_tpl(tid_trial)
            mlt.add_trial(tid_trial, tpl_trial,
                          tid_answer, tpl_answer, pline["words"])
        mlt.dump_trial()
        timer.stop()

    return mlt
def get_accuracy_average(conf, n_trial, functions):
    """Average the named MeasureLTGen metric methods over all trials.

    Args:
        functions: iterable of MeasureLTGen method names (str).
    Returns:
        dict mapping each method name to its average over the trials.
    """
    mlt = MeasureLTGen(conf, n_trial)
    mlt.load()
    per_trial = []
    for trial_id in range(n_trial):
        mlt.load_trial(trial_id)
        per_trial.append({name: getattr(mlt, name)() for name in functions})
    return {name: np.average([d[name] for d in per_trial])
            for name in functions}
def get_templates(conf, n_trial, trial_id=0, answer=False, mlt=None):
    """Get template list after all log parsing.

    In online algorithms, template structure can be changed while processing.
    This function pick up a result for the last message with each template.
    """
    if mlt is None:
        mlt = MeasureLTGen(conf, n_trial)
        mlt.load(trial_id)

    if answer:
        tids = np.array(mlt.tid_list_answer())
        iterobj = mlt.iter_tpl_answer()
    else:
        tids = np.array(mlt.tid_list_trial())
        iterobj = mlt.iter_tpl_trial()

    # For every tid, remember the message id of its last occurrence
    # (later assignments overwrite earlier ones).
    last_mid = {}
    for mid, tid in enumerate(tids):
        if tid is not None:
            last_mid[tid] = mid
    mid_to_tid = {mid: tid for tid, mid in last_mid.items()}

    # Collect the template seen at each remembered message id.
    d_tpl = {}
    for mid, tpl in enumerate(iterobj):
        if mid in mid_to_tid:
            d_tpl[mid_to_tid[mid]] = tpl
    return d_tpl
def offline_structure_metrics(conf, n_trial, trial_id=0, partial=False):
    """Compute structure metrics against the final (post-parsing) templates.

    Returns (word_acc, line_acc, tpl_acc, tpl_word_acc), extended with
    (tpl_desc_fail, tpl_var_fail) when partial is True.
    """
    mlt = MeasureLTGen(conf, n_trial)
    mlt.load(trial_id)
    d_tpl = get_templates(conf, n_trial, trial_id, mlt=mlt)
    tids = mlt.tid_list_trial(pass_none=True)

    def _answer():
        # fresh generator of answer templates for each metric
        return mlt.iter_tpl_answer(pass_none=True)

    def _trial():
        # fresh iterator mapping each trial tid to its final template
        return map(lambda x: d_tpl[x], tids)

    word_acc = structure_metrics.word_accuracy(_answer(), _trial())
    line_acc = structure_metrics.line_accuracy(_answer(), _trial())
    tpl_acc = structure_metrics.tpl_accuracy(_answer(), _trial(), tids)
    tpl_word_acc = structure_metrics.tpl_word_accuracy(
        _answer(), _trial(), tids)
    if not partial:
        return word_acc, line_acc, tpl_acc, tpl_word_acc

    tpl_desc_fail = structure_metrics.tpl_desc_accuracy(
        _answer(), _trial(), tids)
    tpl_var_fail = structure_metrics.tpl_var_accuracy(
        _answer(), _trial(), tids)
    return (word_acc, line_acc, tpl_acc, tpl_word_acc,
            tpl_desc_fail, tpl_var_fail)
def search_fail_template(conf, n_trial, trial_id=0, pass_similar=True):
    """Print answer/trial template pairs that disagree.

    With pass_similar, only the first message of each answer cluster
    is inspected.
    """
    mlt = MeasureLTGen(conf, n_trial)
    mlt.load(trial_id)

    seen = set()
    iterobj = zip(mlt.iter_org(), mlt.tid_list_answer(),
                  mlt.iter_label_answer(), mlt.iter_label_trial())
    for l_w, tid_answer, labels_answer, labels_trial in iterobj:
        if pass_similar and tid_answer in seen:
            continue
        if labels_answer != labels_trial:
            result_answer = mlt.restore_result(labels_answer, l_w)
            print("Answer: {0}".format(" ".join(result_answer)))
            result_trial = mlt.restore_result(labels_trial, l_w)
            print("Trial: {0}".format(" ".join(result_trial)))
            print("--------------------")
        seen.add(tid_answer)
def search_diff_template(conf1, conf2, n_trial,
                         trial_id1=0, trial_id2=0, pass_similar=True):
    """Print templates where exactly one of two configurations fails.

    Lines prefixed '<' failed only under conf1; '>' failed only under conf2.
    """
    mlt1 = MeasureLTGen(conf1, n_trial)
    mlt1.load(trial_id1)
    mlt2 = MeasureLTGen(conf2, n_trial)
    mlt2.load(trial_id2)

    seen = set()
    iterobj = zip(mlt1.iter_org(),
                  mlt1.tid_list_answer(),
                  mlt1.iter_label_answer(),
                  mlt1.iter_label_trial(),
                  mlt2.iter_label_trial())
    for l_w, tid_answer, labels_answer, labels_trial1, labels_trial2 in iterobj:
        if pass_similar and tid_answer in seen:
            continue
        correct1 = labels_trial1 == labels_answer
        correct2 = labels_trial2 == labels_answer
        if correct2 and not correct1:
            tpl_answer = mlt1.restore_result(labels_answer, l_w)
            tpl_trial1 = mlt1.restore_result(labels_trial1, l_w)
            print("< Answer: {0}".format(" ".join(tpl_answer)))
            print("< Trial: {0}".format(" ".join(tpl_trial1)))
            print("--------------------")
        elif correct1 and not correct2:
            tpl_answer = mlt1.restore_result(labels_answer, l_w)
            tpl_trial2 = mlt2.restore_result(labels_trial2, l_w)
            print("> Answer: {0}".format(" ".join(tpl_answer)))
            print("> Trial: {0}".format(" ".join(tpl_trial2)))
            print("--------------------")
        seen.add(tid_answer)
def _sample_partial_cluster(a_true, a_pred, n_samples):
    """List clusters of a_true that overlap multiple clusters of a_pred,
    sampling up to n_samples message indexes for each overlapping pair.

    Returns:
        list of (tid_true, div), where div is a list of
        (tid_pred, count, sampled index array) tuples.
    """
    from sklearn.metrics.cluster import contingency_matrix
    cm = contingency_matrix(a_true, a_pred, sparse=True)
    # sklearn.metrics.cluster.contingency_matrix now uses
    # inverse output of np.unique(a_true, inverse=true)
    # as the input of contingency matrix.
    # Therefore, the unique output works as value mapping.
    a_true_map, a_true_inverse = np.unique(a_true, return_inverse=True)
    nz_true, _ = cm.nonzero()
    l_cluster = []
    for cls_true, uniq_cnt in zip(*np.unique(nz_true, return_counts=True)):
        if uniq_cnt > 1:
            # this true-side cluster spreads over multiple predicted clusters
            tid_true = a_true_map[cls_true]
            div = []
            for tid_pred, cnt_pred in zip(*np.unique(
                    a_pred[a_true_inverse == cls_true], return_counts=True)):
                # indexes of messages falling in this (true, pred) overlap
                a_index = np.where((a_true == tid_true) &
                                   (a_pred == tid_pred))[0]
                tmp_n_samples = min(n_samples, a_index.shape[0])
                a_index_sample = a_index[:tmp_n_samples]
                div.append((tid_pred, cnt_pred, a_index_sample))
            l_cluster.append((tid_true, div))
    return l_cluster
def _get_complete_clusters(a_true, a_pred):
from sklearn.metrics.cluster import contingency_matrix
cm = contingency_matrix(a_true, a_pred, sparse=True)
nz_true, _ = cm.nonzero()
tids = []
for tid_true, uniq_cnt in zip(*np.unique(nz_true, return_counts=True)):
if uniq_cnt == 1:
tids.append(tid_true)
return tids
def search_fail_overdiv(conf, n_trial, trial_id=0, n_samples=1):
    """Search failed log clusters of over-division.
    e.g., 1 cls in answer ≡ 3 cls in trial

    Prints, for every over-divided answer cluster, sample messages from
    each trial-side sub-cluster it was split into.
    """
    timer = common.Timer("test fail_overdiv", output=_logger)
    timer.start()
    mlt = MeasureLTGen(conf, n_trial)
    mlt.load(trial_id)

    # overdiv cluster information
    a_tid_answer = np.array(mlt.valid_tid_list_answer())
    a_tid_trial = np.array(mlt.valid_tid_list_trial())
    l_cluster = _sample_partial_cluster(a_tid_answer, a_tid_trial, n_samples)
    timer.lap("lap1")

    # make sample tpl list to show
    s_index_to_show = set()
    for _, div in l_cluster:
        samples = [a_index_sample for _, _, a_index_sample in div]
        s_index_to_show = s_index_to_show | set(np.ravel(samples))
    timer.lap("lap2")

    # get templates for the indexes to show
    iterobj = mlt.iter_tpl_trial(pass_none=True, fill_wildcard=True)
    d_result = {index: result for index, result in enumerate(iterobj)
                if index in s_index_to_show}
    timer.lap("lap3")

    # show
    for tid_answer, div in l_cluster:
        print("Template ID {0} (in answer)".format(tid_answer))
        # sub-clusters ordered by size, largest first
        iterobj = sorted(div, key=lambda x: x[1], reverse=True)
        for cls_id, (tid_trial, cnt_trial, a_index) in enumerate(iterobj):
            for index in a_index:
                print("{0} ({1}): {2}".format(cls_id, cnt_trial,
                                              " ".join(d_result[index])))
        print("--------------------")
    timer.stop()
def search_fail_overagg(conf, n_trial, trial_id=0, n_samples=1):
    """Search failed log clusters of over-division.
    e.g., 3 cls in answer ≡ 1 cls in trial

    Note: despite the shared docstring wording, this reports
    over-AGGREGATION: trial clusters that merge several answer clusters
    (true/pred arguments are swapped relative to search_fail_overdiv).
    """
    mlt = MeasureLTGen(conf, n_trial)
    mlt.load(trial_id)

    # overagg cluster information
    a_tid_answer = np.array(mlt.valid_tid_list_answer())
    a_tid_trial = np.array(mlt.valid_tid_list_trial())
    l_cluster = _sample_partial_cluster(a_tid_trial, a_tid_answer, n_samples)

    # make sample tpl list to show
    s_index_to_show = set()
    for _, div in l_cluster:
        samples = [a_index_sample for _, _, a_index_sample in div]
        s_index_to_show = s_index_to_show | set(np.ravel(samples))

    # get templates for the indexes to show
    iterobj = mlt.iter_tpl_trial(pass_none=True, fill_wildcard=True)
    d_result = {index: result for index, result in enumerate(iterobj)
                if index in s_index_to_show}

    # show
    for tid_trial, div in l_cluster:
        print("Cluster {0} (in trial)".format(tid_trial))
        # merged answer clusters ordered by size, largest first
        iterobj = sorted(div, key=lambda x: x[1], reverse=True)
        for tid_answer, cnt_answer, a_index in iterobj:
            for index in a_index:
                print("ltid {0} ({1}): {2}".format(tid_answer, cnt_answer,
                                                   " ".join(d_result[index])))
        print("--------------------")
def search_diff_overdiv(conf1, conf2, n_trial, trial_id=0, n_samples=1):
    """Search log clusters that is accurate in conf1,
    but failed of over-division in conf2.
    e.g., 1 cls in answer ≡ 1 cls in trial-conf1 ≡ 3 cls in trial-conf2"""
    mlt1 = MeasureLTGen(conf1, n_trial)
    mlt1.load(trial_id)
    mlt2 = MeasureLTGen(conf2, n_trial)
    mlt2.load(trial_id)

    # template ids whose clusters are reproduced exactly in conf1
    a_tid_answer = np.array(mlt1.valid_tid_list_answer())
    accurate_tids = _get_complete_clusters(
        a_tid_answer, np.array(mlt1.valid_tid_list_trial()))

    # over-divided clusters in conf2, restricted to conf1-accurate ones
    a_tid_trial2 = np.array(mlt2.valid_tid_list_trial())
    clusters = [(tid_true, divided)
                for tid_true, divided in _sample_partial_cluster(
                    a_tid_answer, a_tid_trial2, n_samples)
                if tid_true in accurate_tids]

    # collect the sampled message indexes whose templates will be printed
    indexes_to_show = set()
    for _, divided in clusters:
        for _, _, sample_indexes in divided:
            indexes_to_show.update(np.ravel(sample_indexes))

    # conf2 templates for the sampled indexes only
    tpl_iter = mlt2.iter_tpl_trial(pass_none=True, fill_wildcard=True)
    d_tpl = {idx: tpl for idx, tpl in enumerate(tpl_iter)
             if idx in indexes_to_show}

    # print each answer cluster with its conf2 sub-clusters, largest first
    for tid_answer, divided in clusters:
        print("Template ID {0} (in answer)".format(tid_answer))
        ordered = sorted(divided, key=lambda item: item[1], reverse=True)
        for cid, (tid_trial, cnt_trial, sample_indexes) in enumerate(ordered):
            for idx in sample_indexes:
                print("{0} ({1}): {2}".format(
                    cid, cnt_trial, " ".join(d_tpl[idx])))
        print("--------------------")
def search_diff_overagg(conf1, conf2, n_trial, trial_id=0, n_samples=1):
    """Search log clusters that is accurate in conf1,
    but failed of over-aggregation in conf2.
    e.g., 3 cls in answer ≡ 3 cls in trial-conf1 ≡ 1 cls in trial-conf2

    Results are printed to stdout: one section per over-aggregated conf2
    cluster, listing sampled templates for each of its answer components.
    """
    mlt1 = MeasureLTGen(conf1, n_trial)
    mlt1.load(trial_id)
    mlt2 = MeasureLTGen(conf2, n_trial)
    mlt2.load(trial_id)
    # clusters accurate in conf1
    a_tid_answer = np.array(mlt1.valid_tid_list_answer())
    a_tid_trial1 = np.array(mlt1.valid_tid_list_trial())
    tids = _get_complete_clusters(a_tid_answer, a_tid_trial1)
    # cluster information that is overagg in conf2
    a_tid_trial2 = np.array(mlt2.valid_tid_list_trial())
    l_cls_all = _sample_partial_cluster(a_tid_trial2, a_tid_answer, n_samples)
    # keep only trial clusters whose answer-side components are ALL
    # accurate in conf1; each division element is a (tid, count,
    # index-array) tuple (same layout as unpacked in the display loop
    # below) -- only the tid field is needed for the filter
    l_cluster = []
    for tid_trial2, div in l_cls_all:
        for tid_answer, _cnt_answer, _a_index in div:
            if tid_answer not in tids:
                break
        else:
            l_cluster.append((tid_trial2, div))
    # make sample tpl list to show
    s_index_to_show = set()
    for _, div in l_cluster:
        samples = [a_index_sample for _, _, a_index_sample in div]
        s_index_to_show = s_index_to_show | set(np.ravel(samples))
    # get templates for the indexes to show
    iterobj = mlt2.iter_tpl_trial(pass_none=True, fill_wildcard=True)
    d_result = {index: result for index, result in enumerate(iterobj)
                if index in s_index_to_show}
    # show each over-aggregated cluster, largest answer component first
    for tid_trial2, div in l_cluster:
        print("Cluster {0} (in trial)".format(tid_trial2))
        iterobj = sorted(div, key=lambda x: x[1], reverse=True)
        for tid_answer, cnt_answer, a_index in iterobj:
            for index in a_index:
                print("ltid {0} ({1}): {2}".format(tid_answer, cnt_answer,
                                                   " ".join(d_result[index])))
        print("--------------------")
def measure_time_online(conf, targets_train, targets_test, n_trial=None):
    """Measure wall-clock time of online (line-by-line) template generation.

    Returns a dict mapping trial id to elapsed seconds for processing
    *targets_test*; optional training on *targets_train* happens before
    the timed section.
    """
    if n_trial is None:
        n_trial = int(conf["eval"]["n_trial_time"])

    def _timed_trial(trial_id):
        # fresh generator per trial so state does not leak across trials
        ltgen = amulog.manager.init_ltgen_methods(
            conf, lt_common.TemplateTable())
        # warm-up / training phase is deliberately outside the timer
        if targets_train is not None:
            for pline in amulog.manager.iter_plines(conf, targets_train):
                ltgen.process_line(pline)
        timer = common.Timer(
            "measure-time-online trial{0}".format(trial_id), output=None)
        timer.start()
        for pline in amulog.manager.iter_plines(conf, targets_test):
            ltgen.process_line(pline)
        timer.stop()
        return timer.total_time().total_seconds()

    return {trial_id: _timed_trial(trial_id) for trial_id in range(n_trial)}
def measure_time_offline(conf, targets_test, n_trial=None):
    """Measure wall-clock time of offline (batch) template generation.

    Returns a dict mapping trial id to elapsed seconds; the timed section
    includes loading the input, since offline processing requires the
    whole input materialized at once.
    """
    if n_trial is None:
        n_trial = int(conf["eval"]["n_trial_time"])
    d_time = {}
    for trial_id in range(n_trial):
        ltgen = amulog.manager.init_ltgen_methods(
            conf, lt_common.TemplateTable())
        timer = common.Timer(
            "measure-time-offline trial{0}".format(trial_id), output=None)
        timer.start()
        # message id -> parsed line, keyed by input order
        d_plines = dict(
            enumerate(amulog.manager.iter_plines(conf, targets_test)))
        ltgen.process_offline(d_plines)
        timer.stop()
        d_time[trial_id] = timer.total_time().total_seconds()
    return d_time
| 37.287513 | 85 | 0.618939 |
36c36975f2269001b719d29b43d9fcb82b4958e5 | 2,101 | py | Python | adjutant_moc/tests/unit/test_actions/test_users.py | CCI-MOC/adjutant-moc | 015de325dced135f56867c2ca8e07814cc950e36 | [
"Apache-2.0"
] | 1 | 2021-01-22T18:21:42.000Z | 2021-01-22T18:21:42.000Z | adjutant_moc/tests/unit/test_actions/test_users.py | CCI-MOC/adjutant-moc | 015de325dced135f56867c2ca8e07814cc950e36 | [
"Apache-2.0"
] | 14 | 2020-05-06T13:39:21.000Z | 2022-02-22T16:27:01.000Z | adjutant_moc/tests/unit/test_actions/test_users.py | CCI-MOC/adjutant-moc | 015de325dced135f56867c2ca8e07814cc950e36 | [
"Apache-2.0"
] | 3 | 2019-01-26T20:10:10.000Z | 2019-11-04T16:39:46.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from adjutant.common.tests import fake_clients
from adjutant_moc.tests import base
from adjutant_moc.actions import users
class UserActionTests(base.TestBase):
    """Tests for the MOC user-invitation action."""

    # token payload used to confirm a submitted action
    token_confirm = {'confirm': True}

    def setUp(self) -> None:
        super().setUp()
        # three pre-existing projects and users in the fake identity backend
        self.projects = [
            fake_clients.FakeProject(name=uuid.uuid4().hex),
            fake_clients.FakeProject(name=uuid.uuid4().hex),
            fake_clients.FakeProject(name=uuid.uuid4().hex)
        ]
        self.users = [
            fake_clients.FakeUser(name='user0@example.com'),
            fake_clients.FakeUser(name='user1@example.com'),
            fake_clients.FakeUser(name='user2@example.com')
        ]
        fake_clients.setup_identity_cache(projects=self.projects,
                                          users=self.users)

    def test_invite_user(self):
        inviter = self.users[1]
        invitee = self.users[0]
        target_project = self.projects[0]
        task = self.new_task(inviter)
        data = {
            'email': invitee.name,
            'project_id': target_project.id,
            'roles': ['member']
        }
        action = users.MocInviteUserAction(data, task=task, order=1)

        # both prepare and approve must leave the action valid
        action.prepare()
        self.assertEqual(action.valid, True)
        action.approve()
        self.assertEqual(action.valid, True)

        # submitting the confirmation token grants the requested roles
        token = dict(self.token_confirm,
                     user=self.get_headers_for(invitee))
        action.submit(token)

        granted = self.identity._get_roles_as_names(invitee, target_project)
        self.assertEqual(sorted(granted), sorted(data['roles']))
| 32.828125 | 74 | 0.646359 |
0d38fb3d24e96f5890b6ba7a75d695fda121be68 | 4,255 | py | Python | AskMe/settings.py | pratikroy/AskMe | 732f84e0d4f215a5232703a6e02ca19e430c27ab | [
"MIT"
] | null | null | null | AskMe/settings.py | pratikroy/AskMe | 732f84e0d4f215a5232703a6e02ca19e430c27ab | [
"MIT"
] | 18 | 2020-03-24T17:39:06.000Z | 2022-03-12T00:01:34.000Z | AskMe/settings.py | pratikroy/AskMe | 732f84e0d4f215a5232703a6e02ca19e430c27ab | [
"MIT"
] | null | null | null | """
Django settings for AskMe project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Project root: two levels up from this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; move it to an
# environment variable before any production deployment.
SECRET_KEY = 'elpzdu!hjusrtjbq*cin4u@dyv&^b*p$tf-@p3m6sg&la_fqde'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty: with DEBUG=True Django serves localhost only; must be filled
# with real hostnames before deployment.
ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    # Django built-ins
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    # third-party: DRF + token auth, allauth/rest_auth, forms, webpack
    'rest_framework',
    'rest_framework.authtoken',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'rest_auth',
    'rest_auth.registration',
    'crispy_forms',
    'webpack_loader',
    # local apps
    'users',
    'questions',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'AskMe.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'AskMe.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# SQLite file database -- development only.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# NOTE(review): LOGIN_URL is relative; Django conventionally uses the
# absolute form "/accounts/login/" -- confirm redirects behave as intended.
LOGIN_URL = "accounts/login/"
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]

# Custom user model
AUTH_USER_MODEL = "users.CustomUser"

# django crispy forms
CRISPY_TEMPLATE_PACK = "bootstrap4"

# django.contrib.sites
SITE_ID = 1

# django-allauth: e-mail is required at signup but never verified
ACCOUNT_EMAIL_VERIFICATION = "none"
ACCOUNT_EMAIL_REQUIRED = (True)

# DRF default authentication classes: token auth for API clients,
# session auth for the browsable API; all endpoints require login.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES':(
        'rest_framework.authentication.TokenAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    ),
    'DEFAULT_PERMISSION_CLASSES':(
        'rest_framework.permissions.IsAuthenticated',
    ),
}

# webpack bundle location and build manifest for django-webpack-loader
WEBPACK_LOADER = {
    'DEFAULT': {
        'BUNDLE_DIR_NAME': 'dist/',
        'STATS_FILE': os.path.join(BASE_DIR, 'frontend', 'webpack-stats.json')
    }
}
| 24.176136 | 91 | 0.694712 |
62991c9dc19a2dbf6b032f60b2f0df11986b95b1 | 2,083 | py | Python | sim_xps_spectra/x_sections/unit_tests/utest_standard_objs.py | RFogarty1/sim_xps_spectra | 26933a8b00678494121507e66205cf4c02d9b357 | [
"MIT"
] | null | null | null | sim_xps_spectra/x_sections/unit_tests/utest_standard_objs.py | RFogarty1/sim_xps_spectra | 26933a8b00678494121507e66205cf4c02d9b357 | [
"MIT"
] | null | null | null | sim_xps_spectra/x_sections/unit_tests/utest_standard_objs.py | RFogarty1/sim_xps_spectra | 26933a8b00678494121507e66205cf4c02d9b357 | [
"MIT"
] | null | null | null |
import math
import types
import unittest
import sim_xps_spectra.x_sections.standard_objs as tCode
class TestStandardCrossSectionCalculator(unittest.TestCase):
    """Tests for CrossSectionCalculatorStandard against a stubbed database."""

    def setUp(self):
        # tabulated (photon-energy, value) pairs served by the fake database
        self.testXSectionsA = [(10, 20), (20, 25), (30, 35)]
        self.testAsymA = [(10, 2), (20, 4), (30, 6)]
        self.xSectionPreFactor = 1
        self.createTestObjs()

    def createTestObjs(self):
        # stub database: returns the tabulated data regardless of the label
        self.databaseA = types.SimpleNamespace(
            getHvAgainstAOCrossSections=lambda label: self.testXSectionsA,
            getHvAgainstAOAsymFactors=lambda label: self.testAsymA)
        self.tCalcA = tCode.CrossSectionCalculatorStandard(self.databaseA)

    def testExpectedHvReturned(self):
        # each requested hv should resolve to a tabulated hv (10, 20, 30)
        requestedHvVals = [5, 16, 26]
        expHvVals = [10, 20, 30]
        actHvVals = [
            self.tCalcA.getHvUsedToCalculateCrossSection("fakeLabel", hv)
            for hv in requestedHvVals]
        self.assertEqual(expHvVals, actHvVals)

    def testAngularIndependentCrossSection(self):
        requestedHvVals = [5, 16, 26]
        expCrossSections = [self.xSectionPreFactor * val
                            for val in [20, 25, 35]]
        actCrossSections = [
            self.tCalcA.calculateTotalCrossSection("S3s", hv)
            for hv in requestedHvVals]
        self.assertEqual(expCrossSections, actCrossSections)

    def testAngularDependentCrossSection(self):
        actOutput = self.tCalcA.calculateTotalCrossSection(
            "fakeLabel", 22, angle=50)
        self.assertAlmostEqual(19.0118066625099, actOutput)

    def testAngularDependentLinearPolarisedCrossSection(self):
        actOutput = self.tCalcA.calculateTotalCrossSection(
            "fakeLabel", 22, angle=50, pol="linear")
        self.assertAlmostEqual(36.9763866749802, actOutput)

    def testErrorRaisedIfAsymAndXSectionHvDifferent(self):
        # make the asymmetry table's hv values inconsistent with the
        # cross-section table; both entry points should then assert
        self.testAsymA = [(1, 20)]
        self.createTestObjs()
        with self.assertRaises(AssertionError):
            self.tCalcA.calculateTotalCrossSection(
                "fakelabel", 20, angle=20)
        with self.assertRaises(AssertionError):
            self.tCalcA.getHvUsedToCalculateCrossSection(
                "fakelabel", 20, angle=20)
| 35.913793 | 106 | 0.763802 |
44e1b6d1fe0e428c5bf30a1da05ef9ca7ec795fb | 429 | py | Python | stubs/micropython-v1_9_4-esp8266/neopixel.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | stubs/micropython-v1_9_4-esp8266/neopixel.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | stubs/micropython-v1_9_4-esp8266/neopixel.py | mattytrentini/micropython-stubs | 4d596273823b69e9e5bcf5fa67f249c374ee0bbc | [
"MIT"
] | null | null | null | """
Module: 'neopixel' on esp8266 v1.9.4
"""
# MCU: (sysname='esp8266', nodename='esp8266', release='2.2.0-dev(9422289)', version='v1.9.4-8-ga9a3caad0 on 2018-05-11', machine='ESP module with ESP8266')
# Stubber: 1.1.2 - updated
from typing import Any
class NeoPixel:
    """Auto-generated host-side stub of the esp8266 ``neopixel.NeoPixel``
    driver class; all methods are no-ops."""

    # colour-channel ordering constant; unknown off-device, stubbed as None
    ORDER = None

    def fill(self, *argv) -> Any:
        """Stub: fill all pixels with one colour (no-op, returns None)."""
        return None

    def write(self, *argv) -> Any:
        """Stub: push the pixel buffer to the strip (no-op, returns None)."""
        return None
def neopixel_write():
    """Stub of the low-level neopixel write routine (no-op on the host)."""
    return None
| 18.652174 | 156 | 0.610723 |
880408d3288af013af40f39f347b0d59080b67c1 | 4,007 | py | Python | mla/tsne.py | Debanitrkl/MLAlgorithms | f53a267897e4d0babdcbae7c271c5042e07549ca | [
"MIT"
] | 2 | 2019-10-15T23:46:01.000Z | 2020-02-23T13:25:43.000Z | mla/tsne.py | Debanitrkl/MLAlgorithms | f53a267897e4d0babdcbae7c271c5042e07549ca | [
"MIT"
] | null | null | null | mla/tsne.py | Debanitrkl/MLAlgorithms | f53a267897e4d0babdcbae7c271c5042e07549ca | [
"MIT"
] | 1 | 2019-10-13T13:36:55.000Z | 2019-10-13T13:36:55.000Z | # coding:utf-8
import logging
import numpy as np
from six.moves import range
from mla.base import BaseEstimator
from mla.metrics.distance import l2_distance
np.random.seed(999)
"""
References:
https://lvdmaaten.github.io/tsne/
Based on:
https://lvdmaaten.github.io/tsne/code/tsne_python.zip
"""
class TSNE(BaseEstimator):
    """t-Distributed Stochastic Neighbor Embedding (dimensionality
    reduction by matching input-space and embedding-space affinities)."""

    # unsupervised: fit_transform accepts y=None
    y_required = False

    def __init__(self, n_components=2, perplexity=30.0, max_iter=200, learning_rate=500):
        """A t-Distributed Stochastic Neighbor Embedding implementation.

        Parameters
        ----------
        max_iter : int, default 200
        perplexity : float, default 30.0
        n_components : int, default 2
        """
        self.max_iter = max_iter
        self.perplexity = perplexity
        self.n_components = n_components
        # NOTE(review): fit_transform hardcodes 0.5/0.8 instead of reading
        # these two attributes -- confirm which is intended.
        self.initial_momentum = 0.5
        self.final_momentum = 0.8
        # floor for the per-coordinate adaptive gains
        self.min_gain = 0.01
        self.lr = learning_rate
        # tolerance for the perplexity binary search
        self.tol = 1e-5
        # max iterations of the per-row precision binary search
        self.perplexity_tries = 50

    def fit_transform(self, X, y=None):
        """Embed X into n_components dimensions; returns the embedding Y."""
        self._setup_input(X, y)
        # random initial embedding; velocity/gains drive momentum descent
        Y = np.random.randn(self.n_samples, self.n_components)
        velocity = np.zeros_like(Y)
        gains = np.ones_like(Y)
        # P: symmetric input-space affinities, fixed throughout the descent
        P = self._get_pairwise_affinities(X)
        iter_num = 0
        while iter_num < self.max_iter:
            iter_num += 1
            D = l2_distance(Y)
            # Q: unnormalized Student-t affinities in the embedding space
            Q = self._q_distribution(D)
            # Normalizer q distribution
            Q_n = Q / np.sum(Q)
            # Early exaggeration & momentum (pmul=4 for the first 100
            # iterations; momentum switches 0.5 -> 0.8 after 20)
            pmul = 4.0 if iter_num < 100 else 1.0
            momentum = 0.5 if iter_num < 20 else 0.8
            # Perform gradient step (KL-divergence gradient, factor 4
            # as in the reference implementation)
            grads = np.zeros(Y.shape)
            for i in range(self.n_samples):
                grad = 4 * np.dot((pmul * P[i] - Q_n[i]) * Q[i], Y[i] - Y)
                grads[i] = grad
            # adaptive gains (delta-bar-delta): grow when gradient sign
            # disagrees with the velocity, shrink when it agrees
            gains = (gains + 0.2) * ((grads > 0) != (velocity > 0)) + (gains * 0.8) * ((grads > 0) == (velocity > 0))
            gains = gains.clip(min=self.min_gain)
            velocity = momentum * velocity - self.lr * (gains * grads)
            Y += velocity
            # re-center the embedding every iteration
            Y = Y - np.mean(Y, 0)
            # KL divergence between P and normalized Q (reported only)
            error = np.sum(P * np.log(P / Q_n))
            logging.info("Iteration %s, error %s" % (iter_num, error))
        return Y

    def _get_pairwise_affinities(self, X):
        """Computes pairwise affinities (symmetric joint distribution P)."""
        affines = np.zeros((self.n_samples, self.n_samples), dtype=np.float32)
        target_entropy = np.log(self.perplexity)
        distances = l2_distance(X)
        for i in range(self.n_samples):
            # pick a per-row precision so row entropy matches the perplexity
            affines[i, :] = self._binary_search(distances[i], target_entropy)
        # Fill diagonal with near zero value (self-affinity is excluded)
        np.fill_diagonal(affines, 1.0e-12)
        affines = affines.clip(min=1e-100)
        # symmetrize and normalize into a joint distribution
        affines = (affines + affines.T) / (2 * self.n_samples)
        return affines

    def _binary_search(self, dist, target_entropy):
        """Performs binary search to find suitable precision (bandwidth)."""
        precision_min = 0
        precision_max = 1.0e15
        precision = 1.0e5
        for _ in range(self.perplexity_tries):
            # Gaussian affinities of this row under the current precision
            denom = np.sum(np.exp(-dist[dist > 0.0] / precision))
            beta = np.exp(-dist / precision) / denom
            # Exclude zeros
            g_beta = beta[beta > 0.0]
            entropy = -np.sum(g_beta * np.log2(g_beta))
            error = entropy - target_entropy
            if error > 0:
                # Decrease precision
                precision_max = precision
                precision = (precision + precision_min) / 2.0
            else:
                # Increase precision
                precision_min = precision
                precision = (precision + precision_max) / 2.0
            if np.abs(error) < self.tol:
                break
        return beta

    def _q_distribution(self, D):
        """Computes Student t-distribution (unnormalized, zero diagonal)."""
        Q = 1.0 / (1.0 + D)
        np.fill_diagonal(Q, 0.0)
        Q = Q.clip(min=1e-100)
        return Q
| 29.902985 | 117 | 0.562266 |
468861790832a21703ab770cadb8e5124575908f | 405 | py | Python | globals.py | PeganovAnton/transformer-xl | f36428445cc903872fde54d90bc5e61886420a5a | [
"Apache-2.0"
] | 133 | 2019-04-17T05:06:39.000Z | 2022-03-24T03:43:56.000Z | globals.py | PeganovAnton/transformer-xl | f36428445cc903872fde54d90bc5e61886420a5a | [
"Apache-2.0"
] | 21 | 2019-05-01T03:54:10.000Z | 2021-03-12T07:00:44.000Z | globals.py | PeganovAnton/transformer-xl | f36428445cc903872fde54d90bc5e61886420a5a | [
"Apache-2.0"
] | 18 | 2019-04-28T16:56:06.000Z | 2021-04-01T05:52:41.000Z | # global variables shared between train.py, eval.py, ..., carrying info for a single user invocation-process pair
event_writer = None
token_count = None
args = None
timeit_dict = None
logger = None
corpus = None
va_iter = None
te_iter = None
va_custom_iter = None
tie_projs = None
cutoffs = None
ntokens = None
device = None
state = None # saveable state of optimization (model, optimizer, step, etc)
| 21.315789 | 113 | 0.750617 |
b45adb5511baf2e8648c47bad716c05768969653 | 3,680 | py | Python | main.py | Starrky/SII_files | e6d14b3e2bbd74472a1ebbf31b45f245a06fc329 | [
"MIT"
] | null | null | null | main.py | Starrky/SII_files | e6d14b3e2bbd74472a1ebbf31b45f245a06fc329 | [
"MIT"
] | null | null | null | main.py | Starrky/SII_files | e6d14b3e2bbd74472a1ebbf31b45f245a06fc329 | [
"MIT"
] | null | null | null | import datetime
import os
import time
from os import listdir
from os.path import isfile, join
from time import time
import pymsteams
import smtplib
from email.message import EmailMessage
import pandas as pd
import Configs.Data as CD
# Emailing system
Notificator_card = CD.Notificator_card
EMAIL_USER = CD.EMAIL_USER
EMAIL_PASSWORD = CD.EMAIL_PASSWORD
myTeamsMessage = pymsteams.connectorcard(Notificator_card)
# NOTE(review): name is misspelled ("RECEIVER"); kept for compatibility.
RECIEVER = "myitportal@pepco.eu" # prod mail: myitportal@pepco.eu // test mail: test.support@pepco.eu

start_time = time()

# Accept SII files dated today or yesterday.
dt = datetime.datetime.today()
today = dt.date()
yesterday = today - datetime.timedelta(days=1)
today = today.strftime('%Y-%m-%d')
yesterday = yesterday.strftime('%Y-%m-%d')

shops = ['240001']
no_file = []        # stores with no SII file in the date window
with_file = []      # stores where an SII file was found
no_connection = []  # stores whose share could not be reached

for shop_no in shops:
    filename_1 = f'{shop_no}_{today}'
    filename_2 = f'{shop_no}_{yesterday}'
    host = f'ES{shop_no}BOS01'
    shop_loc = f'//{host}/c$/xstore/spain'
    try:
        onlyfiles = [f for f in listdir(shop_loc)
                     if isfile(join(shop_loc, f))]
        # BUGFIX: the previous per-file loop could place a store in BOTH
        # with_file and no_file when matching and non-matching files
        # coexisted in the folder (membership in with_file was never
        # checked before appending to no_file), producing false tickets.
        # Classify each store exactly once instead.
        if any(filename_1 in str(f) or filename_2 in str(f)
               for f in onlyfiles):
            with_file.append(shop_no)
        else:
            no_file.append(shop_no)
    except FileNotFoundError:
        print(f"Couldn't connect to store: {shop_no}")
        no_connection.append(shop_no)

print(f'with file: {with_file}\nno_file: {no_file}')

if no_file:
    print("List is not empty, creating tickets")
    for store in no_file:
        # Compose and send one ticket email per missing store
        msg = EmailMessage()
        msg['Subject'] = f"Missing SII files for store {store}, {yesterday}"
        msg['From'] = EMAIL_USER
        msg['To'] = RECIEVER
        html = f"Missing SII files for store {store} for date: {yesterday}"
        msg.add_alternative(html, subtype='html')
        with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
            smtp.login(EMAIL_USER, EMAIL_PASSWORD)
            smtp.send_message(msg)
            print("Email sent")


def _store_table(stores):
    """Render a centered one-column HTML table of store numbers for Teams."""
    frame = pd.DataFrame(columns=['Store'])
    frame['Store'] = stores
    html_table = frame.to_html(index=False, justify='center')
    return html_table.replace('<tr>', '<tr align="center">')


# Create tables for teams (found / missing / unreachable)
teams_table = _store_table(with_file)
teams_table_2 = _store_table(no_file)
teams_table_3 = _store_table(no_connection)

# Teams bot notification if ::
if with_file:
    # Files were found
    myTeamsMessage.title(f"SII Files were found for store/s on date: {yesterday}")
    myTeamsMessage.text(
        f"{teams_table}")
    myTeamsMessage.send()
if no_file:
    # Files were NOT found
    myTeamsMessage.title(f"SII Files were NOT found for store/s on date: {yesterday}")
    myTeamsMessage.text(
        f"{teams_table_2}")
    myTeamsMessage.send()
if no_connection:
    # Couldn't connect to machine/s at all
    myTeamsMessage.title(f"COULDN'T CONNECT TO MACHINE/S:")
    myTeamsMessage.text(
        f"{teams_table_3}")
    myTeamsMessage.send()

print("Process finished --- %s seconds ---" % (time() - start_time))
| 30.413223 | 102 | 0.661957 |
a1bf16e255f31bae346b485adb0bac81fab5533b | 9,246 | py | Python | sdk/python/pulumi_aws_native/globalaccelerator/listener.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 29 | 2021-09-30T19:32:07.000Z | 2022-03-22T21:06:08.000Z | sdk/python/pulumi_aws_native/globalaccelerator/listener.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 232 | 2021-09-30T19:26:26.000Z | 2022-03-31T23:22:06.000Z | sdk/python/pulumi_aws_native/globalaccelerator/listener.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 4 | 2021-11-10T19:42:01.000Z | 2022-02-05T10:15:49.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ListenerArgs', 'Listener']
@pulumi.input_type
class ListenerArgs:
    def __init__(__self__, *,
                 accelerator_arn: pulumi.Input[str],
                 port_ranges: pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]],
                 protocol: pulumi.Input['ListenerProtocol'],
                 client_affinity: Optional[pulumi.Input['ListenerClientAffinity']] = None):
        """
        The set of arguments for constructing a Listener resource.
        :param pulumi.Input[str] accelerator_arn: The Amazon Resource Name (ARN) of the accelerator.
        :param pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]] port_ranges: The port ranges accepted by the listener.
        :param pulumi.Input['ListenerProtocol'] protocol: The protocol for the listener.
        :param pulumi.Input['ListenerClientAffinity'] client_affinity: Client affinity lets you direct all requests from a user to the same endpoint.
        """
        pulumi.set(__self__, "accelerator_arn", accelerator_arn)
        pulumi.set(__self__, "port_ranges", port_ranges)
        pulumi.set(__self__, "protocol", protocol)
        # optional argument: only stored when explicitly provided
        if client_affinity is not None:
            pulumi.set(__self__, "client_affinity", client_affinity)

    @property
    @pulumi.getter(name="acceleratorArn")
    def accelerator_arn(self) -> pulumi.Input[str]:
        """
        The Amazon Resource Name (ARN) of the accelerator.
        """
        return pulumi.get(self, "accelerator_arn")

    @accelerator_arn.setter
    def accelerator_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "accelerator_arn", value)

    @property
    @pulumi.getter(name="portRanges")
    def port_ranges(self) -> pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]]:
        """
        The port ranges accepted by the listener.
        """
        return pulumi.get(self, "port_ranges")

    @port_ranges.setter
    def port_ranges(self, value: pulumi.Input[Sequence[pulumi.Input['ListenerPortRangeArgs']]]):
        pulumi.set(self, "port_ranges", value)

    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Input['ListenerProtocol']:
        """
        The protocol for the listener.
        """
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: pulumi.Input['ListenerProtocol']):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter(name="clientAffinity")
    def client_affinity(self) -> Optional[pulumi.Input['ListenerClientAffinity']]:
        """
        Client affinity lets you direct all requests from a user to the same endpoint.
        """
        return pulumi.get(self, "client_affinity")

    @client_affinity.setter
    def client_affinity(self, value: Optional[pulumi.Input['ListenerClientAffinity']]):
        pulumi.set(self, "client_affinity", value)
class Listener(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 accelerator_arn: Optional[pulumi.Input[str]] = None,
                 client_affinity: Optional[pulumi.Input['ListenerClientAffinity']] = None,
                 port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerPortRangeArgs']]]]] = None,
                 protocol: Optional[pulumi.Input['ListenerProtocol']] = None,
                 __props__=None):
        """
        Resource Type definition for AWS::GlobalAccelerator::Listener

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] accelerator_arn: The Amazon Resource Name (ARN) of the accelerator.
        :param pulumi.Input['ListenerClientAffinity'] client_affinity: Client affinity lets you direct all requests from a user to the same endpoint.
        :param pulumi.Input['ListenerProtocol'] protocol: The protocol for the listener.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ListenerArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Resource Type definition for AWS::GlobalAccelerator::Listener

        :param str resource_name: The name of the resource.
        :param ListenerArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above (args-object vs keyword
        # arguments); both funnel into _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(ListenerArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 accelerator_arn: Optional[pulumi.Input[str]] = None,
                 client_affinity: Optional[pulumi.Input['ListenerClientAffinity']] = None,
                 port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerPortRangeArgs']]]]] = None,
                 protocol: Optional[pulumi.Input['ListenerProtocol']] = None,
                 __props__=None):
        # Shared constructor body: validates required inputs (unless
        # rehydrating an existing resource via opts.urn) and registers
        # the resource with the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ListenerArgs.__new__(ListenerArgs)

            if accelerator_arn is None and not opts.urn:
                raise TypeError("Missing required property 'accelerator_arn'")
            __props__.__dict__["accelerator_arn"] = accelerator_arn
            __props__.__dict__["client_affinity"] = client_affinity
            if port_ranges is None and not opts.urn:
                raise TypeError("Missing required property 'port_ranges'")
            __props__.__dict__["port_ranges"] = port_ranges
            if protocol is None and not opts.urn:
                raise TypeError("Missing required property 'protocol'")
            __props__.__dict__["protocol"] = protocol
            # listener_arn is an output-only property, computed by AWS
            __props__.__dict__["listener_arn"] = None
        super(Listener, __self__).__init__(
            'aws-native:globalaccelerator:Listener',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Listener':
        """
        Get an existing Listener resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All properties start as None; the engine fills them from the
        # provider's recorded state for the given id.
        __props__ = ListenerArgs.__new__(ListenerArgs)

        __props__.__dict__["accelerator_arn"] = None
        __props__.__dict__["client_affinity"] = None
        __props__.__dict__["listener_arn"] = None
        __props__.__dict__["port_ranges"] = None
        __props__.__dict__["protocol"] = None
        return Listener(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="acceleratorArn")
    def accelerator_arn(self) -> pulumi.Output[str]:
        """
        The Amazon Resource Name (ARN) of the accelerator.
        """
        return pulumi.get(self, "accelerator_arn")

    @property
    @pulumi.getter(name="clientAffinity")
    def client_affinity(self) -> pulumi.Output[Optional['ListenerClientAffinity']]:
        """
        Client affinity lets you direct all requests from a user to the same endpoint.
        """
        return pulumi.get(self, "client_affinity")

    @property
    @pulumi.getter(name="listenerArn")
    def listener_arn(self) -> pulumi.Output[str]:
        """
        The Amazon Resource Name (ARN) of the listener.
        """
        return pulumi.get(self, "listener_arn")

    @property
    @pulumi.getter(name="portRanges")
    def port_ranges(self) -> pulumi.Output[Sequence['outputs.ListenerPortRange']]:
        """
        The port ranges accepted by the listener.
        """
        return pulumi.get(self, "port_ranges")

    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Output['ListenerProtocol']:
        """
        The protocol for the listener.
        """
        return pulumi.get(self, "protocol")
c0a43018c6ae6bb7b102473adffa55428ce8bb1e | 2,989 | py | Python | tests/test_layouts_grid.py | datdinhquoc/flask_jsondash | 124f5739aebb39c4d36d27a57acb1a32df95a51d | [
"MIT"
] | 3,503 | 2016-08-25T19:57:33.000Z | 2022-03-31T20:04:37.000Z | tests/test_layouts_grid.py | wargrider/flask_jsondash | fd84c5498c933ea4175eca8357155826bdbcf14f | [
"MIT"
] | 203 | 2016-05-06T18:01:12.000Z | 2022-03-23T09:05:28.000Z | tests/test_layouts_grid.py | wargrider/flask_jsondash | fd84c5498c933ea4175eca8357155826bdbcf14f | [
"MIT"
] | 350 | 2016-08-30T10:29:57.000Z | 2022-02-02T17:59:41.000Z | import json
from conftest import setup_dashboard
def test_grid_mode_has_no_cols_empty_single_row(monkeypatch, ctx, client):
    """A grid-mode dashboard with no modules renders no rows at all."""
    app, test = client
    payload = dict(mode='grid', name='Some dashboard')
    page = setup_dashboard(monkeypatch, app, test, payload)
    root = page.find('#container')
    assert len(root.find('.grid-row')) == 0
    # Test it has 2 add row buttons - top and bottom
    assert len(root.find('.add-new-row-container')) == 2
def test_grid_mode_has_2_rows(monkeypatch, ctx, client):
    """Modules placed on rows 1 and 2 produce exactly two grid rows."""
    app, test = client
    payload = dict(mode='grid', name='Some dashboard')
    payload['module_foo'] = json.dumps(
        dict(name=1, width=1, height=1, dataSource='...', row=2)
    )
    payload['module_bar'] = json.dumps(
        dict(name=1, width=1, height=1, dataSource='...', row=1)
    )
    page = setup_dashboard(monkeypatch, app, test, payload)
    root = page.find('#container')
    assert len(root.find('.grid-row')) == 2
def test_grid_mode_has_correct_cols(monkeypatch, ctx, client):
    """A 'col-4' width on a module renders as a bootstrap col-md-4 column."""
    app, test = client
    module_cfgs = {
        'module_foo': dict(name=1, width='col-4', height=1, dataSource='...', row=2),
        'module_bar': dict(name=1, width='col-4', height=1, dataSource='...', row=1),
    }
    payload = dict(mode='grid', name='Some dashboard')
    payload.update({key: json.dumps(cfg) for key, cfg in module_cfgs.items()})
    page = setup_dashboard(monkeypatch, app, test, payload)
    root = page.find('#container')
    assert len(root.find('.grid-row')) == 2
    assert len(root.find('.col-md-4')) == 2
def test_grid_mode_correct_multicols_multirows(monkeypatch, ctx, client):
    """Row assignments and column widths survive a multi-row, multi-col layout."""
    app, test = client
    module_cfgs = {
        'module_baz': dict(name=1, width='col-12', height=1, dataSource='...', row=1),
        'module_foo': dict(name=1, width='col-5', height=1, dataSource='...', row=2),
        'module_bar': dict(name=1, width='col-4', height=1, dataSource='...', row=2),
        'module_quux': dict(name=1, width='col-3', height=1, dataSource='...', row=2),
        'module_quux2': dict(name=1, width='col-6', height=1, dataSource='...', row=3),
        'module_quux3': dict(name=1, width='col-6', height=1, dataSource='...', row=3),
    }
    payload = dict(mode='grid', name='Some dashboard - lots of cols and rows')
    payload.update({key: json.dumps(cfg) for key, cfg in module_cfgs.items()})
    page = setup_dashboard(monkeypatch, app, test, payload)
    rows = page.find('#container').find('.grid-row')
    assert len(rows) == 3
    # One column of each width except col-md-6, which appears twice on row 3.
    expected = {'col-md-12': 1, 'col-md-5': 1, 'col-md-4': 1,
                'col-md-3': 1, 'col-md-6': 2}
    for css_class, count in expected.items():
        assert len(rows.find('.' + css_class)) == count
| 34.755814 | 75 | 0.583138 |
3492bf85eda2b4acf42989534fdba1ad1fc3735f | 1,024 | py | Python | src/vectorizer.py | deluxebrain/play-python-sentiment-analysis | d4aaa43e6bf6e6a18d86ed2ac505a0eaffb0f48f | [
"MIT"
] | null | null | null | src/vectorizer.py | deluxebrain/play-python-sentiment-analysis | d4aaa43e6bf6e6a18d86ed2ac505a0eaffb0f48f | [
"MIT"
] | null | null | null | src/vectorizer.py | deluxebrain/play-python-sentiment-analysis | d4aaa43e6bf6e6a18d86ed2ac505a0eaffb0f48f | [
"MIT"
] | null | null | null | from sklearn.feature_extraction.text import HashingVectorizer
from nltk.stem.porter import PorterStemmer
import re
import os
import pickle
work_path = os.path.join(os.path.expanduser('~'), 'tmp/datasets')
stop = pickle.load(open(
os.path.join(work_path,
'pkl_objects',
'stopwords.pkl'),
'rb'))
porter = PorterStemmer()
def tokenizer_porter(text):
return [porter.stem(word) for word in text.split()]
def tokenizer(text):
text = re.sub('<[^>]*>', '', text)
emoticons = re.findall('(?::|;|=)(?:-)?(?:\)|\(|D|P)',
text.lower())
text = re.sub('[\W]+', ' ', text.lower()) + \
' '.join(emoticons).replace('-', '')
tokenized = [w for w in tokenizer_porter(text) if w not in stop]
return tokenized
vect = HashingVectorizer(decode_error='ignore',
n_features=2**21,
preprocessor=None,
ngram_range=(1, 3),
tokenizer=tokenizer)
| 28.444444 | 68 | 0.554688 |
ec7988d253dc0986f1e55a68adbddbd979001277 | 719 | py | Python | app/core/management/commands/wait_for_db.py | shubhamshinde321/recipe-app-api | 509184d5f3eefb7baf72153f3d3a854b76909d8d | [
"MIT"
] | null | null | null | app/core/management/commands/wait_for_db.py | shubhamshinde321/recipe-app-api | 509184d5f3eefb7baf72153f3d3a854b76909d8d | [
"MIT"
] | null | null | null | app/core/management/commands/wait_for_db.py | shubhamshinde321/recipe-app-api | 509184d5f3eefb7baf72153f3d3a854b76909d8d | [
"MIT"
] | null | null | null | import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Django command to pause execution until db is available"""
def handle(self, *args, **options):
"""Handle the command"""
self.stdout.write('Waiting for database')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write('Database unavailable, waiting for 1 \
seconds')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available'))
| 29.958333 | 72 | 0.628651 |
d051605da543877e8720f94f3f069cb1651347ef | 11,972 | py | Python | bitcoind-monitor.py | juergenhoetzel/bitcoin-prometheus-exporter | c2ee79d3119fc3e130734fc866a78a4d189b2e08 | [
"BSD-3-Clause"
] | null | null | null | bitcoind-monitor.py | juergenhoetzel/bitcoin-prometheus-exporter | c2ee79d3119fc3e130734fc866a78a4d189b2e08 | [
"BSD-3-Clause"
] | null | null | null | bitcoind-monitor.py | juergenhoetzel/bitcoin-prometheus-exporter | c2ee79d3119fc3e130734fc866a78a4d189b2e08 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import logging
import time
import os
import signal
import sys
import socket
from datetime import datetime
from functools import lru_cache
from pathlib import Path
from typing import Any
from typing import Dict
from typing import List
from typing import Union
from urllib.parse import quote
import riprova
from bitcoin.rpc import InWarmupError, Proxy
from prometheus_client import start_http_server, Gauge, Counter
logger = logging.getLogger("bitcoin-exporter")
# Create Prometheus metrics to track bitcoind stats.
BITCOIN_BLOCKS = Gauge("bitcoin_blocks", "Block height")
BITCOIN_DIFFICULTY = Gauge("bitcoin_difficulty", "Difficulty")
BITCOIN_PEERS = Gauge("bitcoin_peers", "Number of peers")
BITCOIN_HASHPS_NEG1 = Gauge(
"bitcoin_hashps_neg1", "Estimated network hash rate per second since the last difficulty change"
)
BITCOIN_HASHPS_1 = Gauge(
"bitcoin_hashps_1", "Estimated network hash rate per second for the last block"
)
BITCOIN_HASHPS = Gauge(
"bitcoin_hashps", "Estimated network hash rate per second for the last 120 blocks"
)
BITCOIN_ESTIMATED_SMART_FEE_GAUGES: Dict[int, Gauge] = {}
BITCOIN_WARNINGS = Counter("bitcoin_warnings", "Number of network or blockchain warnings detected")
BITCOIN_UPTIME = Gauge("bitcoin_uptime", "Number of seconds the Bitcoin daemon has been running")
BITCOIN_MEMINFO_USED = Gauge("bitcoin_meminfo_used", "Number of bytes used")
BITCOIN_MEMINFO_FREE = Gauge("bitcoin_meminfo_free", "Number of bytes available")
BITCOIN_MEMINFO_TOTAL = Gauge("bitcoin_meminfo_total", "Number of bytes managed")
BITCOIN_MEMINFO_LOCKED = Gauge("bitcoin_meminfo_locked", "Number of bytes locked")
BITCOIN_MEMINFO_CHUNKS_USED = Gauge("bitcoin_meminfo_chunks_used", "Number of allocated chunks")
BITCOIN_MEMINFO_CHUNKS_FREE = Gauge("bitcoin_meminfo_chunks_free", "Number of unused chunks")
BITCOIN_MEMPOOL_BYTES = Gauge("bitcoin_mempool_bytes", "Size of mempool in bytes")
BITCOIN_MEMPOOL_SIZE = Gauge(
"bitcoin_mempool_size", "Number of unconfirmed transactions in mempool"
)
BITCOIN_MEMPOOL_USAGE = Gauge("bitcoin_mempool_usage", "Total memory usage for the mempool")
BITCOIN_LATEST_BLOCK_HEIGHT = Gauge(
"bitcoin_latest_block_height", "Height or index of latest block"
)
BITCOIN_LATEST_BLOCK_WEIGHT = Gauge(
"bitcoin_latest_block_weight", "Weight of latest block according to BIP 141"
)
BITCOIN_LATEST_BLOCK_SIZE = Gauge("bitcoin_latest_block_size", "Size of latest block in bytes")
BITCOIN_LATEST_BLOCK_TXS = Gauge(
"bitcoin_latest_block_txs", "Number of transactions in latest block"
)
BITCOIN_NUM_CHAINTIPS = Gauge("bitcoin_num_chaintips", "Number of known blockchain branches")
BITCOIN_TOTAL_BYTES_RECV = Gauge("bitcoin_total_bytes_recv", "Total bytes received")
BITCOIN_TOTAL_BYTES_SENT = Gauge("bitcoin_total_bytes_sent", "Total bytes sent")
BITCOIN_LATEST_BLOCK_INPUTS = Gauge(
"bitcoin_latest_block_inputs", "Number of inputs in transactions of latest block"
)
BITCOIN_LATEST_BLOCK_OUTPUTS = Gauge(
"bitcoin_latest_block_outputs", "Number of outputs in transactions of latest block"
)
BITCOIN_LATEST_BLOCK_VALUE = Gauge(
"bitcoin_latest_block_value", "Bitcoin value of all transactions in the latest block"
)
BITCOIN_BAN_CREATED = Gauge(
"bitcoin_ban_created", "Time the ban was created", labelnames=["address", "reason"]
)
BITCOIN_BANNED_UNTIL = Gauge(
"bitcoin_banned_until", "Time the ban expires", labelnames=["address", "reason"]
)
BITCOIN_SERVER_VERSION = Gauge("bitcoin_server_version", "The server version")
BITCOIN_PROTOCOL_VERSION = Gauge("bitcoin_protocol_version", "The protocol version of the server")
BITCOIN_SIZE_ON_DISK = Gauge("bitcoin_size_on_disk", "Estimated size of the block and undo files")
BITCOIN_VERIFICATION_PROGRESS = Gauge(
"bitcoin_verification_progress", "Estimate of verification progress [0..1]"
)
EXPORTER_ERRORS = Counter(
"bitcoin_exporter_errors", "Number of errors encountered by the exporter", labelnames=["type"]
)
PROCESS_TIME = Counter(
"bitcoin_exporter_process_time", "Time spent processing metrics from bitcoin node"
)
# --- Runtime configuration (all overridable via environment variables) ---
BITCOIN_RPC_SCHEME = os.environ.get("BITCOIN_RPC_SCHEME", "http")
BITCOIN_RPC_HOST = os.environ.get("BITCOIN_RPC_HOST", "localhost")
BITCOIN_RPC_PORT = os.environ.get("BITCOIN_RPC_PORT", "8332")
BITCOIN_RPC_USER = os.environ.get("BITCOIN_RPC_USER")
BITCOIN_RPC_PASSWORD = os.environ.get("BITCOIN_RPC_PASSWORD")
# Optional explicit path to bitcoin.conf; see bitcoin_conf_path() for the fallback.
BITCOIN_CONF_PATH = os.environ.get("BITCOIN_CONF_PATH")
# Confirmation targets (in blocks) polled via "estimatesmartfee".
SMART_FEES = [int(f) for f in os.environ.get("SMARTFEE_BLOCKS", "2,3,5,20").split(",")]
# Seconds to sleep between metric refresh cycles.
REFRESH_SECONDS = float(os.environ.get("REFRESH_SECONDS", "300"))
# Port on which the Prometheus scrape endpoint is served.
METRICS_PORT = int(os.environ.get("METRICS_PORT", "8334"))
RETRIES = int(os.environ.get("RETRIES", 5))
TIMEOUT = int(os.environ.get("TIMEOUT", 30))
LOG_LEVEL = os.environ.get("LOG_LEVEL", "INFO")

# Exceptions treated as transient: RPC calls are retried when these occur.
RETRY_EXCEPTIONS = (
    InWarmupError,
    ConnectionError,
    socket.timeout
)

# Shape of a decoded JSON-RPC result.
RpcResult = Union[Dict[str, Any], List[Any], str, int, float, bool, None]
def on_retry(err: Exception, next_try: float) -> None:
    """riprova retry hook: count the failure and log it before the next attempt."""
    cls = type(err)
    qualified_name = "{}.{}".format(cls.__module__, cls.__name__)
    EXPORTER_ERRORS.labels(type=qualified_name).inc()
    logger.error("Retry after exception %s: %s", qualified_name, err)
def error_evaluator(e: Exception) -> bool:
    """Tell riprova whether *e* is one of the transient, retryable errors."""
    retryable = isinstance(e, RETRY_EXCEPTIONS)
    return retryable
def bitcoin_conf_path() -> Path:
    """Path to bitcoin.conf: $BITCOIN_CONF_PATH, else ~/.bitcoin/bitcoin.conf."""
    if BITCOIN_CONF_PATH is None:
        return Path.home() / ".bitcoin" / "bitcoin.conf"
    return Path(BITCOIN_CONF_PATH)
@lru_cache(maxsize=1)
def rpc_client_factory():
    """Build (exactly once, via lru_cache) a zero-arg factory of RPC proxies.

    Prefers a bitcoin.conf file when one exists; otherwise assembles a
    service URL from the BITCOIN_RPC_* environment settings.
    """
    bitcoin_conf: Path = bitcoin_conf_path()
    if bitcoin_conf.exists():
        logger.info("Using config file: %s", bitcoin_conf)
        return lambda: Proxy(btc_conf_file=bitcoin_conf, timeout=TIMEOUT)
    else:
        host = BITCOIN_RPC_HOST
        # Embed URL-escaped credentials only when both user and password are set.
        if BITCOIN_RPC_USER and BITCOIN_RPC_PASSWORD:
            host = "%s:%s@%s" % (quote(BITCOIN_RPC_USER), quote(BITCOIN_RPC_PASSWORD), host,)
        if BITCOIN_RPC_PORT:
            host = "%s:%s" % (host, BITCOIN_RPC_PORT)
        service_url = "%s://%s" % (BITCOIN_RPC_SCHEME, host)
        return lambda: Proxy(service_url=service_url, timeout=TIMEOUT)
def rpc_client():
    """Create a fresh RPC proxy using the cached factory."""
    factory = rpc_client_factory()
    return factory()
@riprova.retry(
    timeout=TIMEOUT,
    backoff=riprova.ExponentialBackOff(),
    on_retry=on_retry,
    error_evaluator=error_evaluator,
)
def bitcoinrpc(*args) -> RpcResult:
    """Issue one RPC call to bitcoind, retrying transient failures.

    *args* is the RPC method name followed by its positional parameters.
    """
    # Guard the join so the argument stringification only happens when
    # DEBUG logging is actually enabled.
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("RPC call: " + " ".join(str(a) for a in args))

    result = rpc_client().call(*args)

    logger.debug("Result: %s", result)
    return result
def get_block(block_hash: str):
    """Fetch a block with verbosity 2 (full tx objects); None on failure."""
    try:
        return bitcoinrpc("getblock", block_hash, 2)
    except Exception:
        # Lazy %-style args are the logging idiom: no string building when
        # the record is ultimately discarded, and no concat in the call.
        logger.exception("Failed to retrieve block %s from bitcoind.", block_hash)
        return None
def smartfee_gauge(num_blocks: int) -> Gauge:
    """Return the smart-fee gauge for *num_blocks*, creating it on first use."""
    try:
        return BITCOIN_ESTIMATED_SMART_FEE_GAUGES[num_blocks]
    except KeyError:
        gauge = Gauge(
            "bitcoin_est_smart_fee_%d" % num_blocks,
            "Estimated smart fee per kilobyte for confirmation in %d blocks" % num_blocks,
        )
        BITCOIN_ESTIMATED_SMART_FEE_GAUGES[num_blocks] = gauge
        return gauge
def do_smartfee(num_blocks: int) -> None:
    """Refresh the estimated smart fee for one confirmation target."""
    feerate = bitcoinrpc("estimatesmartfee", num_blocks).get("feerate")
    if feerate is None:
        # The node returns no feerate when it has no estimate yet.
        return
    smartfee_gauge(num_blocks).set(feerate)
def refresh_metrics() -> None:
    """Poll bitcoind over RPC and push the results into the Prometheus metrics."""
    # --- Gather raw data from the node ---------------------------------
    uptime = int(bitcoinrpc("uptime"))
    meminfo = bitcoinrpc("getmemoryinfo", "stats")["locked"]
    blockchaininfo = bitcoinrpc("getblockchaininfo")
    networkinfo = bitcoinrpc("getnetworkinfo")
    chaintips = len(bitcoinrpc("getchaintips"))
    mempool = bitcoinrpc("getmempoolinfo")
    nettotals = bitcoinrpc("getnettotals")
    latest_block = get_block(str(blockchaininfo["bestblockhash"]))
    hashps_120 = float(bitcoinrpc("getnetworkhashps", 120))  # 120 is the default
    hashps_neg1 = float(bitcoinrpc("getnetworkhashps", -1))
    hashps_1 = float(bitcoinrpc("getnetworkhashps", 1))
    banned = bitcoinrpc("listbanned")

    # --- Node / chain level gauges -------------------------------------
    BITCOIN_UPTIME.set(uptime)
    BITCOIN_BLOCKS.set(blockchaininfo["blocks"])
    BITCOIN_PEERS.set(networkinfo["connections"])
    BITCOIN_DIFFICULTY.set(blockchaininfo["difficulty"])
    BITCOIN_HASHPS.set(hashps_120)
    BITCOIN_HASHPS_NEG1.set(hashps_neg1)
    BITCOIN_HASHPS_1.set(hashps_1)
    BITCOIN_SERVER_VERSION.set(networkinfo["version"])
    BITCOIN_PROTOCOL_VERSION.set(networkinfo["protocolversion"])
    BITCOIN_SIZE_ON_DISK.set(blockchaininfo["size_on_disk"])
    BITCOIN_VERIFICATION_PROGRESS.set(blockchaininfo["verificationprogress"])

    # One gauge per configured confirmation target.
    for smartfee in SMART_FEES:
        do_smartfee(smartfee)

    # Ban entries are labelled by address and reason.
    for ban in banned:
        BITCOIN_BAN_CREATED.labels(address=ban["address"], reason=ban["ban_reason"]).set(
            ban["ban_created"]
        )
        BITCOIN_BANNED_UNTIL.labels(address=ban["address"], reason=ban["ban_reason"]).set(
            ban["banned_until"]
        )

    # Any non-empty warnings string counts as one warning event.
    if networkinfo["warnings"]:
        BITCOIN_WARNINGS.inc()

    BITCOIN_NUM_CHAINTIPS.set(chaintips)

    # --- Memory / mempool / network byte counters ----------------------
    BITCOIN_MEMINFO_USED.set(meminfo["used"])
    BITCOIN_MEMINFO_FREE.set(meminfo["free"])
    BITCOIN_MEMINFO_TOTAL.set(meminfo["total"])
    BITCOIN_MEMINFO_LOCKED.set(meminfo["locked"])
    BITCOIN_MEMINFO_CHUNKS_USED.set(meminfo["chunks_used"])
    BITCOIN_MEMINFO_CHUNKS_FREE.set(meminfo["chunks_free"])

    BITCOIN_MEMPOOL_BYTES.set(mempool["bytes"])
    BITCOIN_MEMPOOL_SIZE.set(mempool["size"])
    BITCOIN_MEMPOOL_USAGE.set(mempool["usage"])

    BITCOIN_TOTAL_BYTES_RECV.set(nettotals["totalbytesrecv"])
    BITCOIN_TOTAL_BYTES_SENT.set(nettotals["totalbytessent"])

    # --- Latest block details (skipped if the block fetch failed) ------
    if latest_block is not None:
        BITCOIN_LATEST_BLOCK_SIZE.set(latest_block["size"])
        BITCOIN_LATEST_BLOCK_TXS.set(latest_block["nTx"])
        BITCOIN_LATEST_BLOCK_HEIGHT.set(latest_block["height"])
        BITCOIN_LATEST_BLOCK_WEIGHT.set(latest_block["weight"])
        inputs, outputs = 0, 0
        value = 0
        # Tally inputs, outputs and total output value across all txs.
        for tx in latest_block["tx"]:
            i = len(tx["vin"])
            inputs += i
            o = len(tx["vout"])
            outputs += o
            # Note: the generator variable `o` shadows the count above,
            # but only inside the generator's own scope.
            value += sum(o["value"] for o in tx["vout"])

        BITCOIN_LATEST_BLOCK_INPUTS.set(inputs)
        BITCOIN_LATEST_BLOCK_OUTPUTS.set(outputs)
        BITCOIN_LATEST_BLOCK_VALUE.set(value)
def sigterm_handler(signum, frame) -> None:
    """Exit cleanly on SIGTERM.

    Signal handlers are invoked positionally with (signum, frame); the first
    parameter is renamed from `signal` to avoid shadowing the stdlib module.
    """
    logger.critical("Received SIGTERM. Exiting.")
    sys.exit(0)
def exception_count(e: Exception) -> None:
    """Increment the exporter error counter, labelled by exception type."""
    cls = type(e)
    EXPORTER_ERRORS.labels(type=cls.__module__ + "." + cls.__name__).inc()
def main():
    """Run the exporter: serve metrics and refresh them forever."""
    # Set up logging to look similar to bitcoin logs (UTC).
    logging.basicConfig(
        format="%(asctime)s %(levelname)s %(message)s", datefmt="%Y-%m-%dT%H:%M:%SZ"
    )
    logging.Formatter.converter = time.gmtime
    logger.setLevel(LOG_LEVEL)

    # Handle SIGTERM gracefully.
    signal.signal(signal.SIGTERM, sigterm_handler)

    # Start up the server to expose the metrics.
    start_http_server(METRICS_PORT)
    while True:
        process_start = datetime.now()

        # Allow riprova.MaxRetriesExceeded and unknown exceptions to crash the process.
        try:
            refresh_metrics()
        except riprova.exceptions.RetryError as e:
            # Retries were exhausted; record the error and keep looping.
            logger.error("Refresh failed during retry. Cause: " + str(e))
            exception_count(e)
        except json.decoder.JSONDecodeError as e:
            # Non-JSON responses usually mean an auth problem; bail out.
            logger.error("RPC call did not return JSON. Bad credentials? " + str(e))
            sys.exit(1)

        duration = datetime.now() - process_start
        PROCESS_TIME.inc(duration.total_seconds())
        logger.info("Refresh took %s seconds, sleeping for %s seconds", duration, REFRESH_SECONDS)
        time.sleep(REFRESH_SECONDS)
# Script entry point.
if __name__ == "__main__":
    main()
| 35.525223 | 100 | 0.724607 |
90c802423ce490e2937114df9dab23fb2a4fbf19 | 1,290 | py | Python | homeassistant/components/websocket_api/__init__.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | [
"Apache-2.0"
] | 3 | 2019-01-31T13:41:37.000Z | 2020-05-20T14:22:18.000Z | homeassistant/components/websocket_api/__init__.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | [
"Apache-2.0"
] | 5 | 2021-02-08T20:32:11.000Z | 2022-01-13T01:19:23.000Z | homeassistant/components/websocket_api/__init__.py | dauden1184/home-assistant | f4c6d389b77d0efa86644e76604eaea5d21abdb5 | [
"Apache-2.0"
] | 3 | 2018-08-29T19:26:20.000Z | 2020-01-19T11:58:22.000Z | """
Websocket based API for Home Assistant.
For more details about this component, please refer to the documentation at
https://developers.home-assistant.io/docs/external_api_websocket.html
"""
from homeassistant.core import callback
from homeassistant.loader import bind_hass
from . import commands, connection, const, decorators, http, messages
DOMAIN = const.DOMAIN
DEPENDENCIES = ('http',)
# Backwards compat / Make it easier to integrate
# pylint: disable=invalid-name
ActiveConnection = connection.ActiveConnection
BASE_COMMAND_MESSAGE_SCHEMA = messages.BASE_COMMAND_MESSAGE_SCHEMA
error_message = messages.error_message
result_message = messages.result_message
async_response = decorators.async_response
require_owner = decorators.require_owner
ws_require_user = decorators.ws_require_user
# pylint: enable=invalid-name
@bind_hass
@callback
def async_register_command(hass, command, handler, schema):
    """Register a websocket command."""
    # Lazily create the per-instance command registry on first registration.
    hass.data.setdefault(DOMAIN, {})[command] = (handler, schema)
async def async_setup(hass, config):
    """Initialize the websocket API."""
    # Expose the websocket HTTP endpoint and register the built-in commands.
    hass.http.register_view(http.WebsocketAPIView)
    commands.async_register_commands(hass)
    return True
| 30 | 75 | 0.784496 |
4fbe430a969ee14ec651dfae6ac84e3fa20bb2c8 | 448 | py | Python | cpdb/data/models/attachment_narrative.py | invinst/CPDBv2_backend | b4e96d620ff7a437500f525f7e911651e4a18ef9 | [
"Apache-2.0"
] | 25 | 2018-07-20T22:31:40.000Z | 2021-07-15T16:58:41.000Z | cpdb/data/models/attachment_narrative.py | invinst/CPDBv2_backend | b4e96d620ff7a437500f525f7e911651e4a18ef9 | [
"Apache-2.0"
] | 13 | 2018-06-18T23:08:47.000Z | 2022-02-10T07:38:25.000Z | cpdb/data/models/attachment_narrative.py | invinst/CPDBv2_backend | b4e96d620ff7a437500f525f7e911651e4a18ef9 | [
"Apache-2.0"
] | 6 | 2018-05-17T21:59:43.000Z | 2020-11-17T00:30:26.000Z | from django.contrib.gis.db import models
from .common import TimeStampsModel
class AttachmentNarrative(TimeStampsModel):
    """A piece of text content from an attachment file, addressed by page,
    section and column."""

    # Owning attachment; deleting the attachment removes its narratives.
    attachment = models.ForeignKey(
        'data.AttachmentFile', on_delete=models.CASCADE, related_name='attachment_narratives'
    )
    # Page number within the attachment the text was taken from.
    page_num = models.IntegerField()
    section_name = models.CharField(max_length=255)
    column_name = models.CharField(max_length=255)
    # May be empty when a section/column carries no text.
    text_content = models.TextField(blank=True)
f43df35c35793a42d247e44556a712199f1ea259 | 1,020 | py | Python | refcollections/admin_custom.py | uq-eresearch/archaeology-reference-collections | 532a8974e1e9f7c2b724e5c6d6b316d0fc93478b | [
"BSD-3-Clause"
] | null | null | null | refcollections/admin_custom.py | uq-eresearch/archaeology-reference-collections | 532a8974e1e9f7c2b724e5c6d6b316d0fc93478b | [
"BSD-3-Clause"
] | 2 | 2017-04-12T23:44:08.000Z | 2017-11-23T23:36:43.000Z | refcollections/admin_custom.py | uq-eresearch/archaeology-reference-collections | 532a8974e1e9f7c2b724e5c6d6b316d0fc93478b | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.admin.sites import AdminSite
from apps.shells.admin import SpeciesAdmin, SpecimenAdmin, SpeciesRepresentationAdmin
from apps.shells.models import Species, Specimen, SpeciesRepresentation
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.contrib import admin
from django.contrib.sites.models import Site
from apps.botanycollection.admin import AccessionAdmin
from apps.botanycollection.models import Accession
refcollections_admin = AdminSite()
refcollections_admin.register(Species, SpeciesAdmin)
refcollections_admin.register(Specimen, SpecimenAdmin)
refcollections_admin.register(SpeciesRepresentation, SpeciesRepresentationAdmin)
refcollections_admin.register(Accession, AccessionAdmin)
######### DEFAULT APPS #############
refcollections_admin.register(User, UserAdmin)
class SiteAdmin(admin.ModelAdmin):
    """Admin options for django.contrib.sites.Site in the custom admin site."""
    list_display = ('domain', 'name')
    search_fields = ('domain', 'name')
refcollections_admin.register(Site, SiteAdmin)
| 30 | 85 | 0.819608 |
5244e8d1abf8c35d164ae30d9673aa7d030207bc | 140 | py | Python | backend/apps/cmdb/apps.py | codelieche/erp | 96861ff63a63a93918fbd5181ffb2646446d0eec | [
"MIT"
] | null | null | null | backend/apps/cmdb/apps.py | codelieche/erp | 96861ff63a63a93918fbd5181ffb2646446d0eec | [
"MIT"
] | 29 | 2020-06-05T19:57:11.000Z | 2022-02-26T13:42:36.000Z | backend/apps/cmdb/apps.py | codelieche/erp | 96861ff63a63a93918fbd5181ffb2646446d0eec | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class CmdbConfig(AppConfig):
    """App configuration for the `cmdb` Django application."""
    # Use 64-bit auto-incrementing primary keys by default.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'cmdb'
| 20 | 56 | 0.75 |
3be9a79826d1e60a340d4cc6ceb8c88e59e262e2 | 670 | py | Python | plenum/common/ledger_info.py | jandayanan/indy-plenum | 2815e994404c77ad87eddcfd09062d5fe6efc1c5 | [
"Apache-2.0"
] | 148 | 2017-07-11T19:05:25.000Z | 2022-03-16T21:31:20.000Z | plenum/common/ledger_info.py | jandayanan/indy-plenum | 2815e994404c77ad87eddcfd09062d5fe6efc1c5 | [
"Apache-2.0"
] | 561 | 2017-06-29T17:59:56.000Z | 2022-03-09T15:47:14.000Z | plenum/common/ledger_info.py | jandayanan/indy-plenum | 2815e994404c77ad87eddcfd09062d5fe6efc1c5 | [
"Apache-2.0"
] | 378 | 2017-06-29T17:45:27.000Z | 2022-03-26T07:27:59.000Z | from plenum.common.ledger import Ledger
class LedgerInfo:
    """Bundle of a ledger together with its catch-up lifecycle callbacks."""

    def __init__(self,
                 id: int,
                 ledger: Ledger,
                 preCatchupStartClbk,
                 postCatchupCompleteClbk,
                 postTxnAddedToLedgerClbk,
                 verifier):
        # NOTE: `id` shadows the builtin, but it is part of the public
        # signature and cannot be renamed without breaking callers.
        self.id = id
        self.ledger = ledger
        # Callbacks named for when they fire: before catch-up starts,
        # after catch-up completes, and after a txn is added to the ledger.
        self.preCatchupStartClbk = preCatchupStartClbk
        self.postCatchupCompleteClbk = postCatchupCompleteClbk
        self.postTxnAddedToLedgerClbk = postTxnAddedToLedgerClbk
        self.verifier = verifier

    @property
    def ledger_summary(self):
        """Tuple of (ledger id, ledger size, root hash)."""
        return self.id, len(self.ledger), self.ledger.root_hash
06716d49f4e54a619394fd3d8b8dd12afcb40781 | 2,353 | py | Python | pants-plugins/structured/tasks/resolve_packages_task.py | cosmicexplorer/structured | ea452a37e265dd75d4160efa59a4a939bf8c0521 | [
"Apache-2.0"
] | null | null | null | pants-plugins/structured/tasks/resolve_packages_task.py | cosmicexplorer/structured | ea452a37e265dd75d4160efa59a4a939bf8c0521 | [
"Apache-2.0"
] | null | null | null | pants-plugins/structured/tasks/resolve_packages_task.py | cosmicexplorer/structured | ea452a37e265dd75d4160efa59a4a939bf8c0521 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.exceptions import TaskError
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_property
from structured.subsystems.cran import CRAN, CRANDependency
from structured.subsystems.github import Github, GithubDependency
from structured.tasks.r_task import RTask
class ResolvePackagesTask(RTask):
    """Pants task that resolves and installs R package dependencies
    (from CRAN or GitHub) into a target directory."""

    class ResolveError(TaskError):
        """Raised when an R dependency cannot be identified or installed."""

    @classmethod
    def subsystem_dependencies(cls):
        return super(ResolvePackagesTask, cls).subsystem_dependencies() + (
            CRAN.scoped(cls),
            Github.scoped(cls),
        )

    @memoized_property
    def cran(self):
        """Scoped CRAN subsystem instance."""
        return CRAN.scoped_instance(self)

    @memoized_property
    def github(self):
        """Scoped Github subsystem instance."""
        return Github.scoped_instance(self)

    def resolve_dep(self, dep, outdir):
        """Install a single dependency into *outdir*; return installed packages."""
        if isinstance(dep, CRANDependency):
            installed_pkgs = self.r_distribution.install_cran_package(
                self.cran, self.context, dep, outdir)
        elif isinstance(dep, GithubDependency):
            installed_pkgs = self.r_distribution.install_github_package(
                self.github, self.context, dep, outdir)
        else:
            # BUG FIX: ResolveError is a class attribute, not a module-level
            # name, so it must be referenced via `self` — the bare name raised
            # NameError instead of the intended error.
            raise self.ResolveError("could not identify type of R dependency: '{}'"
                                    .format(repr(dep)))
        return installed_pkgs

    def resolve_dep_list(self, r_deps, outdir):
        """Resolve every dependency in *r_deps*, skipping already-installed ones.

        Returns the final set of installed packages in *outdir*.
        """
        safe_mkdir(outdir)
        cur_installed_packages = self.r_distribution.get_installed_packages(
            self.context, outdir)
        self.context.log.debug("cur_installed_packages: '{}'".format(cur_installed_packages))
        for dep in r_deps:
            pkg_name = dep.name
            if pkg_name in cur_installed_packages:
                self.context.log.debug("continuing after '{}'".format(pkg_name))
                continue
                # TODO: figure out what to do here!
                # raise self.ResolveError("package '{}' is already installed in '{}'!"
                #                         .format(pkg_name, outdir))
            self.resolve_dep(dep, outdir)
            cur_installed_packages = self.r_distribution.get_installed_packages(
                self.context, outdir)
            self.context.log.debug(
                "resolved dep '{}' in '{}'. cur_installed_packages: '{}'".format(
                    pkg_name, outdir, cur_installed_packages))
        return cur_installed_packages
| 34.602941 | 93 | 0.702507 |
d323cd5fa662e1b9732221a3d97056877c751907 | 4,096 | py | Python | rest-service/manager_rest/test/infrastructure/base_list_test.py | Metaswitch/cloudify-manager | 760affb83facbe154c35c6ce20acb9432daa8bbd | [
"Apache-2.0"
] | null | null | null | rest-service/manager_rest/test/infrastructure/base_list_test.py | Metaswitch/cloudify-manager | 760affb83facbe154c35c6ce20acb9432daa8bbd | [
"Apache-2.0"
] | 1 | 2021-03-26T00:32:30.000Z | 2021-03-26T00:32:30.000Z | rest-service/manager_rest/test/infrastructure/base_list_test.py | vbohinc/cloudify-manager | 760affb83facbe154c35c6ce20acb9432daa8bbd | [
"Apache-2.0"
] | 1 | 2019-11-24T12:07:18.000Z | 2019-11-24T12:07:18.000Z | #########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import os
import shutil
from manager_rest.test.base_test import BaseServerTestCase
import wagon
class BaseListTest(BaseServerTestCase):
    """Shared helpers for REST list/pagination tests: bulk-create deployments,
    modifications, plugins, snapshots and secrets through the API."""

    def _put_deployment_modification(self, deployment_id,
                                     modified_nodes=None,
                                     node_instances=None,
                                     nodes=None):
        """Start a deployment modification; returns the response JSON."""
        resource_path = '/deployment-modifications'
        data = {'deployment_id': deployment_id,
                'modified_nodes': modified_nodes or {},
                'node_instances': node_instances or {},
                'nodes': nodes or {}}
        return self.post(resource_path, data).json

    def _mark_deployment_modification_finished(self, modification_id=None):
        """Mark a previously-started modification as finished."""
        resource_path = '/deployment-modifications/{0}/finish'.format(
            modification_id)
        data = {'modification_id': modification_id}
        return self.post(resource_path, data).json

    def _put_n_deployment_modifications(self, id_prefix,
                                        number_of_modifications,
                                        skip_creation=None):
        """Create N deployments, each with one finished modification."""
        self._put_n_deployments(id_prefix,
                                number_of_modifications,
                                skip_creation=skip_creation,
                                add_modification=True)

    def _put_n_plugins(self, number_of_plugins):
        """Build and upload N wagon plugin archives (version i for plugin i)."""
        for i in range(0, number_of_plugins):
            tmpdir = tempfile.mkdtemp(prefix='test-pagination-')
            with open(os.path.join(tmpdir, 'setup.py'), 'w') as f:
                f.write('from setuptools import setup\n')
                # NOTE(review): this renders version=<int>, not a quoted
                # string — confirm setuptools/wagon accept that.
                f.write('setup(name="some-package", version={0})'.format(i))
            plugin_path = wagon.create(tmpdir, archive_destination_dir=tmpdir)
            yaml_path = self.get_full_path('mock_blueprint/plugin.yaml')
            zip_path = self.zip_files([plugin_path, yaml_path])
            self.post_file('/plugins', zip_path)
            shutil.rmtree(tmpdir)

    def _put_n_deployments(self, id_prefix,
                           number_of_deployments,
                           skip_creation=None,
                           add_modification=None):
        """Create N blueprint/deployment pairs named <prefix><i>_<kind>.

        With skip_creation, only the (optional) modifications are issued.
        """
        for i in range(0, number_of_deployments):
            deployment_id = "{0}{1}_{2}".format(id_prefix, str(i),
                                                'deployment')
            blueprint_id = "{0}{1}_{2}".format(id_prefix, str(i), 'blueprint')
            if not skip_creation:
                self.put_deployment(deployment_id=deployment_id,
                                    blueprint_id=blueprint_id)
            if add_modification:
                response = self._put_deployment_modification(
                    deployment_id=deployment_id)
                self._mark_deployment_modification_finished(
                    modification_id=response['id'])

    def _put_n_snapshots(self, number_of_snapshots, prefix=None, suffix=None):
        """Create N snapshots named <prefix><i><suffix>."""
        prefix = prefix or 'oh-snap'
        suffix = suffix or ''
        for i in range(number_of_snapshots):
            self.client.snapshots.create(
                snapshot_id='{0}{1}{2}'.format(prefix, i, suffix),
                include_metrics=False,
                include_credentials=False
            )

    def _put_n_secrets(self, number_of_secrets):
        """Create N secrets named test<i>_secret, each with value 'value'."""
        for i in range(number_of_secrets):
            self.client.secrets.create('test{0}_secret'.format(i), 'value')
9c138948112b76952f741b7be90124819eca9c52 | 2,130 | py | Python | segmentation_models_pytorch/utils/functions.py | vfdev-5/segmentation_models.pytorch | 07a0040df57be5ed3a923435aa2912c3fa2e5673 | [
"MIT"
] | 1 | 2019-05-08T02:21:21.000Z | 2019-05-08T02:21:21.000Z | segmentation_models_pytorch/utils/functions.py | vfdev-5/segmentation_models.pytorch | 07a0040df57be5ed3a923435aa2912c3fa2e5673 | [
"MIT"
] | null | null | null | segmentation_models_pytorch/utils/functions.py | vfdev-5/segmentation_models.pytorch | 07a0040df57be5ed3a923435aa2912c3fa2e5673 | [
"MIT"
] | 1 | 2022-01-01T12:01:02.000Z | 2022-01-01T12:01:02.000Z | import torch
def iou(pr, gt, eps=1e-7, threshold=None, activation='sigmoid'):
    """Compute the IoU (Jaccard) score between predictions and ground truth.

    Source:
        https://github.com/catalyst-team/catalyst/

    Args:
        pr (torch.Tensor): predicted elements
        gt (torch.Tensor): ground-truth elements
        eps (float): epsilon to avoid zero division
        threshold: optional threshold for binarizing ``pr``
        activation (str or None): 'sigmoid', 'softmax2d', 'none' or None

    Returns:
        torch.Tensor: IoU (Jaccard) score

    Raises:
        NotImplementedError: for an unsupported ``activation``.
    """
    # Apply the requested activation in place of the original assigned
    # lambda (flake8 E731); 'none'/None leaves predictions untouched.
    if activation is None or activation == "none":
        pass
    elif activation == "sigmoid":
        pr = torch.nn.Sigmoid()(pr)
    elif activation == "softmax2d":
        pr = torch.nn.Softmax2d()(pr)
    else:
        raise NotImplementedError(
            "Activation implemented for sigmoid and softmax2d"
        )

    if threshold is not None:
        pr = (pr > threshold).float()

    intersection = torch.sum(gt * pr)
    union = torch.sum(gt) + torch.sum(pr) - intersection + eps
    return (intersection + eps) / union


# The IoU score is also known as the Jaccard index.
jaccard = iou
def f_score(pr, gt, beta=1, eps=1e-7, threshold=None, activation='sigmoid'):
    """Compute the F-beta score between predictions and ground truth.

    Args:
        pr (torch.Tensor): predicted elements
        gt (torch.Tensor): ground-truth elements
        beta (float): weight of recall relative to precision
        eps (float): epsilon to avoid zero division
        threshold: optional threshold for binarizing ``pr``
        activation (str or None): 'sigmoid', 'softmax2d', 'none' or None

    Returns:
        torch.Tensor: F-beta score (F1 when beta == 1)

    Raises:
        NotImplementedError: for an unsupported ``activation``.
    """
    # Apply the requested activation in place of the original assigned
    # lambda (flake8 E731); 'none'/None leaves predictions untouched.
    if activation is None or activation == "none":
        pass
    elif activation == "sigmoid":
        pr = torch.nn.Sigmoid()(pr)
    elif activation == "softmax2d":
        pr = torch.nn.Softmax2d()(pr)
    else:
        raise NotImplementedError(
            "Activation implemented for sigmoid and softmax2d"
        )

    if threshold is not None:
        pr = (pr > threshold).float()

    tp = torch.sum(gt * pr)
    fp = torch.sum(pr) - tp
    fn = torch.sum(gt) - tp

    score = ((1 + beta ** 2) * tp + eps) \
        / ((1 + beta ** 2) * tp + beta ** 2 * fn + fp + eps)

    return score
| 28.026316 | 76 | 0.602347 |
91375483e35e00e3d3759dff0e26b9798c9b5b80 | 682 | py | Python | init_repo.py | Serfentum/xcms_finder | dff95fd9e4f9952a6ee365152005ff08b4132210 | [
"MIT"
] | null | null | null | init_repo.py | Serfentum/xcms_finder | dff95fd9e4f9952a6ee365152005ff08b4132210 | [
"MIT"
] | null | null | null | init_repo.py | Serfentum/xcms_finder | dff95fd9e4f9952a6ee365152005ff08b4132210 | [
"MIT"
] | null | null | null | from pathlib import Path
import git
def init_repo(repo_clone_url, path, version):
    """
    Clone a repository from a URL into *path*; the directory holding the
    clone is named after *version*.
    :param repo_clone_url: str - GitHub URL to clone
    :param path: str - directory under which the repo directory will be placed
    :param version: str - name of the repo directory to create
    :return: git.repo.base.Repo, pathlib.Path - repository object and path to
        the corresponding local repository
    """
    # Create path for repo (expand a leading '~' in the user-supplied path).
    local_repo = Path(path) / version
    local_repo = local_repo.expanduser()
    # Initialize repository
    repo = git.Repo.clone_from(repo_clone_url, local_repo)
    return repo, local_repo
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.