| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 4–996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4–245 |
| max_stars_repo_name | string | length 6–130 |
| max_stars_repo_head_hexsha | string | length 40–40 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 | 1–191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24–24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24–24, nullable (⌀) |
| max_issues_repo_path | string | length 4–245 |
| max_issues_repo_name | string | length 6–130 |
| max_issues_repo_head_hexsha | string | length 40–40 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 | 1–67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24–24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24–24, nullable (⌀) |
| max_forks_repo_path | string | length 4–245 |
| max_forks_repo_name | string | length 6–130 |
| max_forks_repo_head_hexsha | string | length 40–40 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 | 1–105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24–24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24–24, nullable (⌀) |
| content | string | length 4–996k |
| avg_line_length | float64 | 1.33–58.2k |
| max_line_length | int64 | 2–323k |
| alphanum_fraction | float64 | 0–0.97 |
| content_no_comment | string | length 0–946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
hexsha: 7903198180dd839a07fa38f93a9fd6887e061f20 | size: 11,408 | ext: py | lang: Python
max_stars_repo_path: setup.py | max_stars_repo_name: milter001/transformers | max_stars_repo_head_hexsha: 71846ba7f958f03f816334c8e06e1cd75d17984e | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-11-28T08:35:10.000Z | max_stars_repo_stars_event_max_datetime: 2021-11-28T08:35:10.000Z
max_issues_repo_path: setup.py | max_issues_repo_name: PaulLerner/transformers | max_issues_repo_head_hexsha: 6d9e11a1939815910e9274cc1109b632cfa84db4 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: setup.py | max_forks_repo_name: PaulLerner/transformers | max_forks_repo_head_hexsha: 6d9e11a1939815910e9274cc1109b632cfa84db4 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
To create the package for pypi.
1. Change the version in __init__.py, setup.py as well as docs/source/conf.py. Remove the master from the links in
the new models of the README:
(https://huggingface.co/transformers/master/model_doc/ -> https://huggingface.co/transformers/model_doc/)
then run `make fix-copies` to fix the index of the documentation.
2. Unpin specific versions from setup.py that use a git install.
2. Commit these changes with the message: "Release: VERSION"
3. Add a tag in git to mark the release: "git tag VERSION -m 'Adds tag VERSION for pypi' "
Push the tag to git: git push --tags origin master
4. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
For the wheel, run: "python setup.py bdist_wheel" in the top level directory.
(this will build a wheel for the python version you use to build it).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions.
5. Check that everything looks correct by uploading the package to the pypi test server:
twine upload dist/* -r pypitest
(pypi suggest using twine as other methods upload files via plaintext.)
You may have to specify the repository url, use the following command then:
twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
Check that you can install it in a virtualenv by running:
pip install -i https://testpypi.python.org/pypi transformers
6. Upload the final version to actual pypi:
twine upload dist/* -r pypi
7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
8. Add the release version to docs/source/_static/js/custom.js and .circleci/deploy.sh
9. Update README.md to redirect to correct documentation.
10. Update the version in __init__.py, setup.py to the new version "-dev" and push to master.
"""
import os
import re
import shutil
from distutils.core import Command
from pathlib import Path
from setuptools import find_packages, setup
# Remove stale transformers.egg-info directory to avoid https://github.com/pypa/pip/issues/5466
stale_egg_info = Path(__file__).parent / "transformers.egg-info"
if stale_egg_info.exists():
print(
(
"Warning: {} exists.\n\n"
"If you recently updated transformers to 3.0 or later, this is expected,\n"
"but it may prevent transformers from installing in editable mode.\n\n"
"This directory is automatically generated by Python's packaging tools.\n"
"I will remove it now.\n\n"
"See https://github.com/pypa/pip/issues/5466 for details.\n"
).format(stale_egg_info)
)
shutil.rmtree(stale_egg_info)
# IMPORTANT:
# 1. all dependencies should be listed here with their version requirements if any
# 2. once modified, run: `make deps_table_update` to update src/transformers/dependency_versions_table.py
_deps = [
"black>=20.8b1",
"cookiecutter==1.7.2",
"dataclasses",
"datasets",
"faiss-cpu",
"fastapi",
"filelock",
"flake8>=3.8.3",
"flax>=0.2.2",
"fugashi>=1.0",
"importlib_metadata",
"ipadic>=1.0.0,<2.0",
"isort>=5.5.4",
"jax>=0.2.8",
"jaxlib>=0.1.59",
"keras2onnx",
"numpy>=1.17",
"onnxconverter-common",
"onnxruntime-tools>=1.4.2",
"onnxruntime>=1.4.0",
"packaging",
"parameterized",
"protobuf",
"psutil",
"pydantic",
"pytest",
"pytest-xdist",
"python>=3.6.0",
"recommonmark",
"regex!=2019.12.17",
"requests",
"sacremoses",
"scikit-learn",
"sentencepiece==0.1.91",
"soundfile",
"sphinx-copybutton",
"sphinx-markdown-tables",
"sphinx-rtd-theme==0.4.3", # sphinx-rtd-theme==0.5.0 introduced big changes in the style.
"sphinx==3.2.1",
"starlette",
"tensorflow-cpu>=2.3",
"tensorflow>=2.3",
"timeout-decorator",
"tokenizers>=0.10.1,<0.11",
"torch>=1.0",
"torchaudio",
"tqdm>=4.27",
"unidic>=1.0.2",
"unidic_lite>=1.0.7",
"uvicorn",
]
# this is a lookup table with items like:
#
# tokenizers: "tokenizers==0.9.4"
# packaging: "packaging"
#
# some of the values are versioned whereas others aren't.
deps = {b: a for a, b in (re.findall(r"^(([^!=<>]+)(?:[!=<>].*)?$)", x)[0] for x in _deps)}
# since we save this data in src/transformers/dependency_versions_table.py it can be easily accessed from
# anywhere. If you need to quickly access the data from this table in a shell, you can do so easily with:
#
# python -c 'import sys; from transformers.dependency_versions_table import deps; \
# print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets
#
# Just pass the desired package names to that script as it's shown with 2 packages above.
#
# If transformers is not yet installed and the work is done from the cloned repo remember to add `PYTHONPATH=src` to the script above
#
# You can then feed this for example to `pip`:
#
# pip install -U $(python -c 'import sys; from transformers.dependency_versions_table import deps; \
# print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets)
#
def deps_list(*pkgs):
return [deps[pkg] for pkg in pkgs]
class DepsTableUpdateCommand(Command):
"""
A custom distutils command that updates the dependency table.
usage: python setup.py deps_table_update
"""
description = "build runtime dependency table"
user_options = [
# format: (long option, short option, description).
("dep-table-update", None, "updates src/transformers/dependency_versions_table.py"),
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
entries = "\n".join([f' "{k}": "{v}",' for k, v in deps.items()])
content = [
"# THIS FILE HAS BEEN AUTOGENERATED. To update:",
"# 1. modify the `_deps` dict in setup.py",
"# 2. run `make deps_table_update``",
"deps = {",
entries,
"}",
"",
]
target = "src/transformers/dependency_versions_table.py"
print(f"updating {target}")
with open(target, "w", encoding="utf-8", newline="\n") as f:
f.write("\n".join(content))
extras = {}
extras["ja"] = deps_list("fugashi", "ipadic", "unidic_lite", "unidic")
extras["sklearn"] = deps_list("scikit-learn")
extras["tf"] = deps_list("tensorflow", "onnxconverter-common", "keras2onnx")
extras["tf-cpu"] = deps_list("tensorflow-cpu", "onnxconverter-common", "keras2onnx")
extras["torch"] = deps_list("torch")
if os.name == "nt": # windows
extras["retrieval"] = deps_list("datasets") # faiss is not supported on windows
extras["flax"] = [] # jax is not supported on windows
else:
extras["retrieval"] = deps_list("faiss-cpu", "datasets")
extras["flax"] = deps_list("jax", "jaxlib", "flax")
extras["tokenizers"] = deps_list("tokenizers")
extras["onnxruntime"] = deps_list("onnxruntime", "onnxruntime-tools")
extras["modelcreation"] = deps_list("cookiecutter")
extras["serving"] = deps_list("pydantic", "uvicorn", "fastapi", "starlette")
extras["speech"] = deps_list("soundfile", "torchaudio")
extras["sentencepiece"] = deps_list("sentencepiece", "protobuf")
extras["testing"] = (
deps_list("pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil", "datasets")
+ extras["retrieval"]
+ extras["modelcreation"]
)
extras["docs"] = deps_list("recommonmark", "sphinx", "sphinx-markdown-tables", "sphinx-rtd-theme", "sphinx-copybutton")
extras["quality"] = deps_list("black", "isort", "flake8")
extras["all"] = extras["tf"] + extras["torch"] + extras["flax"] + extras["sentencepiece"] + extras["tokenizers"]
extras["dev"] = (
extras["all"]
+ extras["testing"]
+ extras["quality"]
+ extras["ja"]
+ extras["docs"]
+ extras["sklearn"]
+ extras["modelcreation"]
)
extras["torchhub"] = deps_list(
"filelock",
"importlib_metadata",
"numpy",
"packaging",
"protobuf",
"regex",
"requests",
"sacremoses",
"sentencepiece",
"torch",
"tokenizers",
"tqdm",
)
# when modifying the following list, make sure to update src/transformers/dependency_versions_check.py
install_requires = [
deps["dataclasses"] + ";python_version<'3.7'", # dataclasses for Python versions that don't have it
deps["importlib_metadata"] + ";python_version<'3.8'", # importlib_metadata for Python versions that don't have it
deps["filelock"], # filesystem locks, e.g., to prevent parallel downloads
deps["numpy"],
deps["packaging"], # utilities from PyPA to e.g., compare versions
deps["regex"], # for OpenAI GPT
deps["requests"], # for downloading models over HTTPS
deps["sacremoses"], # for XLM
deps["tokenizers"],
deps["tqdm"], # progress bars in model download and training scripts
]
setup(
name="transformers",
version="4.4.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
author="Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Sam Shleifer, Patrick von Platen, Sylvain Gugger, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors",
author_email="thomas@huggingface.co",
description="State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="NLP deep learning transformer pytorch tensorflow BERT GPT GPT-2 google openai CMU",
license="Apache",
url="https://github.com/huggingface/transformers",
package_dir={"": "src"},
packages=find_packages("src"),
extras_require=extras,
entry_points={"console_scripts": ["transformers-cli=transformers.commands.transformers_cli:main"]},
python_requires=">=3.6.0",
install_requires=install_requires,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
cmdclass={"deps_table_update": DepsTableUpdateCommand},
)
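The three values that follow (avg_line_length, max_line_length, alphanum_fraction) are statistics derived from the content field above. A minimal sketch of how such statistics could be computed — the exact definitions are an assumption, not the dataset's actual preprocessing code:

```python
def content_stats(content: str):
    # Assumed definitions of the derived columns; not the dataset's own code.
    lines = content.splitlines()
    avg_line_length = (sum(len(line) for line in lines) / len(lines)) if lines else 0.0
    max_line_length = max((len(line) for line in lines), default=0)
    alphanum_fraction = (sum(ch.isalnum() for ch in content) / len(content)) if content else 0.0
    return avg_line_length, max_line_length, alphanum_fraction
```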
avg_line_length: 36.33121 | max_line_length: 233 | alphanum_fraction: 0.673562
content_no_comment:
import os
import re
import shutil
from distutils.core import Command
from pathlib import Path
from setuptools import find_packages, setup
stale_egg_info = Path(__file__).parent / "transformers.egg-info"
if stale_egg_info.exists():
print(
(
"Warning: {} exists.\n\n"
"If you recently updated transformers to 3.0 or later, this is expected,\n"
"but it may prevent transformers from installing in editable mode.\n\n"
"This directory is automatically generated by Python's packaging tools.\n"
"I will remove it now.\n\n"
"See https://github.com/pypa/pip/issues/5466 for details.\n"
).format(stale_egg_info)
)
shutil.rmtree(stale_egg_info)
# IMPORTANT:
# 1. all dependencies should be listed here with their version requirements if any
# 2. once modified, run: `make deps_table_update` to update src/transformers/dependency_versions_table.py
_deps = [
"black>=20.8b1",
"cookiecutter==1.7.2",
"dataclasses",
"datasets",
"faiss-cpu",
"fastapi",
"filelock",
"flake8>=3.8.3",
"flax>=0.2.2",
"fugashi>=1.0",
"importlib_metadata",
"ipadic>=1.0.0,<2.0",
"isort>=5.5.4",
"jax>=0.2.8",
"jaxlib>=0.1.59",
"keras2onnx",
"numpy>=1.17",
"onnxconverter-common",
"onnxruntime-tools>=1.4.2",
"onnxruntime>=1.4.0",
"packaging",
"parameterized",
"protobuf",
"psutil",
"pydantic",
"pytest",
"pytest-xdist",
"python>=3.6.0",
"recommonmark",
"regex!=2019.12.17",
"requests",
"sacremoses",
"scikit-learn",
"sentencepiece==0.1.91",
"soundfile",
"sphinx-copybutton",
"sphinx-markdown-tables",
"sphinx-rtd-theme==0.4.3", # sphinx-rtd-theme==0.5.0 introduced big changes in the style.
"sphinx==3.2.1",
"starlette",
"tensorflow-cpu>=2.3",
"tensorflow>=2.3",
"timeout-decorator",
"tokenizers>=0.10.1,<0.11",
"torch>=1.0",
"torchaudio",
"tqdm>=4.27",
"unidic>=1.0.2",
"unidic_lite>=1.0.7",
"uvicorn",
]
# this is a lookup table with items like:
#
# tokenizers: "tokenizers==0.9.4"
# packaging: "packaging"
#
# some of the values are versioned whereas others aren't.
deps = {b: a for a, b in (re.findall(r"^(([^!=<>]+)(?:[!=<>].*)?$)", x)[0] for x in _deps)}
# print(" ".join([ deps[x] for x in sys.argv[1:]]))' tokenizers datasets
#
# If transformers is not yet installed and the work is done from the cloned repo remember to add `PYTHONPATH=src` to the script above
#
# You can then feed this for example to `pip`:
#
# pip install -U $(python -c 'import sys; from transformers.dependency_versions_table import deps; \
#
def deps_list(*pkgs):
return [deps[pkg] for pkg in pkgs]
class DepsTableUpdateCommand(Command):
description = "build runtime dependency table"
user_options = [
# format: (long option, short option, description).
("dep-table-update", None, "updates src/transformers/dependency_versions_table.py"),
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
entries = "\n".join([f' "{k}": "{v}",' for k, v in deps.items()])
content = [
"# THIS FILE HAS BEEN AUTOGENERATED. To update:",
"# 1. modify the `_deps` dict in setup.py",
"# 2. run `make deps_table_update``",
"deps = {",
entries,
"}",
"",
]
target = "src/transformers/dependency_versions_table.py"
print(f"updating {target}")
with open(target, "w", encoding="utf-8", newline="\n") as f:
f.write("\n".join(content))
extras = {}
extras["ja"] = deps_list("fugashi", "ipadic", "unidic_lite", "unidic")
extras["sklearn"] = deps_list("scikit-learn")
extras["tf"] = deps_list("tensorflow", "onnxconverter-common", "keras2onnx")
extras["tf-cpu"] = deps_list("tensorflow-cpu", "onnxconverter-common", "keras2onnx")
extras["torch"] = deps_list("torch")
if os.name == "nt": # windows
extras["retrieval"] = deps_list("datasets") # faiss is not supported on windows
extras["flax"] = [] # jax is not supported on windows
else:
extras["retrieval"] = deps_list("faiss-cpu", "datasets")
extras["flax"] = deps_list("jax", "jaxlib", "flax")
extras["tokenizers"] = deps_list("tokenizers")
extras["onnxruntime"] = deps_list("onnxruntime", "onnxruntime-tools")
extras["modelcreation"] = deps_list("cookiecutter")
extras["serving"] = deps_list("pydantic", "uvicorn", "fastapi", "starlette")
extras["speech"] = deps_list("soundfile", "torchaudio")
extras["sentencepiece"] = deps_list("sentencepiece", "protobuf")
extras["testing"] = (
deps_list("pytest", "pytest-xdist", "timeout-decorator", "parameterized", "psutil", "datasets")
+ extras["retrieval"]
+ extras["modelcreation"]
)
extras["docs"] = deps_list("recommonmark", "sphinx", "sphinx-markdown-tables", "sphinx-rtd-theme", "sphinx-copybutton")
extras["quality"] = deps_list("black", "isort", "flake8")
extras["all"] = extras["tf"] + extras["torch"] + extras["flax"] + extras["sentencepiece"] + extras["tokenizers"]
extras["dev"] = (
extras["all"]
+ extras["testing"]
+ extras["quality"]
+ extras["ja"]
+ extras["docs"]
+ extras["sklearn"]
+ extras["modelcreation"]
)
extras["torchhub"] = deps_list(
"filelock",
"importlib_metadata",
"numpy",
"packaging",
"protobuf",
"regex",
"requests",
"sacremoses",
"sentencepiece",
"torch",
"tokenizers",
"tqdm",
)
# when modifying the following list, make sure to update src/transformers/dependency_versions_check.py
install_requires = [
deps["dataclasses"] + ";python_version<'3.7'", # dataclasses for Python versions that don't have it
deps["importlib_metadata"] + ";python_version<'3.8'",
deps["filelock"], # filesystem locks, e.g., to prevent parallel downloads
deps["numpy"],
deps["packaging"], # utilities from PyPA to e.g., compare versions
deps["regex"], # for OpenAI GPT
deps["requests"], # for downloading models over HTTPS
deps["sacremoses"], # for XLM
deps["tokenizers"],
deps["tqdm"], # progress bars in model download and training scripts
]
setup(
name="transformers",
version="4.4.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
author="Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Sam Shleifer, Patrick von Platen, Sylvain Gugger, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors",
author_email="thomas@huggingface.co",
description="State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="NLP deep learning transformer pytorch tensorflow BERT GPT GPT-2 google openai CMU",
license="Apache",
url="https://github.com/huggingface/transformers",
package_dir={"": "src"},
packages=find_packages("src"),
extras_require=extras,
entry_points={"console_scripts": ["transformers-cli=transformers.commands.transformers_cli:main"]},
python_requires=">=3.6.0",
install_requires=install_requires,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
cmdclass={"deps_table_update": DepsTableUpdateCommand},
)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790319936ac015db09e45b9eac799b3bdf0b0250 | size: 11,862 | ext: py | lang: Python
max_stars_repo_path: python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py | max_stars_repo_name: laipaang/Paddle | max_stars_repo_head_hexsha: d7f35434b761707a8479b75636546a624399369a | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 3 | max_stars_repo_stars_event_min_datetime: 2021-06-11T06:48:10.000Z | max_stars_repo_stars_event_max_datetime: 2021-09-02T10:18:06.000Z
max_issues_repo_path: python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py | max_issues_repo_name: MaJun-cn/Paddle | max_issues_repo_head_hexsha: 0ec3a42e9740a5f5066053bb49a923d538eba24a | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: python/paddle/fluid/tests/unittests/dygraph_to_static/test_ptb_lm.py | max_forks_repo_name: MaJun-cn/Paddle | max_forks_repo_head_hexsha: 0ec3a42e9740a5f5066053bb49a923d538eba24a | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 4 | max_forks_repo_forks_event_min_datetime: 2020-07-27T13:24:03.000Z | max_forks_repo_forks_event_max_datetime: 2020-08-06T08:20:32.000Z
content:
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import logging
import time
import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.jit import declarative
from paddle.fluid.dygraph.nn import Embedding
from paddle.fluid.optimizer import SGDOptimizer
PRINT_STEP = 20
SEED = 2020
program_translator = ProgramTranslator()
class SimpleLSTMRNN(fluid.Layer):
def __init__(self,
hidden_size,
num_steps,
num_layers=2,
init_scale=0.1,
dropout=None):
super(SimpleLSTMRNN, self).__init__()
self._hidden_size = hidden_size
self._num_layers = num_layers
self._init_scale = init_scale
self._dropout = dropout
self._num_steps = num_steps
self.cell_array = []
self.hidden_array = []
self.weight_1_arr = []
self.weight_2_arr = []
self.bias_arr = []
self.mask_array = []
for i in range(self._num_layers):
weight_1 = self.create_parameter(
attr=fluid.ParamAttr(
initializer=fluid.initializer.UniformInitializer(
low=-self._init_scale, high=self._init_scale)),
shape=[self._hidden_size * 2, self._hidden_size * 4],
dtype="float32",
default_initializer=fluid.initializer.UniformInitializer(
low=-self._init_scale, high=self._init_scale))
self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1))
bias_1 = self.create_parameter(
attr=fluid.ParamAttr(
initializer=fluid.initializer.UniformInitializer(
low=-self._init_scale, high=self._init_scale)),
shape=[self._hidden_size * 4],
dtype="float32",
default_initializer=fluid.initializer.Constant(0.0))
self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1))
def forward(self, input_embedding, init_hidden=None, init_cell=None):
cell_array = []
hidden_array = []
for i in range(self._num_layers):
hidden_array.append(init_hidden[i])
cell_array.append(init_cell[i])
res = []
for index in range(self._num_steps):
step_input = input_embedding[:, index, :]
for k in range(self._num_layers):
pre_hidden = hidden_array[k]
pre_cell = cell_array[k]
weight_1 = self.weight_1_arr[k]
bias = self.bias_arr[k]
nn = fluid.layers.concat([step_input, pre_hidden], 1)
gate_input = fluid.layers.matmul(x=nn, y=weight_1)
gate_input = fluid.layers.elementwise_add(gate_input, bias)
i, j, f, o = fluid.layers.split(
gate_input, num_or_sections=4, dim=-1)
c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid(
i) * fluid.layers.tanh(j)
m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o)
hidden_array[k] = m
cell_array[k] = c
step_input = m
if self._dropout is not None and self._dropout > 0.0:
step_input = fluid.layers.dropout(
step_input,
dropout_prob=self._dropout,
dropout_implementation='upscale_in_train')
res.append(step_input)
real_res = fluid.layers.concat(res, 1)
real_res = fluid.layers.reshape(
real_res, [-1, self._num_steps, self._hidden_size])
last_hidden = fluid.layers.concat(hidden_array, 1)
last_hidden = fluid.layers.reshape(
last_hidden, shape=[-1, self._num_layers, self._hidden_size])
last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2])
last_cell = fluid.layers.concat(cell_array, 1)
last_cell = fluid.layers.reshape(
last_cell, shape=[-1, self._num_layers, self._hidden_size])
last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2])
return real_res, last_hidden, last_cell
class PtbModel(fluid.Layer):
def __init__(self,
hidden_size,
vocab_size,
num_layers=2,
num_steps=20,
init_scale=0.1,
dropout=None):
super(PtbModel, self).__init__()
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.init_scale = init_scale
self.num_layers = num_layers
self.num_steps = num_steps
self.dropout = dropout
self.simple_lstm_rnn = SimpleLSTMRNN(
hidden_size,
num_steps,
num_layers=num_layers,
init_scale=init_scale,
dropout=dropout)
self.embedding = Embedding(
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False,
param_attr=fluid.ParamAttr(
name='embedding_para',
initializer=fluid.initializer.UniformInitializer(
low=-init_scale, high=init_scale)))
self.softmax_weight = self.create_parameter(
attr=fluid.ParamAttr(),
shape=[self.hidden_size, self.vocab_size],
dtype="float32",
default_initializer=fluid.initializer.UniformInitializer(
low=-self.init_scale, high=self.init_scale))
self.softmax_bias = self.create_parameter(
attr=fluid.ParamAttr(),
shape=[self.vocab_size],
dtype="float32",
default_initializer=fluid.initializer.UniformInitializer(
low=-self.init_scale, high=self.init_scale))
def build_once(self, input, label, init_hidden, init_cell):
pass
@declarative
def forward(self, input, label, init_hidden, init_cell):
init_h = fluid.layers.reshape(
init_hidden, shape=[self.num_layers, -1, self.hidden_size])
init_c = fluid.layers.reshape(
init_cell, shape=[self.num_layers, -1, self.hidden_size])
x_emb = self.embedding(input)
x_emb = fluid.layers.reshape(
x_emb, shape=[-1, self.num_steps, self.hidden_size])
if self.dropout is not None and self.dropout > 0.0:
x_emb = fluid.layers.dropout(
x_emb,
dropout_prob=self.dropout,
dropout_implementation='upscale_in_train')
rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(x_emb, init_h,
init_c)
projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False)
loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps])
loss = fluid.layers.reduce_mean(loss, dim=[0])
loss = fluid.layers.reduce_sum(loss)
return loss, last_hidden, last_cell
def debug_emb(self):
np.save("emb_grad", self.x_emb.gradient())
def train(place):
num_layers = 1
batch_size = 4
hidden_size = 10
num_steps = 3
init_scale = 0.1
max_epoch = 1
dropout = 0.0
vocab_size = 1000
batch_num = 200
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = SEED
fluid.default_main_program().random_seed = SEED
ptb_model = PtbModel(
hidden_size=hidden_size,
vocab_size=vocab_size,
num_layers=num_layers,
num_steps=num_steps,
init_scale=init_scale,
dropout=dropout)
sgd = SGDOptimizer(
learning_rate=1e-3, parameter_list=ptb_model.parameters())
for epoch_id in range(max_epoch):
total_loss = 0.0
iters = 0.0
total_sample = 0
init_hidden_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
init_cell_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
init_hidden = to_variable(init_hidden_data)
init_cell = to_variable(init_cell_data)
for step_id in range(batch_num):
x_data = np.arange(12).reshape(4, 3).astype('int64')
y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
y_data = y_data.reshape((-1, 1))
x_data = x_data.reshape((-1, num_steps, 1))
y_data = y_data.reshape((-1, num_steps, 1))
x = to_variable(x_data)
y = to_variable(y_data)
dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
init_cell)
out_loss = dy_loss.numpy()
dy_loss.backward()
sgd.minimize(dy_loss)
ptb_model.clear_gradients()
total_loss += out_loss
iters += num_steps
total_sample += 1
if step_id % PRINT_STEP == 0:
if step_id == 0:
logging.info("epoch %d | step %d, loss %0.3f" % (
epoch_id, step_id, total_loss / total_sample))
avg_batch_time = time.time()
else:
speed = PRINT_STEP / (time.time() - avg_batch_time)
logging.info(
"epoch %d | step %d, loss %0.3f, speed %.3f steps/s"
% (epoch_id, step_id, total_loss / total_sample,
speed))
avg_batch_time = time.time()
return out_loss, last_hidden.numpy(), last_cell.numpy()
def train_dygraph(place):
program_translator.enable(False)
return train(place)
def train_static(place):
program_translator.enable(True)
return train(place)
class TestPtb(unittest.TestCase):
def setUp(self):
self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() \
else fluid.CPUPlace()
def test_check_result(self):
loss_1, hidden_1, cell_1 = train_static(self.place)
loss_2, hidden_2, cell_2 = train_dygraph(self.place)
self.assertTrue(
np.allclose(loss_1, loss_2),
msg="static loss: {} \ndygraph loss: {}".format(loss_1, loss_2))
self.assertTrue(
np.allclose(hidden_1, hidden_2),
msg="static hidden: {} \ndygraph acc1: {}".format(hidden_1,
hidden_2))
self.assertTrue(
np.allclose(cell_1, cell_2),
msg="static cell: {} \ndygraph cell: {}".format(cell_1, cell_2))
if __name__ == '__main__':
unittest.main()
avg_line_length: 37.301887 | max_line_length: 80 | alphanum_fraction: 0.581858
content_no_comment:
from __future__ import absolute_import, division, print_function
import logging
import time
import unittest
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.jit import declarative
from paddle.fluid.dygraph.nn import Embedding
from paddle.fluid.optimizer import SGDOptimizer
PRINT_STEP = 20
SEED = 2020
program_translator = ProgramTranslator()
class SimpleLSTMRNN(fluid.Layer):
def __init__(self,
hidden_size,
num_steps,
num_layers=2,
init_scale=0.1,
dropout=None):
super(SimpleLSTMRNN, self).__init__()
self._hidden_size = hidden_size
self._num_layers = num_layers
self._init_scale = init_scale
self._dropout = dropout
self._num_steps = num_steps
self.cell_array = []
self.hidden_array = []
self.weight_1_arr = []
self.weight_2_arr = []
self.bias_arr = []
self.mask_array = []
for i in range(self._num_layers):
weight_1 = self.create_parameter(
attr=fluid.ParamAttr(
initializer=fluid.initializer.UniformInitializer(
low=-self._init_scale, high=self._init_scale)),
shape=[self._hidden_size * 2, self._hidden_size * 4],
dtype="float32",
default_initializer=fluid.initializer.UniformInitializer(
low=-self._init_scale, high=self._init_scale))
self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1))
bias_1 = self.create_parameter(
attr=fluid.ParamAttr(
initializer=fluid.initializer.UniformInitializer(
low=-self._init_scale, high=self._init_scale)),
shape=[self._hidden_size * 4],
dtype="float32",
default_initializer=fluid.initializer.Constant(0.0))
self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1))
def forward(self, input_embedding, init_hidden=None, init_cell=None):
cell_array = []
hidden_array = []
for i in range(self._num_layers):
hidden_array.append(init_hidden[i])
cell_array.append(init_cell[i])
res = []
for index in range(self._num_steps):
step_input = input_embedding[:, index, :]
for k in range(self._num_layers):
pre_hidden = hidden_array[k]
pre_cell = cell_array[k]
weight_1 = self.weight_1_arr[k]
bias = self.bias_arr[k]
nn = fluid.layers.concat([step_input, pre_hidden], 1)
gate_input = fluid.layers.matmul(x=nn, y=weight_1)
gate_input = fluid.layers.elementwise_add(gate_input, bias)
i, j, f, o = fluid.layers.split(
gate_input, num_or_sections=4, dim=-1)
c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid(
i) * fluid.layers.tanh(j)
m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o)
hidden_array[k] = m
cell_array[k] = c
step_input = m
if self._dropout is not None and self._dropout > 0.0:
step_input = fluid.layers.dropout(
step_input,
dropout_prob=self._dropout,
dropout_implementation='upscale_in_train')
res.append(step_input)
real_res = fluid.layers.concat(res, 1)
real_res = fluid.layers.reshape(
real_res, [-1, self._num_steps, self._hidden_size])
last_hidden = fluid.layers.concat(hidden_array, 1)
last_hidden = fluid.layers.reshape(
last_hidden, shape=[-1, self._num_layers, self._hidden_size])
last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2])
last_cell = fluid.layers.concat(cell_array, 1)
last_cell = fluid.layers.reshape(
last_cell, shape=[-1, self._num_layers, self._hidden_size])
last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2])
return real_res, last_hidden, last_cell
class PtbModel(fluid.Layer):
def __init__(self,
hidden_size,
vocab_size,
num_layers=2,
num_steps=20,
init_scale=0.1,
dropout=None):
super(PtbModel, self).__init__()
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.init_scale = init_scale
self.num_layers = num_layers
self.num_steps = num_steps
self.dropout = dropout
self.simple_lstm_rnn = SimpleLSTMRNN(
hidden_size,
num_steps,
num_layers=num_layers,
init_scale=init_scale,
dropout=dropout)
self.embedding = Embedding(
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False,
param_attr=fluid.ParamAttr(
name='embedding_para',
initializer=fluid.initializer.UniformInitializer(
low=-init_scale, high=init_scale)))
self.softmax_weight = self.create_parameter(
attr=fluid.ParamAttr(),
shape=[self.hidden_size, self.vocab_size],
dtype="float32",
default_initializer=fluid.initializer.UniformInitializer(
low=-self.init_scale, high=self.init_scale))
self.softmax_bias = self.create_parameter(
attr=fluid.ParamAttr(),
shape=[self.vocab_size],
dtype="float32",
default_initializer=fluid.initializer.UniformInitializer(
low=-self.init_scale, high=self.init_scale))
def build_once(self, input, label, init_hidden, init_cell):
pass
@declarative
def forward(self, input, label, init_hidden, init_cell):
init_h = fluid.layers.reshape(
init_hidden, shape=[self.num_layers, -1, self.hidden_size])
init_c = fluid.layers.reshape(
init_cell, shape=[self.num_layers, -1, self.hidden_size])
x_emb = self.embedding(input)
x_emb = fluid.layers.reshape(
x_emb, shape=[-1, self.num_steps, self.hidden_size])
if self.dropout is not None and self.dropout > 0.0:
x_emb = fluid.layers.dropout(
x_emb,
dropout_prob=self.dropout,
dropout_implementation='upscale_in_train')
rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(x_emb, init_h,
init_c)
projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False)
loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps])
loss = fluid.layers.reduce_mean(loss, dim=[0])
loss = fluid.layers.reduce_sum(loss)
return loss, last_hidden, last_cell
def debug_emb(self):
np.save("emb_grad", self.x_emb.gradient())
def train(place):
num_layers = 1
batch_size = 4
hidden_size = 10
num_steps = 3
init_scale = 0.1
max_epoch = 1
dropout = 0.0
vocab_size = 1000
batch_num = 200
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = SEED
fluid.default_main_program().random_seed = SEED
ptb_model = PtbModel(
hidden_size=hidden_size,
vocab_size=vocab_size,
num_layers=num_layers,
num_steps=num_steps,
init_scale=init_scale,
dropout=dropout)
sgd = SGDOptimizer(
learning_rate=1e-3, parameter_list=ptb_model.parameters())
for epoch_id in range(max_epoch):
total_loss = 0.0
iters = 0.0
total_sample = 0
init_hidden_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
init_cell_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
init_hidden = to_variable(init_hidden_data)
init_cell = to_variable(init_cell_data)
for step_id in range(batch_num):
x_data = np.arange(12).reshape(4, 3).astype('int64')
y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
y_data = y_data.reshape((-1, 1))
x_data = x_data.reshape((-1, num_steps, 1))
y_data = y_data.reshape((-1, num_steps, 1))
x = to_variable(x_data)
y = to_variable(y_data)
dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
init_cell)
out_loss = dy_loss.numpy()
dy_loss.backward()
sgd.minimize(dy_loss)
ptb_model.clear_gradients()
total_loss += out_loss
iters += num_steps
total_sample += 1
if step_id % PRINT_STEP == 0:
if step_id == 0:
logging.info("epoch %d | step %d, loss %0.3f" % (
epoch_id, step_id, total_loss / total_sample))
avg_batch_time = time.time()
else:
speed = PRINT_STEP / (time.time() - avg_batch_time)
logging.info(
"epoch %d | step %d, loss %0.3f, speed %.3f steps/s"
% (epoch_id, step_id, total_loss / total_sample,
speed))
avg_batch_time = time.time()
return out_loss, last_hidden.numpy(), last_cell.numpy()
def train_dygraph(place):
program_translator.enable(False)
return train(place)
def train_static(place):
program_translator.enable(True)
return train(place)
class TestPtb(unittest.TestCase):
def setUp(self):
self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() \
else fluid.CPUPlace()
def test_check_result(self):
loss_1, hidden_1, cell_1 = train_static(self.place)
loss_2, hidden_2, cell_2 = train_dygraph(self.place)
self.assertTrue(
np.allclose(loss_1, loss_2),
msg="static loss: {} \ndygraph loss: {}".format(loss_1, loss_2))
self.assertTrue(
np.allclose(hidden_1, hidden_2),
msg="static hidden: {} \ndygraph acc1: {}".format(hidden_1,
hidden_2))
self.assertTrue(
np.allclose(cell_1, cell_2),
msg="static cell: {} \ndygraph cell: {}".format(cell_1, cell_2))
if __name__ == '__main__':
unittest.main()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 79031ab3c19a36d606422ffe661ef5b98ac8f980 | size: 1,010 | ext: py | lang: Python
max_stars_repo_path: starthinker/util/salesforce/quickstart.py | max_stars_repo_name: quan/starthinker | max_stars_repo_head_hexsha: 4e392415d77affd4a3d91165d1141ab38efd3b8b | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: starthinker/util/salesforce/quickstart.py | max_issues_repo_name: quan/starthinker | max_issues_repo_head_hexsha: 4e392415d77affd4a3d91165d1141ab38efd3b8b | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: starthinker/util/salesforce/quickstart.py | max_forks_repo_name: quan/starthinker | max_forks_repo_head_hexsha: 4e392415d77affd4a3d91165d1141ab38efd3b8b | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from starthinker.util.project import project
from starthinker.util.salesforce import get_service
if __name__ == '__main__':
project.from_commandline('setup')
service = get_service()
print('Credentials Ready: %s' % project.recipe['setup']['auth']['salesforce'])
avg_line_length: 38.846154 | max_line_length: 80 | alphanum_fraction: 0.635644
content_no_comment: (empty in this preview)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 79031abb303eeb168d31b045754650487be818de | size: 12,715 | ext: py | lang: Python
max_stars_repo_path: light9/curvecalc/curve.py | max_stars_repo_name: drewp/light9 | max_stars_repo_head_hexsha: ab173a40d095051546e532962f7a33ac502943a6 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2018-10-05T13:32:46.000Z | max_stars_repo_stars_event_max_datetime: 2022-01-01T22:51:20.000Z
max_issues_repo_path: light9/curvecalc/curve.py | max_issues_repo_name: drewp/light9 | max_issues_repo_head_hexsha: ab173a40d095051546e532962f7a33ac502943a6 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 4 | max_issues_repo_issues_event_min_datetime: 2021-06-08T19:33:40.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-11T23:18:06.000Z
max_forks_repo_path: light9/curvecalc/curve.py | max_forks_repo_name: drewp/light9 | max_forks_repo_head_hexsha: ab173a40d095051546e532962f7a33ac502943a6 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import logging, ast, os
from bisect import bisect_left, bisect
import louie as dispatcher
from twisted.internet import reactor
from rdflib import Literal
from light9 import showconfig
from light9.namespaces import L9, RDF, RDFS
from rdfdb.patch import Patch
log = logging.getLogger()
# todo: move to config, consolidate with ascoltami, musicPad, etc
introPad = 4
postPad = 4
class Curve(object):
"""curve does not know its name. see Curveset"""
def __init__(self, uri, pointsStorage='graph'):
self.uri = uri
self.pointsStorage = pointsStorage
self.points = [] # x-sorted list of (x,y)
self._muted = False
def __repr__(self):
return "<%s %s (%s points)>" % (self.__class__.__name__, self.uri,
len(self.points))
def muted():
doc = "Whether to currently send levels (boolean, obviously)"
def fget(self):
return self._muted
def fset(self, val):
self._muted = val
dispatcher.send('mute changed', sender=self)
return locals()
muted = property(**muted())
def toggleMute(self):
self.muted = not self.muted
def load(self, filename):
self.points[:] = []
for line in open(filename):
x, y = line.split()
self.points.append((float(x), ast.literal_eval(y)))
self.points.sort()
dispatcher.send("points changed", sender=self)
def set_from_string(self, pts):
self.points[:] = []
vals = pts.split()
pairs = list(zip(vals[0::2], vals[1::2]))
for x, y in pairs:
self.points.append((float(x), ast.literal_eval(y)))
self.points.sort()
dispatcher.send("points changed", sender=self)
def points_as_string(self):
def outVal(x):
if isinstance(x, str): # markers
return x
return "%.4g" % x
return ' '.join(
"%s %s" % (outVal(p[0]), outVal(p[1])) for p in self.points)
def save(self, filename):
# this is just around for markers, now
if filename.endswith('-music') or filename.endswith('_music'):
print("not saving music track")
return
f = open(filename, 'w')
for p in self.points:
f.write("%s %r\n" % p)
f.close()
def eval(self, t, allow_muting=True):
if self.muted and allow_muting:
return 0
if not self.points:
raise ValueError("curve has no points")
i = bisect_left(self.points, (t, None)) - 1
if i == -1:
return self.points[0][1]
if self.points[i][0] > t:
return self.points[i][1]
if i >= len(self.points) - 1:
return self.points[i][1]
p1, p2 = self.points[i], self.points[i + 1]
frac = (t - p1[0]) / (p2[0] - p1[0])
y = p1[1] + (p2[1] - p1[1]) * frac
return y
__call__ = eval
def insert_pt(self, new_pt):
"""returns index of new point"""
i = bisect(self.points, (new_pt[0], None))
self.points.insert(i, new_pt)
# missing a check that this isn't the same X as the neighbor point
dispatcher.send("points changed", sender=self)
return i
def live_input_point(self, new_pt, clear_ahead_secs=.01):
x, y = new_pt
exist = self.points_between(x, x + clear_ahead_secs)
for pt in exist:
self.remove_point(pt)
self.insert_pt(new_pt)
dispatcher.send("points changed", sender=self)
# now simplify to the left
def set_points(self, updates):
for i, pt in updates:
self.points[i] = pt
# this should be on, but live_input_point made it fail a
# lot. need a new solution.
#self.checkOverlap()
dispatcher.send("points changed", sender=self)
def checkOverlap(self):
x = None
for p in self.points:
if p[0] <= x:
raise ValueError("overlapping points")
x = p[0]
def pop_point(self, i):
p = self.points.pop(i)
dispatcher.send("points changed", sender=self)
return p
def remove_point(self, pt):
self.points.remove(pt)
dispatcher.send("points changed", sender=self)
def indices_between(self, x1, x2, beyond=0):
leftidx = max(0, bisect(self.points, (x1, None)) - beyond)
rightidx = min(len(self.points),
bisect(self.points, (x2, None)) + beyond)
return list(range(leftidx, rightidx))
def points_between(self, x1, x2):
"""returns (x,y) points"""
return [self.points[i] for i in self.indices_between(x1, x2)]
def point_before(self, x):
"""(x,y) of the point left of x, or None"""
leftidx = self.index_before(x)
if leftidx is None:
return None
return self.points[leftidx]
def index_before(self, x):
leftidx = bisect(self.points, (x, None)) - 1
if leftidx < 0:
return None
return leftidx
class CurveResource(object):
"""
holds a Curve, deals with graphs
"""
def __init__(self, graph, uri):
# probably newCurve and loadCurve should be the constructors instead.
self.graph, self.uri = graph, uri
def curvePointsContext(self):
return self.uri
def newCurve(self, ctx, label):
"""
Save type/label for a new :Curve resource.
Pass the ctx where the main curve data (not the points) will go.
"""
if hasattr(self, 'curve'):
raise ValueError('CurveResource already has a curve %r' %
self.curve)
self.graph.patch(
Patch(addQuads=[
(self.uri, RDF.type, L9['Curve'], ctx),
(self.uri, RDFS.label, label, ctx),
]))
self.curve = Curve(self.uri)
self.curve.points.extend([(0, 0)])
self.saveCurve()
self.watchCurvePointChanges()
def loadCurve(self):
if hasattr(self, 'curve'):
raise ValueError('CurveResource already has a curve %r' %
self.curve)
pointsFile = self.graph.value(self.uri, L9['pointsFile'])
self.curve = Curve(self.uri,
pointsStorage='file' if pointsFile else 'graph')
if hasattr(self.graph, 'addHandler'):
self.graph.addHandler(self.pointsFromGraph)
else:
# given a currentState graph
self.pointsFromGraph()
def pointsFromGraph(self):
pts = self.graph.value(self.uri, L9['points'])
if pts is not None:
self.curve.set_from_string(pts)
else:
diskPts = self.graph.value(self.uri, L9['pointsFile'])
if diskPts is not None:
self.curve.load(os.path.join(showconfig.curvesDir(), diskPts))
else:
log.warn("curve %s has no points", self.uri)
self.watchCurvePointChanges()
def saveCurve(self):
self.pendingSave = None
for p in self.getSavePatches():
self.graph.patch(p)
def getSavePatches(self):
if self.curve.pointsStorage == 'file':
log.warn("not saving file curves anymore- skipping %s" % self.uri)
#cur.save("%s-%s" % (basename,name))
return []
elif self.curve.pointsStorage == 'graph':
return [
self.graph.getObjectPatch(self.curvePointsContext(),
subject=self.uri,
predicate=L9['points'],
newObject=Literal(
self.curve.points_as_string()))
]
else:
raise NotImplementedError(self.curve.pointsStorage)
def watchCurvePointChanges(self):
"""start watching and saving changes to the graph"""
dispatcher.connect(self.onChange, 'points changed', sender=self.curve)
def onChange(self):
# Don't write a patch for the edited curve points until they've been
# stable for this long. This can be very short, since it's just to
# stop a 100-point edit from sending many updates. If it's too long,
# you won't see output lights change while you drag a point. Todo:
# this is just the wrong timing algorithm- it should be a max rate,
# not a max-hold-still-time.
HOLD_POINTS_GRAPH_COMMIT_SECS = .1
if getattr(self, 'pendingSave', None):
self.pendingSave.cancel()
self.pendingSave = reactor.callLater(HOLD_POINTS_GRAPH_COMMIT_SECS,
self.saveCurve)
class Markers(Curve):
"""Marker is like a point but the y value is a string"""
def eval(self):
raise NotImplementedError()
def slope(p1, p2):
if p2[0] == p1[0]:
return 0
return (p2[1] - p1[1]) / (p2[0] - p1[0])
class Curveset(object):
def __init__(self, graph, session):
self.graph, self.session = graph, session
self.currentSong = None
self.curveResources = {} # uri : CurveResource
self.markers = Markers(uri=None, pointsStorage='file')
graph.addHandler(self.loadCurvesForSong)
def curveFromUri(self, uri):
return self.curveResources[uri].curve
def loadCurvesForSong(self):
"""
current curves will track song's curves.
This fires 'add_curve' dispatcher events to announce the new curves.
"""
log.info('loadCurvesForSong')
dispatcher.send("clear_curves")
self.curveResources.clear()
self.markers = Markers(uri=None, pointsStorage='file')
self.currentSong = self.graph.value(self.session, L9['currentSong'])
if self.currentSong is None:
return
for uri in sorted(self.graph.objects(self.currentSong, L9['curve'])):
try:
cr = self.curveResources[uri] = CurveResource(self.graph, uri)
cr.loadCurve()
curvename = self.graph.label(uri)
if not curvename:
raise ValueError("curve %r has no label" % uri)
dispatcher.send("add_curve",
sender=self,
uri=uri,
label=curvename,
curve=cr.curve)
except Exception as e:
log.error("loading %s failed: %s", uri, e)
basename = os.path.join(
showconfig.curvesDir(),
showconfig.songFilenameFromURI(self.currentSong))
try:
self.markers.load("%s.markers" % basename)
except IOError:
print("no marker file found")
def save(self):
"""writes a file for each curve with a name
like basename-curvename, or saves them to the rdf graph"""
basename = os.path.join(
showconfig.curvesDir(),
showconfig.songFilenameFromURI(self.currentSong))
patches = []
for cr in list(self.curveResources.values()):
patches.extend(cr.getSavePatches())
self.markers.save("%s.markers" % basename)
# this will cause reloads that will rebuild our curve list
for p in patches:
self.graph.patch(p)
def sorter(self, name):
return self.curves[name].uri
def curveUrisInOrder(self):
return sorted(self.curveResources.keys())
def currentCurves(self):
# deprecated
for uri, cr in sorted(self.curveResources.items()):
with self.graph.currentState(tripleFilter=(uri, RDFS['label'],
None)) as g:
yield uri, g.label(uri), cr.curve
def globalsdict(self):
raise NotImplementedError('subterm used to get a dict of name:curve')
def get_time_range(self):
return 0, dispatcher.send("get max time")[0][1]
def new_curve(self, name):
if isinstance(name, Literal):
name = str(name)
uri = self.graph.sequentialUri(self.currentSong + '/curve-')
cr = self.curveResources[uri] = CurveResource(self.graph, uri)
cr.newCurve(ctx=self.currentSong, label=Literal(name))
s, e = self.get_time_range()
cr.curve.points.extend([(s, 0), (e, 0)])
ctx = self.currentSong
self.graph.patch(
Patch(addQuads=[
(self.currentSong, L9['curve'], uri, ctx),
]))
cr.saveCurve()
avg_line_length: 33.025974 | max_line_length: 78 | alphanum_fraction: 0.562407
content_no_comment:
import logging, ast, os
from bisect import bisect_left, bisect
import louie as dispatcher
from twisted.internet import reactor
from rdflib import Literal
from light9 import showconfig
from light9.namespaces import L9, RDF, RDFS
from rdfdb.patch import Patch
log = logging.getLogger()
introPad = 4
postPad = 4
class Curve(object):
def __init__(self, uri, pointsStorage='graph'):
self.uri = uri
self.pointsStorage = pointsStorage
self.points = []
self._muted = False
def __repr__(self):
return "<%s %s (%s points)>" % (self.__class__.__name__, self.uri,
len(self.points))
def muted():
doc = "Whether to currently send levels (boolean, obviously)"
def fget(self):
return self._muted
def fset(self, val):
self._muted = val
dispatcher.send('mute changed', sender=self)
return locals()
muted = property(**muted())
def toggleMute(self):
self.muted = not self.muted
def load(self, filename):
self.points[:] = []
for line in open(filename):
x, y = line.split()
self.points.append((float(x), ast.literal_eval(y)))
self.points.sort()
dispatcher.send("points changed", sender=self)
def set_from_string(self, pts):
self.points[:] = []
vals = pts.split()
pairs = list(zip(vals[0::2], vals[1::2]))
for x, y in pairs:
self.points.append((float(x), ast.literal_eval(y)))
self.points.sort()
dispatcher.send("points changed", sender=self)
def points_as_string(self):
def outVal(x):
if isinstance(x, str):
return x
return "%.4g" % x
return ' '.join(
"%s %s" % (outVal(p[0]), outVal(p[1])) for p in self.points)
def save(self, filename):
if filename.endswith('-music') or filename.endswith('_music'):
print("not saving music track")
return
f = open(filename, 'w')
for p in self.points:
f.write("%s %r\n" % p)
f.close()
def eval(self, t, allow_muting=True):
if self.muted and allow_muting:
return 0
if not self.points:
raise ValueError("curve has no points")
i = bisect_left(self.points, (t, None)) - 1
if i == -1:
return self.points[0][1]
if self.points[i][0] > t:
return self.points[i][1]
if i >= len(self.points) - 1:
return self.points[i][1]
p1, p2 = self.points[i], self.points[i + 1]
frac = (t - p1[0]) / (p2[0] - p1[0])
y = p1[1] + (p2[1] - p1[1]) * frac
return y
__call__ = eval
def insert_pt(self, new_pt):
i = bisect(self.points, (new_pt[0], None))
self.points.insert(i, new_pt)
dispatcher.send("points changed", sender=self)
return i
def live_input_point(self, new_pt, clear_ahead_secs=.01):
x, y = new_pt
exist = self.points_between(x, x + clear_ahead_secs)
for pt in exist:
self.remove_point(pt)
self.insert_pt(new_pt)
dispatcher.send("points changed", sender=self)
# now simplify to the left
def set_points(self, updates):
for i, pt in updates:
self.points[i] = pt
# this should be on, but live_input_point made it fail a
# lot. need a new solution.
#self.checkOverlap()
dispatcher.send("points changed", sender=self)
def checkOverlap(self):
x = None
for p in self.points:
if p[0] <= x:
raise ValueError("overlapping points")
x = p[0]
def pop_point(self, i):
p = self.points.pop(i)
dispatcher.send("points changed", sender=self)
return p
def remove_point(self, pt):
self.points.remove(pt)
dispatcher.send("points changed", sender=self)
def indices_between(self, x1, x2, beyond=0):
leftidx = max(0, bisect(self.points, (x1, None)) - beyond)
rightidx = min(len(self.points),
bisect(self.points, (x2, None)) + beyond)
return list(range(leftidx, rightidx))
def points_between(self, x1, x2):
return [self.points[i] for i in self.indices_between(x1, x2)]
def point_before(self, x):
leftidx = self.index_before(x)
if leftidx is None:
return None
return self.points[leftidx]
def index_before(self, x):
leftidx = bisect(self.points, (x, None)) - 1
if leftidx < 0:
return None
return leftidx
class CurveResource(object):
def __init__(self, graph, uri):
# probably newCurve and loadCurve should be the constructors instead.
self.graph, self.uri = graph, uri
def curvePointsContext(self):
return self.uri
def newCurve(self, ctx, label):
if hasattr(self, 'curve'):
raise ValueError('CurveResource already has a curve %r' %
self.curve)
self.graph.patch(
Patch(addQuads=[
(self.uri, RDF.type, L9['Curve'], ctx),
(self.uri, RDFS.label, label, ctx),
]))
self.curve = Curve(self.uri)
self.curve.points.extend([(0, 0)])
self.saveCurve()
self.watchCurvePointChanges()
def loadCurve(self):
if hasattr(self, 'curve'):
raise ValueError('CurveResource already has a curve %r' %
self.curve)
pointsFile = self.graph.value(self.uri, L9['pointsFile'])
self.curve = Curve(self.uri,
pointsStorage='file' if pointsFile else 'graph')
if hasattr(self.graph, 'addHandler'):
self.graph.addHandler(self.pointsFromGraph)
else:
# given a currentState graph
self.pointsFromGraph()
def pointsFromGraph(self):
pts = self.graph.value(self.uri, L9['points'])
if pts is not None:
self.curve.set_from_string(pts)
else:
diskPts = self.graph.value(self.uri, L9['pointsFile'])
if diskPts is not None:
self.curve.load(os.path.join(showconfig.curvesDir(), diskPts))
else:
log.warn("curve %s has no points", self.uri)
self.watchCurvePointChanges()
def saveCurve(self):
self.pendingSave = None
for p in self.getSavePatches():
self.graph.patch(p)
def getSavePatches(self):
if self.curve.pointsStorage == 'file':
log.warn("not saving file curves anymore- skipping %s" % self.uri)
#cur.save("%s-%s" % (basename,name))
return []
elif self.curve.pointsStorage == 'graph':
return [
self.graph.getObjectPatch(self.curvePointsContext(),
subject=self.uri,
predicate=L9['points'],
newObject=Literal(
self.curve.points_as_string()))
]
else:
raise NotImplementedError(self.curve.pointsStorage)
def watchCurvePointChanges(self):
dispatcher.connect(self.onChange, 'points changed', sender=self.curve)
def onChange(self):
# Don't write a patch for the edited curve points until they've been
# stable for this long. This can be very short, since it's just to
# you won't see output lights change while you drag a point. Todo:
HOLD_POINTS_GRAPH_COMMIT_SECS = .1
if getattr(self, 'pendingSave', None):
self.pendingSave.cancel()
self.pendingSave = reactor.callLater(HOLD_POINTS_GRAPH_COMMIT_SECS,
self.saveCurve)
class Markers(Curve):
def eval(self):
raise NotImplementedError()
def slope(p1, p2):
if p2[0] == p1[0]:
return 0
return (p2[1] - p1[1]) / (p2[0] - p1[0])
class Curveset(object):
def __init__(self, graph, session):
self.graph, self.session = graph, session
self.currentSong = None
self.curveResources = {}
self.markers = Markers(uri=None, pointsStorage='file')
graph.addHandler(self.loadCurvesForSong)
def curveFromUri(self, uri):
return self.curveResources[uri].curve
def loadCurvesForSong(self):
log.info('loadCurvesForSong')
dispatcher.send("clear_curves")
self.curveResources.clear()
self.markers = Markers(uri=None, pointsStorage='file')
self.currentSong = self.graph.value(self.session, L9['currentSong'])
if self.currentSong is None:
return
for uri in sorted(self.graph.objects(self.currentSong, L9['curve'])):
try:
cr = self.curveResources[uri] = CurveResource(self.graph, uri)
cr.loadCurve()
curvename = self.graph.label(uri)
if not curvename:
raise ValueError("curve %r has no label" % uri)
dispatcher.send("add_curve",
sender=self,
uri=uri,
label=curvename,
curve=cr.curve)
except Exception as e:
log.error("loading %s failed: %s", uri, e)
basename = os.path.join(
showconfig.curvesDir(),
showconfig.songFilenameFromURI(self.currentSong))
try:
self.markers.load("%s.markers" % basename)
except IOError:
print("no marker file found")
def save(self):
basename = os.path.join(
showconfig.curvesDir(),
showconfig.songFilenameFromURI(self.currentSong))
patches = []
for cr in list(self.curveResources.values()):
patches.extend(cr.getSavePatches())
self.markers.save("%s.markers" % basename)
for p in patches:
self.graph.patch(p)
def sorter(self, name):
return self.curves[name].uri
def curveUrisInOrder(self):
return sorted(self.curveResources.keys())
def currentCurves(self):
for uri, cr in sorted(self.curveResources.items()):
with self.graph.currentState(tripleFilter=(uri, RDFS['label'],
None)) as g:
yield uri, g.label(uri), cr.curve
def globalsdict(self):
raise NotImplementedError('subterm used to get a dict of name:curve')
def get_time_range(self):
return 0, dispatcher.send("get max time")[0][1]
def new_curve(self, name):
if isinstance(name, Literal):
name = str(name)
uri = self.graph.sequentialUri(self.currentSong + '/curve-')
cr = self.curveResources[uri] = CurveResource(self.graph, uri)
cr.newCurve(ctx=self.currentSong, label=Literal(name))
s, e = self.get_time_range()
cr.curve.points.extend([(s, 0), (e, 0)])
ctx = self.currentSong
self.graph.patch(
Patch(addQuads=[
(self.currentSong, L9['curve'], uri, ctx),
]))
cr.saveCurve()
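# A minimal, self-contained sketch of the save-debounce idiom used in
# CurveResource.onChange above (an illustration only; Debouncer and its names
# are not from this codebase). Each change reschedules the callback, so it
# fires once after `delay` seconds of quiet.
from twisted.internet import reactor

class Debouncer(object):
    def __init__(self, delay, func):
        self.delay = delay  # seconds of quiet required before firing
        self.func = func    # callback to run once changes settle
        self.pending = None

    def poke(self):
        # Cancel any scheduled call and push it back by `delay` again.
        if self.pending is not None and self.pending.active():
            self.pending.cancel()
        self.pending = reactor.callLater(self.delay, self.func)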
| true
| true
|
79031b38db87921226711c90569e81f01ec8472c
| 23,712
|
py
|
Python
|
nesta/packages/geo_utils/tests/test_geotools.py
|
anniyanvr/nesta
|
4b3ae79922cebde0ad33e08ac4c40b9a10e8e7c3
|
[
"MIT"
] | 13
|
2019-06-18T16:53:53.000Z
|
2021-03-04T10:58:52.000Z
|
nesta/packages/geo_utils/tests/test_geotools.py
|
nestauk/old_nesta_daps
|
4b3ae79922cebde0ad33e08ac4c40b9a10e8e7c3
|
[
"MIT"
] | 208
|
2018-08-10T13:15:40.000Z
|
2021-07-21T10:16:07.000Z
|
nesta/packages/geo_utils/tests/test_geotools.py
|
nestauk/old_nesta_daps
|
4b3ae79922cebde0ad33e08ac4c40b9a10e8e7c3
|
[
"MIT"
] | 8
|
2018-09-20T15:19:23.000Z
|
2020-12-15T17:41:34.000Z
|
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from unittest import mock
from nesta.packages.geo_utils.geocode import geocode
from nesta.packages.geo_utils.geocode import _geocode
from nesta.packages.geo_utils.geocode import geocode_dataframe
from nesta.packages.geo_utils.geocode import geocode_batch_dataframe
from nesta.packages.geo_utils.geocode import generate_composite_key
from nesta.packages.geo_utils.country_iso_code import country_iso_code
from nesta.packages.geo_utils.country_iso_code import country_iso_code_dataframe
from nesta.packages.geo_utils.country_iso_code import country_iso_code_to_name
from nesta.packages.geo_utils.lookup import get_continent_lookup
from nesta.packages.geo_utils.lookup import get_country_region_lookup
from nesta.packages.geo_utils.lookup import get_country_continent_lookup
REQUESTS = 'nesta.packages.geo_utils.geocode.requests.get'
PYCOUNTRY = 'nesta.packages.geo_utils.country_iso_code.pycountry.countries.get'
GEOCODE = 'nesta.packages.geo_utils.geocode.geocode'
_GEOCODE = 'nesta.packages.geo_utils.geocode._geocode'
COUNTRY_ISO_CODE = 'nesta.packages.geo_utils.country_iso_code.country_iso_code'
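# Illustrative sketch (an addition, not part of the original tests): the
# constants above are patch targets. mock.patch replaces a name where it is
# *looked up*, which is why they point into nesta.packages.geo_utils rather
# than into requests or pycountry directly.
def _example_patch_usage():
    with mock.patch(REQUESTS) as fake_get:
        fake_get.return_value.json.return_value = []  # simulate "no match"
        # Any requests.get call made inside geo_utils.geocode now hits fake_get.
        with pytest.raises(ValueError):
            geocode(q='nowhere')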
class TestGeocoding():
@staticmethod
@pytest.fixture
def mocked_osm_response():
mocked_response = mock.Mock()
mocked_response.json.return_value = [{'lat': '12.923432', 'lon': '-75.234569'}]
return mocked_response
def test_error_raised_when_arguments_missing(self):
with pytest.raises(ValueError) as e:
geocode()
assert "No geocode match" in str(e.value)
@mock.patch(REQUESTS)
def test_request_includes_user_agent_in_header(self, mocked_request, mocked_osm_response):
mocked_request.return_value = mocked_osm_response
geocode(something='a')
assert mocked_request.call_args[1]['headers'] == {'User-Agent': 'Nesta health data geocode'}
@mock.patch(REQUESTS)
def test_url_correct_with_city_and_country(self, mocked_request, mocked_osm_response):
mocked_request.return_value = mocked_osm_response
kwargs = dict(city='london', country='UK')
geocode(**kwargs)
assert mocked_request.call_args[1]['params'] == dict(format="json", **kwargs)
@mock.patch(REQUESTS)
def test_url_correct_with_query(self, mocked_request, mocked_osm_response):
mocked_request.return_value = mocked_osm_response
kwargs = dict(q='my place')
geocode(**kwargs)
assert mocked_request.call_args[1]['params'] == dict(format="json", **kwargs)
@mock.patch(REQUESTS)
def test_error_returned_if_no_match(self, mocked_request):
mocked_response = mock.Mock()
mocked_response.json.return_value = []
mocked_request.return_value = mocked_response
with pytest.raises(ValueError) as e:
geocode(q="Something bad")
assert "No geocode match" in str(e.value)
@mock.patch(REQUESTS)
def test_coordinates_extracted_from_json_with_one_result(self, mocked_request, mocked_osm_response):
mocked_request.return_value = mocked_osm_response
assert geocode(q='somewhere') == [{'lat': '12.923432', 'lon': '-75.234569'}]
@mock.patch(GEOCODE)
def test_geocode_wrapper_rejects_invalid_query_parameters(self, mocked_geocode):
with pytest.raises(ValueError) as e:
_geocode(cat='dog', city='Nice')
assert "Invalid query parameter" in str(e.value)
@mock.patch(GEOCODE)
def test_geocode_wrapper_rejects_both_q_and_kwargs_supplied(self, mocked_geocode):
with pytest.raises(ValueError) as e:
_geocode(city='London', q='somewhere')
assert "Supply either q OR other query parameters, they cannot be combined." in str(e.value)
@mock.patch(GEOCODE)
def test_geocode_wrapper_errors_if_no_query_parameters_supplied(self, mocked_geocode):
with pytest.raises(ValueError) as e:
_geocode()
assert "No query parameters supplied" in str(e.value)
@mock.patch(GEOCODE)
def test_geocode_wrapper_calls_geocode_properly(self, mocked_geocode):
mocked_geocode.return_value = [{'lat': 1.1, 'lon': 2.2}]
_geocode('my place')
_geocode(q='somewhere')
_geocode(city='London', country='UK')
_geocode(postalcode='ABC 123')
expected_calls = [mock.call(q='my place'),
mock.call(q='somewhere'),
mock.call(city='London', country='UK'),
mock.call(postalcode='ABC 123')
]
assert mocked_geocode.mock_calls == expected_calls
class TestGeocodeDataFrame():
@staticmethod
@pytest.fixture
def test_dataframe():
df = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
})
return df
@mock.patch(_GEOCODE)
def test_underlying_geocoding_function_called_with_city_country(self, mocked_geocode,
test_dataframe):
# Generate dataframe using a mocked output
mocked_geocode.side_effect = ['cat', 'dog', 'squirrel']
geocoded_dataframe = geocode_dataframe(test_dataframe)
# Expected outputs
expected_dataframe = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
'coordinates': ['cat', 'dog', 'squirrel']
})
expected_calls = [mock.call(city='London', country='UK'),
mock.call(city='Sheffield', country='United Kingdom'),
mock.call(city='Brussels', country='Belgium')]
# Check expected behaviours
assert geocoded_dataframe.to_dict(orient="records") == expected_dataframe.to_dict(orient="records")
assert mocked_geocode.mock_calls == expected_calls
@mock.patch(_GEOCODE)
def test_underlying_geocoding_function_called_with_query_fallback(self, mocked_geocode,
test_dataframe):
mocked_geocode.side_effect = [None, None, None, 'dog', 'cat', 'squirrel']
geocoded_dataframe = geocode_dataframe(test_dataframe)
# Expected outputs
expected_dataframe = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
'coordinates': ['dog', 'cat', 'squirrel']
})
expected_calls = [mock.call(city='London', country='UK'),
mock.call(city='Sheffield', country='United Kingdom'),
mock.call(city='Brussels', country='Belgium'),
mock.call('London UK'),
mock.call('Sheffield United Kingdom'),
mock.call('Brussels Belgium')]
# Check expected behaviours
assert geocoded_dataframe.to_dict(orient="records") == expected_dataframe.to_dict(orient="records")
assert mocked_geocode.mock_calls == expected_calls
@mock.patch(_GEOCODE)
def test_duplicates_are_only_geocoded_once(self, mocked_geocode):
test_dataframe = pd.DataFrame({'index': [0, 1, 2, 3],
'city': ['London', 'Brussels', 'London', 'Brussels'],
'country': ['UK', 'Belgium', 'UK', 'Belgium']
})
mocked_geocode.side_effect = ['LON', 'BRU']
geocoded_dataframe = geocode_dataframe(test_dataframe)
expected_dataframe = pd.DataFrame({'index': [0, 1, 2, 3],
'city': ['London', 'Brussels', 'London', 'Brussels'],
'country': ['UK', 'Belgium', 'UK', 'Belgium'],
'coordinates': ['LON', 'BRU', 'LON', 'BRU']
})
assert geocoded_dataframe.to_dict(orient="records") == expected_dataframe.to_dict(orient="records")
assert mocked_geocode.call_count == 2
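# A sketch (an addition, not the nesta source) of the dedup-then-merge idea
# that test_duplicates_are_only_geocoded_once verifies: geocode each unique
# (city, country) pair once, then map the results back onto every row.
def _example_geocode_unique(df, geocoder):
    unique = df[['city', 'country']].drop_duplicates()
    unique['coordinates'] = [geocoder(city=c, country=k)
                             for c, k in zip(unique['city'], unique['country'])]
    return df.merge(unique, on=['city', 'country'], how='left')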
class TestGeocodeBatchDataframe():
@staticmethod
@pytest.fixture
def test_dataframe():
df = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
})
return df
@mock.patch(_GEOCODE)
def test_underlying_geocoding_function_called_with_city_country(self, mocked_geocode,
test_dataframe):
# Generate dataframe using a mocked output
mocked_geocode.side_effect = [{'lat': '12.923432', 'lon': '-75.234569'},
{'lat': '99.999999', 'lon': '-88.888888'},
{'lat': '-2.202022', 'lon': '0.000000'}
]
# Expected outputs
expected_dataframe = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
'latitude': [12.923432, 99.999999, -2.202022],
'longitude': [-75.234569, -88.888888, 0.0]
})
expected_calls = [mock.call(city='London', country='UK'),
mock.call(city='Sheffield', country='United Kingdom'),
mock.call(city='Brussels', country='Belgium')]
geocoded_dataframe = geocode_batch_dataframe(test_dataframe)
# Check expected behaviours
assert_frame_equal(geocoded_dataframe, expected_dataframe,
check_like=True, check_dtype=False)
assert mocked_geocode.mock_calls == expected_calls
@mock.patch(_GEOCODE)
def test_underlying_geocoding_function_called_with_query_fallback(self,
mocked_geocode,
test_dataframe):
mocked_geocode.side_effect = [None,
{'lat': 1, 'lon': 4},
None,
{'lat': 2, 'lon': 5},
None,
{'lat': 3, 'lon': 6}
]
# Expected outputs
expected_dataframe = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
'latitude': [1.0, 2.0, 3.0],
'longitude': [4.0, 5.0, 6.0],
})
expected_calls = [mock.call(city='London', country='UK'),
mock.call(q='London UK'),
mock.call(city='Sheffield', country='United Kingdom'),
mock.call(q='Sheffield United Kingdom'),
mock.call(city='Brussels', country='Belgium'),
mock.call(q='Brussels Belgium')]
geocoded_dataframe = geocode_batch_dataframe(test_dataframe, query_method='both')
# Check expected behaviours
assert_frame_equal(geocoded_dataframe, expected_dataframe,
check_like=True, check_dtype=False)
assert mocked_geocode.mock_calls == expected_calls
@mock.patch(_GEOCODE)
def test_underlying_geocoding_function_called_with_query_method_only(self,
mocked_geocode,
test_dataframe):
mocked_geocode.side_effect = [{'lat': 1, 'lon': 4},
{'lat': 2, 'lon': 5},
{'lat': 3, 'lon': 6}
]
# Expected outputs
expected_dataframe = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
'latitude': [1.0, 2.0, 3.0],
'longitude': [4.0, 5.0, 6.0],
})
expected_calls = [mock.call(q='London UK'),
mock.call(q='Sheffield United Kingdom'),
mock.call(q='Brussels Belgium')]
geocoded_dataframe = geocode_batch_dataframe(test_dataframe, query_method='query_only')
# Check expected behaviours
assert_frame_equal(geocoded_dataframe, expected_dataframe,
check_like=True, check_dtype=False)
assert mocked_geocode.mock_calls == expected_calls
@mock.patch(_GEOCODE)
def test_valueerror_raised_when_invalid_query_method_passed(self,
mocked_geocode,
test_dataframe):
with pytest.raises(ValueError):
geocode_batch_dataframe(test_dataframe, query_method='cats')
with pytest.raises(ValueError):
geocode_batch_dataframe(test_dataframe, query_method='test')
with pytest.raises(ValueError):
geocode_batch_dataframe(test_dataframe, query_method=1)
@mock.patch(_GEOCODE)
def test_output_column_names_are_applied(self, mocked_geocode, test_dataframe):
# Generate dataframe using a mocked output
mocked_geocode.side_effect = [{'lat': '12.923432', 'lon': '-75.234569'},
{'lat': '99.999999', 'lon': '-88.888888'},
{'lat': '-2.202022', 'lon': '0.000000'}
]
# Expected outputs
expected_dataframe = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
'lat': [12.923432, 99.999999, -2.202022],
'lon': [-75.234569, -88.888888, 0.0]
})
geocoded_dataframe = geocode_batch_dataframe(test_dataframe,
latitude='lat',
longitude='lon')
# Check expected behaviours
assert_frame_equal(geocoded_dataframe, expected_dataframe,
check_like=True, check_dtype=False)
class TestCountryIsoCode():
@mock.patch(PYCOUNTRY)
def test_lookup_via_name(self, mocked_pycountry):
mocked_pycountry.return_value = 'country_object'
expected_calls = [mock.call(name='United Kingdom')]
assert country_iso_code('United Kingdom') == 'country_object'
assert mocked_pycountry.mock_calls == expected_calls
assert mocked_pycountry.call_count == 1
country_iso_code.cache_clear()
@mock.patch(PYCOUNTRY)
def test_lookup_via_common_name(self, mocked_pycountry):
mocked_pycountry.side_effect = [KeyError(), 'country_object']
expected_calls = [mock.call(name='United Kingdom'),
mock.call(common_name='United Kingdom')
]
assert country_iso_code('United Kingdom') == 'country_object'
assert mocked_pycountry.mock_calls == expected_calls
assert mocked_pycountry.call_count == 2
country_iso_code.cache_clear()
@mock.patch(PYCOUNTRY)
def test_lookup_via_official_name(self, mocked_pycountry):
mocked_pycountry.side_effect = [KeyError(), KeyError(), 'country_object']
expected_calls = [mock.call(name='United Kingdom'),
mock.call(common_name='United Kingdom'),
mock.call(official_name='United Kingdom')
]
assert country_iso_code('United Kingdom') == 'country_object'
assert mocked_pycountry.mock_calls == expected_calls
assert mocked_pycountry.call_count == 3
country_iso_code.cache_clear()
@mock.patch(PYCOUNTRY)
def test_invalid_lookup_raises_keyerror(self, mocked_pycountry):
mocked_pycountry.side_effect = [KeyError(), KeyError(), KeyError()]*2
with pytest.raises(KeyError) as e:
country_iso_code('Fake Country')
assert 'Fake Country not found' in str(e.value)
country_iso_code.cache_clear()
@mock.patch(PYCOUNTRY)
def test_title_case_is_applied(self, mocked_pycountry):
expected_calls = []
names = ['united kingdom', 'UNITED KINGDOM',
'United kingdom']
mocked_pycountry.side_effect = [KeyError(), KeyError(), KeyError(), 'blah'] * len(names)
for name in names:
country_iso_code(name) # Find the iso codes
raw_call = mock.call(name=name)
common_call = mock.call(common_name=name)
official_call = mock.call(official_name=name)
title_call = mock.call(name='United Kingdom')
expected_calls.append(raw_call) # The initial call
expected_calls.append(common_call) # Tries common name call
expected_calls.append(official_call) # Tries official name
expected_calls.append(title_call) # The title case call
assert mocked_pycountry.mock_calls == expected_calls
country_iso_code.cache_clear()
class TestCountryIsoCodeDataframe():
@staticmethod
def _mocked_response(alpha_2, alpha_3, numeric, continent):
'''Builds a mocked response for the patched country_iso_code function.'''
response = mock.Mock()
response.alpha_2 = alpha_2
response.alpha_3 = alpha_3
response.numeric = numeric
response.continent = continent
return response
@mock.patch(COUNTRY_ISO_CODE)
def test_valid_countries_coded(self, mocked_country_iso_code):
test_df = pd.DataFrame({'index': [0, 1, 2],
'country': ['United Kingdom', 'Belgium', 'United States']
})
mocked_response_uk = self._mocked_response('GB', 'GBR', '123', 'EU')
mocked_response_be = self._mocked_response('BE', 'BEL', '875', 'EU')
mocked_response_us = self._mocked_response('US', 'USA', '014', 'NA')
mocked_country_iso_code.side_effect = [mocked_response_uk,
mocked_response_be,
mocked_response_us
]
expected_dataframe = pd.DataFrame(
{'index': [0, 1, 2],
'country': ['United Kingdom', 'Belgium', 'United States'],
'country_alpha_2': ['GB', 'BE', 'US'],
'country_alpha_3': ['GBR', 'BEL', 'USA'],
'country_numeric': ['123', '875', '014'],
'continent': ['EU', 'EU', 'NA']
})
coded_df = country_iso_code_dataframe(test_df)
assert coded_df.to_dict(orient="records") == expected_dataframe.to_dict(orient="records")
@mock.patch(COUNTRY_ISO_CODE)
def test_invalid_countries_data_is_none(self, mocked_country_iso_code):
test_df = pd.DataFrame({'index': [0, 1, 2],
'country': ['United Kingdom', 'Belgium', 'United States']
})
mocked_country_iso_code.side_effect = KeyError
expected_dataframe = pd.DataFrame(
{'index': [0, 1, 2],
'country': ['United Kingdom', 'Belgium', 'United States'],
'country_alpha_2': [None, None, None],
'country_alpha_3': [None, None, None],
'country_numeric': [None, None, None],
'continent': [None, None, None]
})
coded_df = country_iso_code_dataframe(test_df)
assert coded_df.to_dict(orient="records") == expected_dataframe.to_dict(orient="records")
class TestCountryIsoCodeToName():
def test_valid_iso_code_returns_name(self):
assert country_iso_code_to_name('ITA') == 'Italy'
assert country_iso_code_to_name('DEU') == 'Germany'
assert country_iso_code_to_name('GBR') == 'United Kingdom'
def test_invalid_iso_code_returns_none(self):
assert country_iso_code_to_name('FOO') is None
assert country_iso_code_to_name('ABC') is None
assert country_iso_code_to_name('ZZZ') is None
def test_generate_composite_key():
assert generate_composite_key('London', 'United Kingdom') == 'london_united-kingdom'
assert generate_composite_key('Paris', 'France') == 'paris_france'
assert generate_composite_key('Name-with hyphen', 'COUNTRY') == 'name-with-hyphen_country'
def test_generate_composite_key_raises_error_with_invalid_input():
with pytest.raises(ValueError):
generate_composite_key(None, 'UK')
with pytest.raises(ValueError):
generate_composite_key('city_only')
with pytest.raises(ValueError):
generate_composite_key(1, 2)
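# A sketch consistent with the two tests above (an addition inferred from
# them, not the actual nesta source): lowercase both parts, hyphenate
# internal spaces, join with an underscore; non-string input surfaces as
# ValueError.
def _example_generate_composite_key(city=None, country=None):
    try:
        city = city.replace(' ', '-').lower()
        country = country.replace(' ', '-').lower()
    except AttributeError:
        raise ValueError(f"Invalid city or country: {city}, {country}")
    return '_'.join([city, country])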
def test_get_continent_lookup():
continents = get_continent_lookup()
assert None in continents
assert '' in continents
assert continents['NA'] == 'North America'
assert len(continents) == 9 # 2 nulls + 7 continents
def test_get_country_region_lookup():
countries = get_country_region_lookup()
assert len(countries) > 100
assert len(countries) < 1000
assert all(len(k) == 2 for k in countries.keys())
assert all(type(v) is tuple for v in countries.values())
assert all(len(v) == 2 for v in countries.values())
all_regions = {v[1] for v in countries.values()}
assert len(all_regions) == 18
def test_country_continent_lookup():
lookup = get_country_continent_lookup()
non_nulls = {k: v for k, v in lookup.items()
if k is not None and k != ''}
    # All keys are iso2 codes, so length == 2 (iterate keys, not items:
    # .items() yields 2-tuples, which would make the check pass vacuously)
    assert all(len(k) == 2 for k in non_nulls)
assert all(len(v) == 2 for v in non_nulls.values())
# Either strings or Nones
country_types = set(type(v) for v in lookup.values())
assert country_types == {str, type(None)}
# Right ball-park of country and continent numbers
assert len(non_nulls) > 100 # num countries
assert len(non_nulls) < 1000 # num countries
assert len(set(non_nulls.values())) == 7 # num continents
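# A minimal geocode implementation consistent with the contract the tests
# above pin down (an inferred sketch, not the actual nesta source; the
# Nominatim URL is an assumption, and the name is prefixed to avoid
# shadowing the real import).
def _example_geocode(**kwargs):
    import requests
    response = requests.get('https://nominatim.openstreetmap.org/search',
                            headers={'User-Agent': 'Nesta health data geocode'},
                            params=dict(format='json', **kwargs))
    geo_data = response.json()
    if not geo_data:
        raise ValueError(f'No geocode match for {kwargs}')
    return geo_data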
| 48.195122
| 107
| 0.565494
|
| true
| true
|
79031bbb2bbfd5f965cf61f9f0adf83d9e6b27a0
| 450
|
py
|
Python
|
scripts/item/consume_2435553.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 54
|
2019-04-16T23:24:48.000Z
|
2021-12-18T11:41:50.000Z
|
scripts/item/consume_2435553.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 3
|
2019-05-19T15:19:41.000Z
|
2020-04-27T16:29:16.000Z
|
scripts/item/consume_2435553.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 49
|
2020-11-25T23:29:16.000Z
|
2022-03-26T16:20:24.000Z
|
# Monster Hot Air Balloon | (2435553)
if sm.getSkillByItem() == 0:  # Check whether the item has a vehicleID stored; 0 if not.
    sm.chat("An error occurred whilst trying to find the mount.")
elif sm.hasSkill(sm.getSkillByItem()):
sm.chat("You already have the 'Monster Hot Air Balloon' mount.")
else:
sm.consumeItem()
sm.giveSkill(sm.getSkillByItem())
sm.chat("Successfully added the 'Monster Hot Air Balloon' mount.")
sm.dispose()
| 40.909091
| 86
| 0.708889
|
| true
| true
|
79031c7b27d8487b66e06d2009fb12b18b93ae3c
| 12,011
|
py
|
Python
|
scripts/gp_pdf_extractor.py
|
delenamalan/covid19za
|
414a7e0771ebb4b054809f20bff6c4efc0c24ff6
|
[
"MIT"
] | null | null | null |
scripts/gp_pdf_extractor.py
|
delenamalan/covid19za
|
414a7e0771ebb4b054809f20bff6c4efc0c24ff6
|
[
"MIT"
] | 1
|
2020-11-28T15:38:20.000Z
|
2020-11-28T15:47:22.000Z
|
scripts/gp_pdf_extractor.py
|
delenamalan/covid19za
|
414a7e0771ebb4b054809f20bff6c4efc0c24ff6
|
[
"MIT"
] | null | null | null |
import pdfplumber
import re
import pandas as pd
from datetime import datetime
import sys
# AUTHOR: Simon Rosen
# -----------------------------------
# DEPENDENCIES
# This module requires 'pdfplumber'
#
# Install: pip install pdfplumber
# -----------------------------------
def extract_data(file_path):
pdfp_obj = pdfplumber.open(file_path)
    # Helper: return the text between string1 and string2 (DOTALL search),
    # or [] when nothing matches; callers test the result with len().
    def get_string_between_2_strings(text, string1, string2):
        try:
            regex_str = string1 + '(.+?)' + string2
            all_found = re.search(regex_str, text, re.DOTALL).group(1)
        except AttributeError:
            # re.search returned None: no text between the two substrings.
            all_found = []
        return all_found
# GP data contained in paragraph under following heading
# GAUTENG CONFIRMED COVID-19 CASES DISTRICT BREAKDOWN
# GP cases, recoveries, deaths, contacts traced, people de-isolated & hospitalisations
    def get_gp_breakdown_data():
        district_pg = 0
        first_page_txt = pdfp_obj.pages[0].extract_text()
        heading_txt_1 = "GAUTENG CONFIRMED COVID-19 CASES DISTRICT BREAKDOWN"
        heading_txt_2 = "BREAKDOWN PER DISTRICT"
        breakdown_txt = get_string_between_2_strings(first_page_txt, heading_txt_1, heading_txt_2)
        # Fall back to the second page if the breakdown is not on the first,
        # then to everything before the heading on that page.
        if len(breakdown_txt) == 0:
            breakdown_txt = get_string_between_2_strings(pdfp_obj.pages[1].extract_text(), heading_txt_1, heading_txt_2)
            district_pg = 1
        if len(breakdown_txt) == 0:
            breakdown_txt = get_string_between_2_strings(pdfp_obj.pages[1].extract_text(), "^", heading_txt_2)
            district_pg = 1
str_list = list(filter(lambda x: False if x == ' ' else True, breakdown_txt.splitlines()))
str_body = "".join(str_list)
sentences = str_body.split('.')
def find_date(text):
return re.search(r'(\d{2}|\d{1}) [a-zA-Z]* \d{4}', text).group(0)
def get_nums(text, exclude_texts=['COVID-19']):
for exclude_text in exclude_texts:
text = text.replace(exclude_text, '')
num_tuples = re.findall(r'(\d{3}|\d{2}|\d{1})( \d{3}|\d{2}|\d{1})*', text)
num_list = [int(x[0] + x[1].replace(' ', '')) for x in num_tuples]
return num_list
date_txt = get_string_between_2_strings(pdfp_obj.pages[0].extract_text(), heading_txt_1, "$")
sentences = "".join(date_txt).split(".")
_gp_covid_stats = {"date": find_date(date_txt)}
# First Sentence
tmp_dict = dict(zip(['cases', 'recoveries', 'deaths'], get_nums(sentences[0])[2:]))
_gp_covid_stats.update(tmp_dict)
# Second Sentence
tmp_dict = dict(zip(['traced', 'de_isolated'], get_nums(sentences[1])[:2]))
_gp_covid_stats.update(tmp_dict)
# Third Sentence
tmp_dict = dict(zip(['hospitalised'], get_nums(sentences[2])))
_gp_covid_stats.update(tmp_dict)
return district_pg, _gp_covid_stats
district_pg, gp_covid_stats = get_gp_breakdown_data()
# DISTRICT BREAKDOWN
    def get_district_data():
        district_table_list = pdfp_obj.pages[district_pg].extract_tables()[0]
        dl = []
        for row in district_table_list:
            # Keep only the cells that actually hold a value.
            dl.append(list(filter(lambda x: x is not None and len(x) != 0, row)))
        # The second-to-last row is extracted short; pad it with zeros.
        dl[-2] = dl[-2] + [0, 0, 0]
        # Transpose: one list per column, headed by the column name.
        all_list = [[x[i] for x in dl] for i in range(0, len(dl[0]))]
        gp_breakdown_dict = {curr_list[0]: curr_list[1:] for curr_list in all_list}
        gp_breakdown_df = pd.DataFrame.from_dict(gp_breakdown_dict)
        gp_breakdown_df.fillna(0, inplace=True)
        gp_breakdown_df.set_index("DISTRICT", inplace=True)
        gp_breakdown_df.rename(inplace=True, columns={gp_breakdown_df.columns[0]: "CASES",
                                                      gp_breakdown_df.columns[1]: "NEW CASES"})
        # Strip thousands-separator spaces from the numeric columns.
        for i in range(0, 4):
            gp_breakdown_df.iloc[:, i] = gp_breakdown_df.iloc[:, i].apply(lambda x: x if type(x) == int else x.replace(' ', ''))
        return gp_breakdown_df
gp_district_df = get_district_data()
# ---------------
# SUB-DISTRICTS
# ---------------
    # Note: duplicates get_table_list below and is otherwise unused here.
    def get_extracted_raw_list(page_no):
currPage = pdfp_obj.pages[page_no]
bounding_box = (300, 0, currPage.width, currPage.height)
cropped_page = currPage.crop(bounding_box)
# table_settings = {"vertical_strategy": "text"}
table_settings = {"snap_tolerance": 10, "join_tolerance": 15}
extracted_raw_list = cropped_page.extract_tables(table_settings)[0]
return extracted_raw_list
def get_sub_districts_data(raw_list):
sub_districts_list = []
curr_sub_district = []
prev_sub_district = []
for i in range(1, len(raw_list)):
curr_list = raw_list[i]
if curr_sub_district == [] or not (curr_list[0] == None or curr_list[0] == ''):
# print(prev_sub_district)
if prev_sub_district != []:
sub_districts_list.append(curr_sub_district)
curr_sub_district = curr_list
prev_sub_district = curr_sub_district
# print(curr_sub_district)
if (curr_sub_district[1] == '' and curr_list[1] != '' and curr_list[1] != None):
curr_sub_district[1] = curr_list[1]
if (curr_sub_district[2] == '' and curr_list[2] != '' and curr_list[2] != None):
curr_sub_district[2] = curr_list[2]
if (i == len(raw_list) - 1):
sub_districts_list.append(curr_sub_district)
# Check if first item of list is valid e.g. total and/or recoveries has values
prev_sub_district = sub_districts_list[0]
if (prev_sub_district[1] == '' or prev_sub_district[1] == None) and (prev_sub_district[2] == '' or \
prev_sub_district[2] == None):
sub_districts_list.pop(0)
return sub_districts_list
def get_table_list(page_no):
currPage = pdfp_obj.pages[page_no]
bounding_box = (300, 0, currPage.width, currPage.height)
cropped_page = currPage.crop(bounding_box)
# table_settings = {"vertical_strategy": "text"}
table_settings = {"snap_tolerance": 10, "join_tolerance": 15}
extracted_raw_list = cropped_page.extract_tables(table_settings)[0]
return extracted_raw_list
def get_all_sub_districts(page_start, page_end):
all_sub_districts = []
for i in range(page_start, page_end + 1):
all_sub_districts.extend(get_sub_districts_data(get_table_list(i)))
def remove_spaces(str_no):
if type(str_no)==str:
return str_no.replace(" ", "")
else:
return str_no
all_sub_districts = [[x[0], remove_spaces(x[1]), remove_spaces(x[2])] for x in all_sub_districts]
return all_sub_districts
all_sub_dists = get_all_sub_districts(district_pg+1, district_pg+4)
pdfp_obj.close()
def get_district_map():
# Johannesburg
jhb_dict = dict(zip(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'Unallocated'],
[[x[1], x[2]] for x in all_sub_dists[0:8]]))
# Tshwane
tsh_keys = list(range(1, 8))
tsh_keys.append('Unallocated')
tsh_dict = dict(zip(tsh_keys, [[x[1], x[2]] for x in all_sub_dists[8:16]]))
# Ekurhuleni
eku_keys = "e1 e2 n1 n2 s1 s2 Unallocated".split(" ")
eku_dict = dict(zip(eku_keys, [[x[1], x[2]] for x in all_sub_dists[16:23]]))
# Sedibeng
sed_keys = "Lesedi Emfuleni Midvaal Unallocated".split(" ")
sed_dict = dict(zip(sed_keys, [[x[1], x[2]] for x in all_sub_dists[23:27]]))
# West Rand
wr_keys = "Mogale Rand_West Merafong Unallocated".split(" ")
wr_dict = dict(zip(wr_keys, [[x[1], x[2]] for x in all_sub_dists[27:31]]))
# All Districts
district_map = {
'Johannesburg': jhb_dict,
'Tshwane': tsh_dict,
'Ekurhuleni': eku_dict,
'Sedibeng': sed_dict,
'West Rand': wr_dict
}
return district_map
district_map = get_district_map()
# DATE
curr_date = datetime.strptime(gp_covid_stats['date'], '%d %B %Y')
date_formatted = datetime.strftime(curr_date, '%d-%m-%Y')
date_yyyymmdd = datetime.strftime(curr_date, '%Y%m%d')
# print(gp_covid_stats['date'], date_formatted, date_yyyymmdd)
##############################
# OUT LIST #
# DETERMINES ORDER OF OUTPUT #
##############################
# List later gets converted to formatted string
    jhb_districts = [x for x in 'ABCDEFG'] + ['Unallocated']
    tsh_districts = [x for x in range(1, 8)] + ['Unallocated']
    wr_districts = ['Mogale', "Rand_West", "Merafong", "Unallocated"]
out_list = [
# Date
date_yyyymmdd, date_formatted,
# Gauteng Data
gp_covid_stats['cases'], 'Check', 'Check',
gp_covid_stats['recoveries'], gp_covid_stats['deaths'], 'Check','Check',
gp_covid_stats['hospitalised'],
# DISTRICT TOTALS DATA
# ----------------------
# Johannesburg
gp_district_df.loc['Johannesburg']['CASES'],
gp_district_df.loc['Ekurhuleni']['CASES'],
gp_district_df.loc['Tshwane']['CASES'],
gp_district_df.loc['Sedibeng']['CASES'],
gp_district_df.loc['West Rand']['CASES'],
gp_district_df.loc['Unallocated']['CASES'],
' Check',
gp_district_df.loc['Johannesburg']['DEATHS'],
gp_district_df.loc['Ekurhuleni']['DEATHS'],
gp_district_df.loc['Tshwane']['DEATHS'],
gp_district_df.loc['Sedibeng']['DEATHS'],
gp_district_df.loc['West Rand']['DEATHS'],
gp_district_df.loc['Johannesburg']['RECOVERIES'],
gp_district_df.loc['Ekurhuleni']['RECOVERIES'],
gp_district_df.loc['Tshwane']['RECOVERIES'],
gp_district_df.loc['Sedibeng']['RECOVERIES'],
gp_district_df.loc['West Rand']['RECOVERIES'], ' Check', ' Check'] + \
[district_map['Johannesburg'][x][0] for x in jhb_districts]+\
['Check']+\
[district_map['Johannesburg'][x][1] for x in jhb_districts]+\
['Check']+\
[district_map['Tshwane'][x][0] for x in tsh_districts]+\
['Check']+\
[district_map['Tshwane'][x][1] for x in tsh_districts]+\
['Check']+\
[district_map['Ekurhuleni'][x][0] for x in ['e1','e2','n1','n2','s1','s2','Unallocated']]+\
['Check']+\
[district_map['Ekurhuleni'][x][1] for x in ['e1','e2','n1','n2','s1','s2','Unallocated']]+\
['Check']+\
[district_map['Sedibeng'][x][0] for x in ['Lesedi','Emfuleni','Midvaal','Unallocated']]+\
['Check']+\
[district_map['Sedibeng'][x][1] for x in ['Lesedi','Emfuleni','Midvaal','Unallocated']]+\
['Check']+\
[district_map['West Rand'][x][0] for x in wr_districts]+\
[district_map['West Rand'][x][1] for x in wr_districts]+\
['Check']
def list_to_formatted(in_list, delimiter='\t'):
return delimiter.join(map(str, in_list))
out_str = list_to_formatted(out_list)
# return district_map
return out_str
if __name__ == "__main__":
print(extract_data(sys.argv[1]))
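# Illustrative check (an addition, not part of the script) of how the
# get_nums regex above parses numbers written with thousands spaces:
def _example_get_nums(text):
    num_tuples = re.findall(r'(\d{3}|\d{2}|\d{1})( \d{3}|\d{2}|\d{1})*', text)
    return [int(x[0] + x[1].replace(' ', '')) for x in num_tuples]

assert _example_get_nums('recorded 12 345 cases and 67 deaths') == [12345, 67]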
| 40.036667
| 126
| 0.590542
|
| true
| true
|
79031d0f20a9f82164968cb1d5d5621862da4894
| 384
|
py
|
Python
|
run_doctests.py
|
jtauber/functional-differential-geometry
|
c3c2eedb378a1610353d99e7f0063993520b8b47
|
[
"MIT"
] | 37
|
2015-02-06T11:06:42.000Z
|
2021-12-17T23:17:57.000Z
|
run_doctests.py
|
jtauber/functional-differential-geometry
|
c3c2eedb378a1610353d99e7f0063993520b8b47
|
[
"MIT"
] | null | null | null |
run_doctests.py
|
jtauber/functional-differential-geometry
|
c3c2eedb378a1610353d99e7f0063993520b8b47
|
[
"MIT"
] | 11
|
2015-02-08T03:22:29.000Z
|
2021-12-07T19:08:23.000Z
|
# this works around a path issue with just calling
# coverage run -m doctest -v <rst-file>
import doctest
import sys
fails = 0
for filename in [
"tuples.rst",
"functions.rst",
"symbolic.rst",
"simplification.rst",
"differentiation.rst",
"symbolic_tuples.rst",
]:
result = doctest.testfile(filename)
fails += result.failed
if fails:
sys.exit(1)
| 17.454545
| 50
| 0.661458
|
| true
| true
|
79031db7333e35f6f952417715791483e7bf8f10
| 23,884
|
py
|
Python
|
crispy/gui/quanty/calculation.py
|
jminar/crispy
|
560bb11ee1ed03c1151f16a15725390784a38c79
|
[
"MIT"
] | 1
|
2021-06-30T13:06:33.000Z
|
2021-06-30T13:06:33.000Z
|
crispy/gui/quanty/calculation.py
|
jminar/crispy
|
560bb11ee1ed03c1151f16a15725390784a38c79
|
[
"MIT"
] | null | null | null |
crispy/gui/quanty/calculation.py
|
jminar/crispy
|
560bb11ee1ed03c1151f16a15725390784a38c79
|
[
"MIT"
] | null | null | null |
# coding: utf-8
###################################################################
# Copyright (c) 2016-2020 European Synchrotron Radiation Facility #
# #
# Author: Marius Retegan #
# #
# This work is licensed under the terms of the MIT license. #
# For further information, see https://github.com/mretegan/crispy #
###################################################################
"""Classes used to setup Quanty calculations."""
import datetime
import glob
import logging
import os
import re
import subprocess
from functools import lru_cache
from PyQt5.QtCore import QProcess, Qt, pyqtSignal
from crispy import resourceAbsolutePath
from crispy.config import Config
from crispy.gui.items import BaseItem, DoubleItem, IntItem, SelectableItem
from crispy.gui.quanty.axes import Axes
from crispy.gui.quanty.hamiltonian import Hamiltonian
from crispy.gui.quanty.spectra import Spectra
from crispy.quanty import CALCULATIONS, XDB
logger = logging.getLogger(__name__)
settings = Config().read()
SUBSHELLS = {
"3d": {"atomicNumbers": (21, 30 + 1), "coreElectrons": 18},
"4d": {"atomicNumbers": (39, 48 + 1), "coreElectrons": 36},
"4f": {"atomicNumbers": (57, 71 + 1), "coreElectrons": 54},
"5d": {"atomicNumbers": (72, 80 + 1), "coreElectrons": 68},
"5f": {"atomicNumbers": (89, 103 + 1), "coreElectrons": 86},
}
OCCUPANCIES = {"s": 2, "p": 6, "d": 10, "f": 14}
class Element(BaseItem):
def __init__(self, parent=None, name="Element", value=None):
super().__init__(parent=parent, name=name)
self.symbol = None
self.charge = None
self.value = value
@property
def atomicNumber(self):
return XDB.atomic_number(self.symbol)
@property
def valenceSubshell(self):
"""Name of the valence subshell."""
for subshell, properties in SUBSHELLS.items():
if self.atomicNumber in range(*properties["atomicNumbers"]):
return subshell
return None
@property
def valenceBlock(self):
# pylint: disable=unsubscriptable-object
"""Name of the valence block."""
return self.valenceSubshell[-1]
@property
def valenceOccupancy(self):
"""Occupancy of the valence subshell."""
assert self.charge is not None, "The charge must be set."
# Reverse the string holding the charge before changing it to
# an integer.
charge = int(self.charge[::-1])
# Calculate the number of electrons of the ion.
ion_electrons = self.atomicNumber - charge
        core_electrons = SUBSHELLS[self.valenceSubshell]["coreElectrons"]
        occupancy = ion_electrons - core_electrons
return occupancy
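    # Worked example (values assumed for illustration): for Ni2+ the atomic
    # number is 28 and the valence subshell is 3d (18 core electrons), so the
    # valence occupancy is 28 - 2 - 18 = 8.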
@property
def value(self):
if self.charge is None:
return f"{self.symbol}"
return f"{self.symbol}{self.charge}"
@value.setter
def value(self, value):
if value is None:
return
        tokens = re.findall(r"(\w{1,2})(\d[+-])", value)
if not tokens:
raise ValueError(f"Invalid element {value}.")
[tokens] = tokens
self.symbol, self.charge = tokens
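    # Parsing sketch: Element(value="Ni2+") yields symbol "Ni" and charge "2+";
    # a value without a charge suffix, e.g. "Ni", raises ValueError.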
class Configuration:
# pylint: disable=too-many-instance-attributes
def __init__(self, value=None):
self.value = value
self.energy = None
self.atomic_parameters = None
@property
def value(self):
return self._value
@value.setter
def value(self, value):
PATTERNS = (r"^(\d)(\w)(\d+),(\d)(\w)(\d+)$", r"^(\d)(\w)(\d+)$")
# Test the configuration string.
        tokens = [token for pattern in PATTERNS for token in re.findall(pattern, value)]
if not tokens:
raise ValueError("Invalid configuration string.")
[tokens] = tokens
if len(tokens) == 3:
core = None
valence = tokens
elif len(tokens) == 6:
core = tokens[:3]
valence = tokens[-3:]
else:
raise ValueError("Unexpected length of the configuration string.")
valenceLevel, valenceShell, valenceOccupancy = valence
valenceLevel = int(valenceLevel)
valenceOccupancy = int(valenceOccupancy)
if valenceOccupancy > OCCUPANCIES[valenceShell]:
raise ValueError("Wrong number of electrons in the valence shell.")
if core:
coreLevel, coreShell, coreOccupancy = core
coreLevel = int(coreLevel)
coreOccupancy = int(coreOccupancy)
if coreOccupancy > OCCUPANCIES[coreShell]:
raise ValueError("Wrong number of electrons in the core shell.")
self.levels = (coreLevel, valenceLevel)
self.shells = (coreShell, valenceShell)
self.occupancies = [coreOccupancy, valenceOccupancy]
else:
self.levels = (valenceLevel,)
self.shells = (valenceShell,)
self.occupancies = [valenceOccupancy]
self.subshells = tuple(
[f"{level}{shell}" for level, shell in zip(self.levels, self.shells)]
)
self._value = value
@property
def hasCore(self):
return len(self.subshells) == 2
@staticmethod
def countParticles(shell, occupancy):
"""Count the number of particles (electrons) or quasiparticles
(holes) in a shell."""
key = f"{shell}{occupancy}"
if key in ("s0", "s2", "p0", "p6", "d0", "d10", "f0", "f14"):
particles = "zero"
elif key in ("s1", "p1", "p5", "d1", "d9", "f1", "f13"):
particles = "one"
else:
particles = "multiple"
return particles
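    # Classification sketch: countParticles("d", 9) -> "one" (a single hole),
    # countParticles("p", 6) -> "zero" (full shell), countParticles("d", 5)
    # -> "multiple".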
@property
def numberOfCoreParticles(self):
"""Count the number of core particles. Returns None if the electronic
configuration has no core."""
if not self.hasCore:
return None
core_shell, _ = self.shells
core_occupancy, _ = self.occupancies
return self.countParticles(core_shell, core_occupancy)
@classmethod
def fromSubshellsAndOccupancies(cls, subshells, occupancies):
value = ",".join(
f"{subshell:s}{occupancy:d}"
for subshell, occupancy in zip(subshells, occupancies)
)
return cls(value=value)
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return self.value == other.value
def __lt__(self, other):
return self.value < other.value
def __repr__(self):
return self.value
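    # Usage sketch (hypothetical values): Configuration("2p5,3d9") parses into
    # levels (2, 3), shells ("p", "d"), occupancies [5, 9], and subshells
    # ("2p", "3d"); hasCore is True and numberOfCoreParticles is "one".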
class Symmetry(BaseItem):
def __init__(self, parent=None, name="Symmetry", value=None):
super().__init__(parent=parent, name=name, value=value)
class Edge(BaseItem):
def __init__(self, parent=None, name="Edge", value=None):
super().__init__(parent=parent, name=name, value=value)
@property
def coreSubshells(self):
"""Use the name of the edge to determine the names of the core subshells.
e.g. for K (1s) the function returns ("1s",), while for K-L2,3 (1s2p) it
returns ("1s", "2p").
"""
PATTERNS = (r".*\((\d\w)(\d\w)\)", r".*\((\d\w)\)")
name = self.value
        tokens = [token for pattern in PATTERNS for token in re.findall(pattern, name)]
        if not tokens:
            raise ValueError("The name of the edge cannot be parsed.")
        # Get the single match produced by the patterns.
        [tokens] = tokens
if isinstance(tokens, str):
tokens = (tokens,)
return tokens
@property
def coreBlocks(self):
return tuple(subshell[1] for subshell in self.coreSubshells)
@property
def coreOccupancies(self):
return tuple(OCCUPANCIES[coreBlock] for coreBlock in self.coreBlocks)
@property
def labels(self):
"""Edge or line labels needed to interrogate xraydb database."""
CONVERTERS = {
"Kɑ": "Ka1",
"Kβ": "Kb1",
"K": "K",
"L1": "L1",
"L2,3": "L3",
"M1": "M1",
"M2,3": "M3",
"M4,5": "M5",
"N1": "N1",
"N2,3": "N3",
"N4,5": "N5",
"O1": "O1",
"O2,3": "O3",
"O4,5": "O5",
}
raw, _ = self.value.split()
names = list()
separator = "-"
if separator in raw:
names.extend(raw.split(separator))
else:
names.append(raw)
# TODO: This needs to be put in a try/except block.
names = [CONVERTERS[name] for name in names]
return tuple(names)
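    # Conversion sketch: an edge named "L2,3 (2p)" yields the labels ("L3",),
    # while "K-L2,3 (1s2p)" splits on "-" and yields ("K", "L3").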
class Experiment(BaseItem):
def __init__(self, parent=None, name="Experiment", value=None):
super().__init__(parent=parent, name=name, value=value)
@property
def isOneStep(self):
return self.value in ("XAS", "XPS")
@property
def isTwoSteps(self):
return not self.isOneStep
@property
def excitesToVacuum(self):
return self.value in ("XES", "XPS")
@property
def isOneDimensional(self):
return not self.isTwoDimensional
@property
def isTwoDimensional(self):
return self.value in ("RIXS",)
@property
def isEmission(self):
return self.value in ("XES",)
class Temperature(IntItem):
def __init__(self, parent=None, name="Temperature", value=None):
super().__init__(parent=parent, name=name, value=value)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if value < 0:
raise ValueError("The temperature cannot be negative.")
self._value = value
class MagneticField(DoubleItem):
def __init__(self, parent=None, name="Magnetic Field", value=None):
super().__init__(parent=parent, name=name, value=value)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
# Set the values in the magnetic field Hamiltonian term.
calculation = self.ancestor
hamiltonian = calculation.hamiltonian
# Use the normalized vector.
k = calculation.axes.xaxis.photon.k.normalized
TESLA_TO_EV = 5.7883818011084e-05
for i, name in enumerate(("Bx", "By", "Bz")):
            # Set the field component on each matching Hamiltonian item.
for item in hamiltonian.findChild(name):
item.value = k[i] * value * TESLA_TO_EV
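    # Worked example (hypothetical): a 1 T field with the photon wave vector
    # along z sets Bz to 1 * 5.7883818011084e-05 eV and leaves Bx = By = 0.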
class Runner(QProcess):
outputUpdated = pyqtSignal(str)
successful = pyqtSignal(bool)
def __init__(self, parent=None):
super().__init__(parent=parent)
# Merge stdout and stderr channels.
self.setProcessChannelMode(QProcess.MergedChannels)
self.startingTime = None
self.endingTime = None
self.readyRead.connect(self.updateOutput)
self.finished.connect(self.checkExitCodes)
self.output = str()
def run(self, inputName):
self.startingTime = datetime.datetime.now()
# Run Quanty using QProcess.
try:
self.start(self.executablePath, (inputName,))
except FileNotFoundError as error:
raise RuntimeError from error
cwd = os.getcwd()
message = f"Running Quanty {inputName} in the folder {cwd}."
logger.info(message)
def checkExitCodes(self, exitCode, exitStatus):
self.endingTime = datetime.datetime.now()
successful = False
if exitStatus == 0 and exitCode == 0:
message = "Quanty has finished successfully in "
delta = self.runningTime
            hours, remainder = divmod(delta, 3600)
            minutes, seconds = divmod(remainder, 60)
seconds = round(seconds, 2)
if hours > 0:
message += "{} hours {} minutes and {} seconds.".format(
hours, minutes, seconds
)
elif minutes > 0:
message += "{} minutes and {} seconds.".format(minutes, seconds)
else:
message += "{} seconds.".format(seconds)
logger.info(message)
successful = True
elif exitStatus == 0 and exitCode == 1:
message = (
"Quanty has finished unsuccessfully. "
"Check the logging window for more details."
)
logger.info(message)
# exitCode is platform dependent; exitStatus is always 1.
elif exitStatus == 1:
message = "Quanty was stopped."
logger.info(message)
self.successful.emit(successful)
def updateOutput(self):
data = self.readAll().data()
data = data.decode("utf-8").rstrip()
self.output = self.output + data
self.outputUpdated.emit(data)
@property
def runningTime(self):
return (self.endingTime - self.startingTime).total_seconds()
@property
def executablePath(self):
path = Config().read().value("Quanty/Path")
if path is None:
message = (
"The path to the Quanty executable is not set. "
"Please use the preferences menu to set it."
)
raise FileNotFoundError(message)
# Test the executable.
with open(os.devnull, "w") as fp:
try:
subprocess.call(path, stdout=fp, stderr=fp)
except FileNotFoundError as e:
message = (
"The Quanty executable is not working properly. "
"Is the PATH set correctly?"
)
logger.error(message)
raise e
return path
class Calculation(SelectableItem):
# pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-public-methods
titleChanged = pyqtSignal(str)
def __init__(
self,
symbol="Ni",
charge="2+",
symmetry="Oh",
experiment="XAS",
edge="L2,3 (2p)",
hamiltonian=True,
parent=None,
):
super().__init__(parent=parent, name="Calculation")
# Set the very special ancestor, in this case self.
self._ancestor = self
        # Validate the keyword arguments here rather than via properties;
        # with properties the validation gets rather convoluted.
self._symbols = list()
for subshell in CALCULATIONS.keys():
self._symbols.extend(CALCULATIONS[subshell]["symbols"])
self._symbols = tuple(sorted(self._symbols))
if symbol not in self.symbols:
symbol = self._symbols[0]
# Get the subshell.
subshell = None
for subshell in CALCULATIONS.keys():
if symbol in CALCULATIONS[subshell]["symbols"]:
break
symbols = CALCULATIONS[subshell]["symbols"]
experiments = CALCULATIONS[subshell]["experiments"]
self._charges = tuple(symbols[symbol]["charges"])
if charge not in self._charges:
charge = self._charges[0]
self._experiments = tuple(experiments)
if experiment not in self._experiments:
experiment = self._experiments[0]
self._symmetries = tuple(experiments[experiment]["symmetries"])
if symmetry not in self._symmetries:
symmetry = self._symmetries[0]
self._edges = tuple(experiments[experiment]["edges"])
if edge not in self._edges:
edge = self._edges[0]
self.element = Element(parent=self, value=f"{symbol}{charge}")
self.symmetry = Symmetry(parent=self, value=symmetry)
self.experiment = Experiment(parent=self, value=experiment)
self.edge = Edge(parent=self, value=edge)
self.temperature = Temperature(parent=self, value=10)
self.magneticField = MagneticField(parent=self, value=0)
self.axes = Axes(parent=self)
self.spectra = Spectra(parent=self)
# This flag is needed because the class is also used to generate Hamiltonian
# parameters, which are needed to create the Hamiltonian object in the
        # first place. A bit of a chicken-and-egg problem.
if hamiltonian:
self.hamiltonian = Hamiltonian(parent=self)
# Set the name of the calculation.
subshells = "".join(self.edge.coreSubshells)
element = self.element.value
symmetry = self.symmetry.value
experiment = self.experiment.value
self._value = f"{element}_{symmetry}_{experiment}_{subshells}"
# Instantiate the runner used to execute Quanty.
self.runner = Runner()
self.runner.successful.connect(self.process)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
self.dataChanged.emit(0)
self.titleChanged.emit(value)
def data(self, column, role=Qt.DisplayRole):
if role in (Qt.EditRole, Qt.DisplayRole, Qt.UserRole):
column = 0 if column == 1 else 1
return super().data(column, role)
def setData(self, column, value, role=Qt.EditRole):
if role in (Qt.EditRole, Qt.UserRole):
column = 0 if column == 1 else 1
return super().setData(column, value, role)
def flags(self, column):
return (
Qt.ItemIsEnabled
| Qt.ItemIsSelectable
| Qt.ItemIsEditable
| Qt.ItemIsUserCheckable
)
@property
def symbols(self):
return self._symbols
@property
def charges(self):
return self._charges
@property
def symmetries(self):
return self._symmetries
@property
def experiments(self):
return self._experiments
@property
def edges(self):
return self._edges
@property
def templateName(self):
valenceSubshell = self.element.valenceSubshell
symmetry = self.symmetry.value
experiment = self.experiment.value
subshells = "".join(self.edge.coreSubshells)
return f"{valenceSubshell}_{symmetry}_{experiment}_{subshells}.lua"
@property
@lru_cache()
def configurations(self):
"""Determine the electronic configurations involved in a calculation."""
valenceSubshell = self.element.valenceSubshell
valenceOccupancy = self.element.valenceOccupancy
configurations = list()
# Initial configuration.
initialConfiguration = Configuration.fromSubshellsAndOccupancies(
subshells=(valenceSubshell,), occupancies=(valenceOccupancy,)
)
configurations.append(initialConfiguration)
# Final and in some cases intermediate configurations.
if self.experiment.isOneStep:
if not self.experiment.excitesToVacuum:
valenceOccupancy += 1
(coreSubshell,) = self.edge.coreSubshells
(coreOccupancy,) = self.edge.coreOccupancies
coreOccupancy -= 1
finalConfiguration = Configuration.fromSubshellsAndOccupancies(
subshells=(coreSubshell, valenceSubshell),
occupancies=(coreOccupancy, valenceOccupancy),
)
configurations.append(finalConfiguration)
else:
if not self.experiment.excitesToVacuum:
valenceOccupancy += 1
core1Subshell, core2Subshell = self.edge.coreSubshells
core1Occupancy, core2Occupancy = self.edge.coreOccupancies
core1Occupancy -= 1
core2Occupancy -= 1
intermediateConfiguration = Configuration.fromSubshellsAndOccupancies(
subshells=(core1Subshell, valenceSubshell),
occupancies=(core1Occupancy, valenceOccupancy),
)
configurations.append(intermediateConfiguration)
if core2Subshell == valenceSubshell:
finalConfiguration = Configuration.fromSubshellsAndOccupancies(
subshells=(valenceSubshell,),
occupancies=(valenceOccupancy - 1,),
)
else:
finalConfiguration = Configuration.fromSubshellsAndOccupancies(
subshells=(core2Subshell, valenceSubshell),
occupancies=(core2Occupancy, valenceOccupancy),
)
configurations.append(finalConfiguration)
return configurations
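    # Worked example (hypothetical): for a one-step Ni2+ L2,3 (2p) XAS
    # calculation, the initial configuration is "3d8" and the final one is
    # "2p5,3d9" (one 2p core hole plus one extra valence electron).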
@property
def replacements(self):
"""Replacements dictionary used to fill the calculation template. The
construction of more complex items is delegated to the respective object.
"""
replacements = dict()
        # Values defined in other places.
replacements["Verbosity"] = settings.value("Quanty/Verbosity")
replacements["DenseBorder"] = settings.value("Quanty/DenseBorder")
replacements["ShiftToZero"] = settings.value("Quanty/ShiftSpectra")
subshell = self.element.valenceSubshell
occupancy = self.element.valenceOccupancy
replacements[f"NElectrons_{subshell}"] = occupancy
replacements["Temperature"] = self.temperature.value
replacements["Prefix"] = self.value
replacements.update(self.axes.xaxis.replacements)
if self.experiment.isTwoDimensional:
replacements.update(self.axes.yaxis.replacements)
replacements.update(self.spectra.replacements)
replacements.update(self.hamiltonian.replacements)
return replacements
@property
def input(self):
path = resourceAbsolutePath(
os.path.join("quanty", "templates", f"{self.templateName}")
)
try:
with open(path) as fp:
template = fp.read()
except FileNotFoundError as e:
message = f"Could not find the template file {self.templateName}."
logger.error(message)
raise e
for pattern, replacement in self.replacements.items():
# True/False in Lua are lowercase.
if isinstance(replacement, bool):
replacement = str(replacement).lower()
else:
replacement = str(replacement)
template = template.replace(f"${pattern}", str(replacement))
return template
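    # Substitution sketch (hypothetical template line): "Temperature = $Temperature"
    # becomes "Temperature = 10", with boolean replacements lowercased to match
    # Lua ("true"/"false").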
@property
def inputName(self):
return f"{self.value}.lua"
@property
def output(self):
return self.runner.output
# @property
# def summary(self):
# return f"Summary for {self.value}"
def saveInput(self):
# TODO: Is this too hidden?
os.chdir(settings.value("CurrentPath"))
with open(self.inputName, "w") as fp:
fp.write(self.input)
def run(self):
# Don't crash if something went wrong when saving the input file.
try:
self.saveInput()
except FileNotFoundError:
return
self.runner.run(self.inputName)
def process(self, successful):
if not successful:
return
# TODO: Check if loading the spectra was successful.
self.spectra.load()
def stop(self):
self.runner.kill()
def clean(self):
os.remove(f"{self.value}.lua")
# Remove the spectra.
for spectrum in glob.glob(f"{self.value}*.spec"):
os.remove(spectrum)
def copyFrom(self, item):
super().copyFrom(item)
self.temperature.copyFrom(item.temperature)
self.magneticField.copyFrom(item.magneticField)
self.axes.copyFrom(item.axes)
self.spectra.copyFrom(item.spectra)
self.hamiltonian.copyFrom(item.hamiltonian)
def main():
pass
if __name__ == "__main__":
main()
| 31.88785
| 95
| 0.59521
|
occupancy):
key = f"{shell}{occupancy}"
if key in ("s0", "s2", "p0", "p6", "d0", "d10", "f0", "f14"):
particles = "zero"
elif key in ("s1", "p1", "p5", "d1", "d9", "f1", "f13"):
particles = "one"
else:
particles = "multiple"
return particles
@property
def numberOfCoreParticles(self):
if not self.hasCore:
return None
core_shell, _ = self.shells
core_occupancy, _ = self.occupancies
return self.countParticles(core_shell, core_occupancy)
@classmethod
def fromSubshellsAndOccupancies(cls, subshells, occupancies):
value = ",".join(
f"{subshell:s}{occupancy:d}"
for subshell, occupancy in zip(subshells, occupancies)
)
return cls(value=value)
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return self.value == other.value
def __lt__(self, other):
return self.value < other.value
def __repr__(self):
return self.value
class Symmetry(BaseItem):
def __init__(self, parent=None, name="Symmetry", value=None):
super().__init__(parent=parent, name=name, value=value)
class Edge(BaseItem):
def __init__(self, parent=None, name="Edge", value=None):
super().__init__(parent=parent, name=name, value=value)
@property
def coreSubshells(self):
PATTERNS = (r".*\((\d\w)(\d\w)\)", r".*\((\d\w)\)")
name = self.value
        tokens = [token for pattern in PATTERNS for token in re.findall(pattern, name)]
        if not tokens:
            raise ValueError("The name of the edge cannot be parsed.")
        [tokens] = tokens
if isinstance(tokens, str):
tokens = (tokens,)
return tokens
@property
def coreBlocks(self):
return tuple(subshell[1] for subshell in self.coreSubshells)
@property
def coreOccupancies(self):
return tuple(OCCUPANCIES[coreBlock] for coreBlock in self.coreBlocks)
@property
def labels(self):
CONVERTERS = {
"Kɑ": "Ka1",
"Kβ": "Kb1",
"K": "K",
"L1": "L1",
"L2,3": "L3",
"M1": "M1",
"M2,3": "M3",
"M4,5": "M5",
"N1": "N1",
"N2,3": "N3",
"N4,5": "N5",
"O1": "O1",
"O2,3": "O3",
"O4,5": "O5",
}
raw, _ = self.value.split()
names = list()
separator = "-"
if separator in raw:
names.extend(raw.split(separator))
else:
names.append(raw)
names = [CONVERTERS[name] for name in names]
return tuple(names)
class Experiment(BaseItem):
def __init__(self, parent=None, name="Experiment", value=None):
super().__init__(parent=parent, name=name, value=value)
@property
def isOneStep(self):
return self.value in ("XAS", "XPS")
@property
def isTwoSteps(self):
return not self.isOneStep
@property
def excitesToVacuum(self):
return self.value in ("XES", "XPS")
@property
def isOneDimensional(self):
return not self.isTwoDimensional
@property
def isTwoDimensional(self):
return self.value in ("RIXS",)
@property
def isEmission(self):
return self.value in ("XES",)
class Temperature(IntItem):
def __init__(self, parent=None, name="Temperature", value=None):
super().__init__(parent=parent, name=name, value=value)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if value < 0:
raise ValueError("The temperature cannot be negative.")
self._value = value
class MagneticField(DoubleItem):
def __init__(self, parent=None, name="Magnetic Field", value=None):
super().__init__(parent=parent, name=name, value=value)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
calculation = self.ancestor
hamiltonian = calculation.hamiltonian
k = calculation.axes.xaxis.photon.k.normalized
TESLA_TO_EV = 5.7883818011084e-05
for i, name in enumerate(("Bx", "By", "Bz")):
for item in hamiltonian.findChild(name):
item.value = k[i] * value * TESLA_TO_EV
class Runner(QProcess):
outputUpdated = pyqtSignal(str)
successful = pyqtSignal(bool)
def __init__(self, parent=None):
super().__init__(parent=parent)
self.setProcessChannelMode(QProcess.MergedChannels)
self.startingTime = None
self.endingTime = None
self.readyRead.connect(self.updateOutput)
self.finished.connect(self.checkExitCodes)
self.output = str()
def run(self, inputName):
self.startingTime = datetime.datetime.now()
try:
self.start(self.executablePath, (inputName,))
except FileNotFoundError as error:
raise RuntimeError from error
cwd = os.getcwd()
message = f"Running Quanty {inputName} in the folder {cwd}."
logger.info(message)
def checkExitCodes(self, exitCode, exitStatus):
self.endingTime = datetime.datetime.now()
successful = False
if exitStatus == 0 and exitCode == 0:
message = "Quanty has finished successfully in "
delta = self.runningTime
            hours, remainder = divmod(delta, 3600)
            minutes, seconds = divmod(remainder, 60)
seconds = round(seconds, 2)
if hours > 0:
message += "{} hours {} minutes and {} seconds.".format(
hours, minutes, seconds
)
elif minutes > 0:
message += "{} minutes and {} seconds.".format(minutes, seconds)
else:
message += "{} seconds.".format(seconds)
logger.info(message)
successful = True
elif exitStatus == 0 and exitCode == 1:
message = (
"Quanty has finished unsuccessfully. "
"Check the logging window for more details."
)
logger.info(message)
elif exitStatus == 1:
message = "Quanty was stopped."
logger.info(message)
self.successful.emit(successful)
def updateOutput(self):
data = self.readAll().data()
data = data.decode("utf-8").rstrip()
self.output = self.output + data
self.outputUpdated.emit(data)
@property
def runningTime(self):
return (self.endingTime - self.startingTime).total_seconds()
@property
def executablePath(self):
path = Config().read().value("Quanty/Path")
if path is None:
message = (
"The path to the Quanty executable is not set. "
"Please use the preferences menu to set it."
)
raise FileNotFoundError(message)
with open(os.devnull, "w") as fp:
try:
subprocess.call(path, stdout=fp, stderr=fp)
except FileNotFoundError as e:
message = (
"The Quanty executable is not working properly. "
"Is the PATH set correctly?"
)
logger.error(message)
raise e
return path
class Calculation(SelectableItem):
titleChanged = pyqtSignal(str)
def __init__(
self,
symbol="Ni",
charge="2+",
symmetry="Oh",
experiment="XAS",
edge="L2,3 (2p)",
hamiltonian=True,
parent=None,
):
super().__init__(parent=parent, name="Calculation")
self._ancestor = self
self._symbols = list()
for subshell in CALCULATIONS.keys():
self._symbols.extend(CALCULATIONS[subshell]["symbols"])
self._symbols = tuple(sorted(self._symbols))
if symbol not in self.symbols:
symbol = self._symbols[0]
subshell = None
for subshell in CALCULATIONS.keys():
if symbol in CALCULATIONS[subshell]["symbols"]:
break
symbols = CALCULATIONS[subshell]["symbols"]
experiments = CALCULATIONS[subshell]["experiments"]
self._charges = tuple(symbols[symbol]["charges"])
if charge not in self._charges:
charge = self._charges[0]
self._experiments = tuple(experiments)
if experiment not in self._experiments:
experiment = self._experiments[0]
self._symmetries = tuple(experiments[experiment]["symmetries"])
if symmetry not in self._symmetries:
symmetry = self._symmetries[0]
self._edges = tuple(experiments[experiment]["edges"])
if edge not in self._edges:
edge = self._edges[0]
self.element = Element(parent=self, value=f"{symbol}{charge}")
self.symmetry = Symmetry(parent=self, value=symmetry)
self.experiment = Experiment(parent=self, value=experiment)
self.edge = Edge(parent=self, value=edge)
self.temperature = Temperature(parent=self, value=10)
self.magneticField = MagneticField(parent=self, value=0)
self.axes = Axes(parent=self)
self.spectra = Spectra(parent=self)
if hamiltonian:
self.hamiltonian = Hamiltonian(parent=self)
subshells = "".join(self.edge.coreSubshells)
element = self.element.value
symmetry = self.symmetry.value
experiment = self.experiment.value
self._value = f"{element}_{symmetry}_{experiment}_{subshells}"
self.runner = Runner()
self.runner.successful.connect(self.process)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
self.dataChanged.emit(0)
self.titleChanged.emit(value)
def data(self, column, role=Qt.DisplayRole):
if role in (Qt.EditRole, Qt.DisplayRole, Qt.UserRole):
column = 0 if column == 1 else 1
return super().data(column, role)
def setData(self, column, value, role=Qt.EditRole):
if role in (Qt.EditRole, Qt.UserRole):
column = 0 if column == 1 else 1
return super().setData(column, value, role)
def flags(self, column):
return (
Qt.ItemIsEnabled
| Qt.ItemIsSelectable
| Qt.ItemIsEditable
| Qt.ItemIsUserCheckable
)
@property
def symbols(self):
return self._symbols
@property
def charges(self):
return self._charges
@property
def symmetries(self):
return self._symmetries
@property
def experiments(self):
return self._experiments
@property
def edges(self):
return self._edges
@property
def templateName(self):
valenceSubshell = self.element.valenceSubshell
symmetry = self.symmetry.value
experiment = self.experiment.value
subshells = "".join(self.edge.coreSubshells)
return f"{valenceSubshell}_{symmetry}_{experiment}_{subshells}.lua"
@property
@lru_cache()
def configurations(self):
valenceSubshell = self.element.valenceSubshell
valenceOccupancy = self.element.valenceOccupancy
configurations = list()
initialConfiguration = Configuration.fromSubshellsAndOccupancies(
subshells=(valenceSubshell,), occupancies=(valenceOccupancy,)
)
configurations.append(initialConfiguration)
if self.experiment.isOneStep:
if not self.experiment.excitesToVacuum:
valenceOccupancy += 1
(coreSubshell,) = self.edge.coreSubshells
(coreOccupancy,) = self.edge.coreOccupancies
coreOccupancy -= 1
finalConfiguration = Configuration.fromSubshellsAndOccupancies(
subshells=(coreSubshell, valenceSubshell),
occupancies=(coreOccupancy, valenceOccupancy),
)
configurations.append(finalConfiguration)
else:
if not self.experiment.excitesToVacuum:
valenceOccupancy += 1
core1Subshell, core2Subshell = self.edge.coreSubshells
core1Occupancy, core2Occupancy = self.edge.coreOccupancies
core1Occupancy -= 1
core2Occupancy -= 1
intermediateConfiguration = Configuration.fromSubshellsAndOccupancies(
subshells=(core1Subshell, valenceSubshell),
occupancies=(core1Occupancy, valenceOccupancy),
)
configurations.append(intermediateConfiguration)
if core2Subshell == valenceSubshell:
finalConfiguration = Configuration.fromSubshellsAndOccupancies(
subshells=(valenceSubshell,),
occupancies=(valenceOccupancy - 1,),
)
else:
finalConfiguration = Configuration.fromSubshellsAndOccupancies(
subshells=(core2Subshell, valenceSubshell),
occupancies=(core2Occupancy, valenceOccupancy),
)
configurations.append(finalConfiguration)
return configurations
@property
def replacements(self):
replacements = dict()
replacements["Verbosity"] = settings.value("Quanty/Verbosity")
replacements["DenseBorder"] = settings.value("Quanty/DenseBorder")
replacements["ShiftToZero"] = settings.value("Quanty/ShiftSpectra")
subshell = self.element.valenceSubshell
occupancy = self.element.valenceOccupancy
replacements[f"NElectrons_{subshell}"] = occupancy
replacements["Temperature"] = self.temperature.value
replacements["Prefix"] = self.value
replacements.update(self.axes.xaxis.replacements)
if self.experiment.isTwoDimensional:
replacements.update(self.axes.yaxis.replacements)
replacements.update(self.spectra.replacements)
replacements.update(self.hamiltonian.replacements)
return replacements
@property
def input(self):
path = resourceAbsolutePath(
os.path.join("quanty", "templates", f"{self.templateName}")
)
try:
with open(path) as fp:
template = fp.read()
except FileNotFoundError as e:
message = f"Could not find the template file {self.templateName}."
logger.error(message)
raise e
for pattern, replacement in self.replacements.items():
if isinstance(replacement, bool):
replacement = str(replacement).lower()
else:
replacement = str(replacement)
template = template.replace(f"${pattern}", str(replacement))
return template
@property
def inputName(self):
return f"{self.value}.lua"
@property
def output(self):
return self.runner.output
def saveInput(self):
os.chdir(settings.value("CurrentPath"))
with open(self.inputName, "w") as fp:
fp.write(self.input)
def run(self):
try:
self.saveInput()
except FileNotFoundError:
return
self.runner.run(self.inputName)
def process(self, successful):
if not successful:
return
# TODO: Check if loading the spectra was successful.
self.spectra.load()
def stop(self):
self.runner.kill()
def clean(self):
os.remove(f"{self.value}.lua")
# Remove the spectra.
for spectrum in glob.glob(f"{self.value}*.spec"):
os.remove(spectrum)
def copyFrom(self, item):
super().copyFrom(item)
self.temperature.copyFrom(item.temperature)
self.magneticField.copyFrom(item.magneticField)
self.axes.copyFrom(item.axes)
self.spectra.copyFrom(item.spectra)
self.hamiltonian.copyFrom(item.hamiltonian)
def main():
pass
if __name__ == "__main__":
main()
| true
| true
|
79031e515b9c6be6ec22ecba7a1159a1d4f7ac99
| 849
|
py
|
Python
|
mainsite/models.py
|
nandosarracino/mymainsite
|
1dbb215e6ec14608e39a9bb913aad35bd97d3429
|
[
"MIT"
] | null | null | null |
mainsite/models.py
|
nandosarracino/mymainsite
|
1dbb215e6ec14608e39a9bb913aad35bd97d3429
|
[
"MIT"
] | null | null | null |
mainsite/models.py
|
nandosarracino/mymainsite
|
1dbb215e6ec14608e39a9bb913aad35bd97d3429
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class BaseView(models.Model):
title = models.CharField(max_length=256)
def __unicode__(self):
return self.title
class port1View(models.Model):
    title = models.CharField(max_length=256)
    def __unicode__(self):
        return self.title
class port2View(models.Model):
title = models.CharField(max_length=256)
def __unicode__(self):
return self.title
class port3View(models.Model):
title = models.CharField(max_length=256)
def __unicode__(self):
return self.title
class port4View(models.Model):
title = models.CharField(max_length=256)
def __unicode__(self):
return self.title
class port5View(models.Model):
title = models.CharField(max_length=256)
def __unicode__(self):
return self.title
class port6View(models.Model):
title = models.CharField(max_length=256)
def __unicode__(self):
return self.title
| 18.456522
| 41
| 0.762073
|
from django.db import models
class BaseView(models.Model):
title = models.CharField(max_length=256)
def __unicode__(self):
return self.title
class port1View(models.Model):
    title = models.CharField(max_length=256)
    def __unicode__(self):
        return self.title
class port2View(models.Model):
title = models.CharField(max_length=256)
def __unicode__(self):
return self.title
class port3View(models.Model):
title = models.CharField(max_length=256)
def __unicode__(self):
return self.title
class port4View(models.Model):
title = models.CharField(max_length=256)
def __unicode__(self):
return self.title
class port5View(models.Model):
title = models.CharField(max_length=256)
def __unicode__(self):
return self.title
class port6View(models.Model):
title = models.CharField(max_length=256)
def __unicode__(self):
return self.title
| true
| true
|
79031ee1f9606392eae10f9c26d2dd5b64be72c6
| 41
|
py
|
Python
|
sfcsmCtrl/model/__init__.py
|
chenhui0228/sfcsm
|
ef9adbc7d2ec8d97cee053678002b65ca41b804b
|
[
"Apache-2.0"
] | 1
|
2018-06-04T06:26:27.000Z
|
2018-06-04T06:26:27.000Z
|
sfcsmCtrl/model/__init__.py
|
chenhui0228/sfcsm
|
ef9adbc7d2ec8d97cee053678002b65ca41b804b
|
[
"Apache-2.0"
] | null | null | null |
sfcsmCtrl/model/__init__.py
|
chenhui0228/sfcsm
|
ef9adbc7d2ec8d97cee053678002b65ca41b804b
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'Alexis.Koalla@orange.com'
| 20.5
| 40
| 0.756098
|
__author__ = 'Alexis.Koalla@orange.com'
| true
| true
|
79031f24504ec4a3d2d8947f9b101ed9a10896e0
| 7,915
|
py
|
Python
|
benchmark/btb_benchmark/kubernetes.py
|
dataronio/BTB
|
5053ed705cf2542e320e8a7605642f5b01db8272
|
[
"MIT"
] | 161
|
2017-12-20T00:17:35.000Z
|
2020-11-25T18:18:15.000Z
|
benchmark/btb_benchmark/kubernetes.py
|
pvk-developer/BTB
|
49d2f3c00881919a23c6578cd02fcfb6f4d33354
|
[
"MIT"
] | 162
|
2017-12-26T18:44:38.000Z
|
2020-11-19T15:53:03.000Z
|
benchmark/btb_benchmark/kubernetes.py
|
pvk-developer/BTB
|
49d2f3c00881919a23c6578cd02fcfb6f4d33354
|
[
"MIT"
] | 37
|
2018-01-03T09:28:08.000Z
|
2020-09-23T10:23:46.000Z
|
# -*- coding: utf-8 -*-
import argparse
import importlib
import json
import logging
import os
import re
import sys
from io import StringIO
import boto3
import tabulate
import yaml
from dask.distributed import Client
from dask_kubernetes import KubeCluster
from kubernetes.client import Configuration
from kubernetes.client.api import core_v1_api
from kubernetes.config import load_kube_config
RUN_TEMPLATE = """
/bin/bash <<'EOF'
{}
EOF
"""
CONFIG_TEMPLATE = """
cat > config.json << JSON
{}
JSON
"""
WORKER_COMM = '/usr/bin/prepare.sh dask-worker --no-dashboard --memory-limit 0 --death-timeout 0'
def _import_function(config):
function = config['function']
function = function.split('.')
function_name = function[-1]
package = '.'.join(function[:-1])
module = importlib.import_module(package)
return getattr(module, function_name)
def _get_extra_setup(setup_dict):
extra_packages = []
script = setup_dict.get('script')
if script:
extra_packages.append('exec {}'.format(script))
apt_packages = setup_dict.get('apt_packages')
if apt_packages:
        extra_packages.append('apt-get install {}'.format(' '.join(apt_packages)))
pip_packages = setup_dict.get('pip_packages')
if pip_packages:
extra_packages.append('pip install {}'.format(' '.join(pip_packages)))
git_repository = setup_dict.get('git_repository')
if git_repository:
url = git_repository.get('url')
reference = git_repository.get('reference', 'master')
install = git_repository.get('install')
git_clone = 'git clone {} repo && cd repo'.format(url)
git_checkout = 'git checkout {}'.format(reference)
extra_packages.append('\n '.join([git_clone, git_checkout, install]))
    # Joining also covers the empty and single-item cases, avoiding an
    # IndexError when no setup steps were recognized.
    return '\n '.join(extra_packages)
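# Usage sketch (hypothetical input): _get_extra_setup({'apt_packages': ['git'],
# 'pip_packages': ['dask']}) returns 'apt-get install git' and 'pip install dask'
# joined on newlines, ready to be spliced into the worker startup script.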
def _generate_cluster_spec(config, kubernetes=False):
extra_setup = ''
dask_cluster = config['dask_cluster']
metadata = {}
worker_config = dask_cluster.get('worker_config')
if worker_config.get('setup'):
extra_setup = _get_extra_setup(worker_config['setup'])
if kubernetes:
name = worker_config.get('image', 'daskdev/dask:latest')
name = '{}-'.format(re.sub(r'[\W_]', '-', name))
metadata['generateName'] = name
config_command = CONFIG_TEMPLATE.format(json.dumps(config))
run_command = 'python -u -m btb_benchmark.kubernetes config.json'
extra_setup = '\n'.join([extra_setup, config_command, run_command])
else:
run_command = WORKER_COMM
extra_setup = '\n'.join([extra_setup, run_command])
run_commands = RUN_TEMPLATE.format(extra_setup)
spec = {
'metadata': metadata,
'spec': {
'restartPolicy': 'Never',
'containers': [{
'args': ['-c', run_commands],
'command': ['tini', '-g', '--', '/bin/sh'],
'image': worker_config.get('image', 'daskdev/dask:latest'),
'name': 'dask-worker',
'resources': worker_config.get('resources', {})
}]
}
}
return spec
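# The returned dict is a minimal Kubernetes pod manifest: with kubernetes=True
# the pod gets a generateName derived from the image and re-runs this module
# against the serialized config; otherwise it starts a plain dask worker.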
def _df_to_csv_str(df):
with StringIO() as sio:
df.to_csv(sio)
return sio.getvalue()
def _upload_to_s3(bucket, path, results, aws_key=None, aws_secret=None):
client = boto3.client('s3', aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)
client.put_object(Bucket=bucket, Key=path, Body=_df_to_csv_str(results))
def run_dask_function(config):
"""Start a Dask Cluster using dask-kubernetes and run a function.
    Talks to kubernetes to create `n` new pods, each with a dask worker inside,
    which together form a `dask` cluster. A function specified in `config` is then
    imported and run with the given arguments; the tasks created by this `function`
    are run on the `dask` cluster for distributed computation.
The config dict must contain the following sections:
* run
* dask_cluster
* output
Args:
config (dict):
Config dictionary.
"""
output_conf = config.get('output')
if output_conf:
path = output_conf.get('path')
if not path:
raise ValueError('An output path must be provided when providing `output`.')
cluster_spec = _generate_cluster_spec(config, kubernetes=False)
cluster = KubeCluster.from_dict(cluster_spec)
workers = config['dask_cluster'].get('workers')
if not workers:
cluster.adapt()
elif isinstance(workers, int):
cluster.scale(workers)
else:
cluster.adapt(**workers)
client = Client(cluster)
client.get_versions(check=True)
try:
run = _import_function(config['run'])
kwargs = config['run']['args']
results = run(**kwargs)
finally:
client.close()
cluster.close()
if output_conf:
bucket = output_conf.get('bucket')
try:
if bucket:
aws_key = output_conf.get('key')
aws_secret = output_conf.get('secret_key')
_upload_to_s3(bucket, path, results, aws_key, aws_secret)
else:
os.makedirs(os.path.dirname(path), exist_ok=True)
results.to_csv(path)
except Exception:
print('Error storing results. Falling back to console dump.')
print(_df_to_csv_str(results))
else:
return results
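# A minimal config sketch (all names and values hypothetical):
# {
#     "run": {"function": "btb_benchmark.main.run_benchmark", "args": {}},
#     "dask_cluster": {"workers": 2,
#                      "worker_config": {"image": "daskdev/dask:latest"}},
#     "output": {"path": "results/results.csv"}
# }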
def run_on_kubernetes(config, namespace='default'):
"""Run dask function inside a pod using the given config.
Create a pod, using the local kubernetes configuration that starts a Dask Cluster
using dask-kubernetes and runs a function specified within the `config` dictionary.
Args:
config (dict):
Config dictionary.
namespace (str):
            Kubernetes namespace where the pod will be created.
"""
# read local config
load_kube_config()
c = Configuration()
Configuration.set_default(c)
    # create client and create pod in the requested namespace
core_v1 = core_v1_api.CoreV1Api()
spec = _generate_cluster_spec(config, kubernetes=True)
core_v1.create_namespaced_pod(body=spec, namespace=namespace)
print('Pod created.')
def _get_parser():
parser = argparse.ArgumentParser(description='Run on Kubernetes Command Line Interface')
parser.add_argument('config', help='Path to the JSON config file.')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='Be verbose. Use -vv for increased verbosity.')
parser.add_argument('--create-pod', action='store_true',
help='Create a master pod and run the given `config` from there.')
parser.add_argument('-n', '--namespace', default='default',
                        help='Namespace where the pod will be created.')
return parser
def main():
# Parse args
parser = _get_parser()
if len(sys.argv) < 2:
parser.print_help()
sys.exit(0)
args = parser.parse_args()
# Logger setup
log_level = (3 - args.verbose) * 10
fmt = '%(asctime)s - %(process)d - %(levelname)s - %(name)s - %(module)s - %(message)s'
logging.basicConfig(level=log_level, format=fmt)
with open(args.config) as config_file:
if args.config.endswith('yaml') or args.config.endswith('yml'):
config = yaml.safe_load(config_file)
else:
config = json.load(config_file)
if args.create_pod:
run_on_kubernetes(config, args.namespace)
else:
results = run_dask_function(config)
if results is not None:
print(tabulate.tabulate(
results,
tablefmt='github',
headers=results.columns
))
if __name__ == '__main__':
main()
| 29.314815
| 97
| 0.637271
|
import argparse
import importlib
import json
import logging
import os
import re
import sys
from io import StringIO
import boto3
import tabulate
import yaml
from dask.distributed import Client
from dask_kubernetes import KubeCluster
from kubernetes.client import Configuration
from kubernetes.client.api import core_v1_api
from kubernetes.config import load_kube_config
RUN_TEMPLATE = """
/bin/bash <<'EOF'
{}
EOF
"""
CONFIG_TEMPLATE = """
cat > config.json << JSON
{}
JSON
"""
WORKER_COMM = '/usr/bin/prepare.sh dask-worker --no-dashboard --memory-limit 0 --death-timeout 0'
def _import_function(config):
function = config['function']
function = function.split('.')
function_name = function[-1]
package = '.'.join(function[:-1])
module = importlib.import_module(package)
return getattr(module, function_name)
def _get_extra_setup(setup_dict):
extra_packages = []
script = setup_dict.get('script')
if script:
extra_packages.append('exec {}'.format(script))
apt_packages = setup_dict.get('apt_packages')
if apt_packages:
        extra_packages.append('apt-get install {}'.format(' '.join(apt_packages)))
pip_packages = setup_dict.get('pip_packages')
if pip_packages:
extra_packages.append('pip install {}'.format(' '.join(pip_packages)))
git_repository = setup_dict.get('git_repository')
if git_repository:
url = git_repository.get('url')
reference = git_repository.get('reference', 'master')
install = git_repository.get('install')
git_clone = 'git clone {} repo && cd repo'.format(url)
git_checkout = 'git checkout {}'.format(reference)
extra_packages.append('\n '.join([git_clone, git_checkout, install]))
    return '\n '.join(extra_packages)
def _generate_cluster_spec(config, kubernetes=False):
extra_setup = ''
dask_cluster = config['dask_cluster']
metadata = {}
worker_config = dask_cluster.get('worker_config')
if worker_config.get('setup'):
extra_setup = _get_extra_setup(worker_config['setup'])
if kubernetes:
name = worker_config.get('image', 'daskdev/dask:latest')
name = '{}-'.format(re.sub(r'[\W_]', '-', name))
metadata['generateName'] = name
config_command = CONFIG_TEMPLATE.format(json.dumps(config))
run_command = 'python -u -m btb_benchmark.kubernetes config.json'
extra_setup = '\n'.join([extra_setup, config_command, run_command])
else:
run_command = WORKER_COMM
extra_setup = '\n'.join([extra_setup, run_command])
run_commands = RUN_TEMPLATE.format(extra_setup)
spec = {
'metadata': metadata,
'spec': {
'restartPolicy': 'Never',
'containers': [{
'args': ['-c', run_commands],
'command': ['tini', '-g', '--', '/bin/sh'],
'image': worker_config.get('image', 'daskdev/dask:latest'),
'name': 'dask-worker',
'resources': worker_config.get('resources', {})
}]
}
}
return spec
def _df_to_csv_str(df):
with StringIO() as sio:
df.to_csv(sio)
return sio.getvalue()
def _upload_to_s3(bucket, path, results, aws_key=None, aws_secret=None):
client = boto3.client('s3', aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)
client.put_object(Bucket=bucket, Key=path, Body=_df_to_csv_str(results))
def run_dask_function(config):
output_conf = config.get('output')
if output_conf:
path = output_conf.get('path')
if not path:
raise ValueError('An output path must be provided when providing `output`.')
cluster_spec = _generate_cluster_spec(config, kubernetes=False)
cluster = KubeCluster.from_dict(cluster_spec)
workers = config['dask_cluster'].get('workers')
if not workers:
cluster.adapt()
elif isinstance(workers, int):
cluster.scale(workers)
else:
cluster.adapt(**workers)
client = Client(cluster)
client.get_versions(check=True)
try:
run = _import_function(config['run'])
kwargs = config['run']['args']
results = run(**kwargs)
finally:
client.close()
cluster.close()
if output_conf:
bucket = output_conf.get('bucket')
try:
if bucket:
aws_key = output_conf.get('key')
aws_secret = output_conf.get('secret_key')
_upload_to_s3(bucket, path, results, aws_key, aws_secret)
else:
os.makedirs(os.path.dirname(path), exist_ok=True)
results.to_csv(path)
except Exception:
print('Error storing results. Falling back to console dump.')
print(_df_to_csv_str(results))
else:
return results
def run_on_kubernetes(config, namespace='default'):
load_kube_config()
c = Configuration()
Configuration.set_default(c)
core_v1 = core_v1_api.CoreV1Api()
spec = _generate_cluster_spec(config, kubernetes=True)
core_v1.create_namespaced_pod(body=spec, namespace=namespace)
print('Pod created.')
def _get_parser():
parser = argparse.ArgumentParser(description='Run on Kubernetes Command Line Interface')
parser.add_argument('config', help='Path to the JSON config file.')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='Be verbose. Use -vv for increased verbosity.')
parser.add_argument('--create-pod', action='store_true',
help='Create a master pod and run the given `config` from there.')
parser.add_argument('-n', '--namespace', default='default',
                        help='Namespace where the pod will be created.')
return parser
def main():
parser = _get_parser()
if len(sys.argv) < 2:
parser.print_help()
sys.exit(0)
args = parser.parse_args()
log_level = (3 - args.verbose) * 10
fmt = '%(asctime)s - %(process)d - %(levelname)s - %(name)s - %(module)s - %(message)s'
logging.basicConfig(level=log_level, format=fmt)
with open(args.config) as config_file:
if args.config.endswith('yaml') or args.config.endswith('yml'):
config = yaml.safe_load(config_file)
else:
config = json.load(config_file)
if args.create_pod:
run_on_kubernetes(config, args.namespace)
else:
results = run_dask_function(config)
if results is not None:
print(tabulate.tabulate(
results,
tablefmt='github',
headers=results.columns
))
if __name__ == '__main__':
main()
| true
| true
|
79031fbd015e3fe884659e5cb80d8ca3b0a52a07
| 248
|
py
|
Python
|
append and delete.py
|
kasyap1234/codingproblems
|
7368222c5fb67b4796410597f68401654878fee0
|
[
"MIT"
] | 1
|
2021-04-15T16:09:52.000Z
|
2021-04-15T16:09:52.000Z
|
append and delete.py
|
kasyap1234/codingproblems
|
7368222c5fb67b4796410597f68401654878fee0
|
[
"MIT"
] | null | null | null |
append and delete.py
|
kasyap1234/codingproblems
|
7368222c5fb67b4796410597f68401654878fee0
|
[
"MIT"
] | null | null | null |
def appendAndDelete(s, t, k):
    # Naive strategy: delete every character of s, then append every
    # character of t, counting one operation per character.
    ops = 0
    s = list(s)  # work on a mutable copy instead of clobbering the argument
    while s:
        s.pop()
        ops += 1
    for ch in t:
        s.append(ch)
        ops += 1
    if ops == k:
        print("Yes")
    else:
        print("No")
| 13.052632
| 29
| 0.362903
|
def appendAndDelete(s, t, k):
    ops = 0
    s = list(s)
    while s:
        s.pop()
        ops += 1
    for ch in t:
        s.append(ch)
        ops += 1
    if ops == k:
        print("Yes")
    else:
        print("No")
| true
| true
|
7903206627a571e763876f6dde9b116f493b1161
| 47,203
|
py
|
Python
|
sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py
|
erexer/polyaxon
|
be14dae1ed56d568983388736bcdaf27a7baa4a4
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py
|
erexer/polyaxon
|
be14dae1ed56d568983388736bcdaf27a7baa4a4
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py
|
erexer/polyaxon
|
be14dae1ed56d568983388736bcdaf27a7baa4a4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.1.7
Contact: contact@polyaxon.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from polyaxon_sdk.api_client import ApiClient
from polyaxon_sdk.exceptions import ApiTypeError, ApiValueError # noqa: F401
class UsersV1Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_token(self, body, **kwargs): # noqa: E501
"""Create token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_token(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1Token body: Token body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Token
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.create_token_with_http_info(body, **kwargs) # noqa: E501
def create_token_with_http_info(self, body, **kwargs): # noqa: E501
"""Create token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_token_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1Token body: Token body (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_token" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `body` when calling `create_token`"
) # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/users/tokens",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1Token", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def delete_token(self, uuid, **kwargs): # noqa: E501
"""Delete token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_token(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
        :param str uuid: UUID of the namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.delete_token_with_http_info(uuid, **kwargs) # noqa: E501
def delete_token_with_http_info(self, uuid, **kwargs): # noqa: E501
"""Delete token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_token_with_http_info(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
        :param str uuid: UUID of the namespace (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["uuid"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_token" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'uuid' is set
if self.api_client.client_side_validation and (
"uuid" not in local_var_params
or local_var_params["uuid"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `uuid` when calling `delete_token`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "uuid" in local_var_params:
path_params["uuid"] = local_var_params["uuid"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/users/tokens/{uuid}",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_token(self, uuid, **kwargs): # noqa: E501
"""Get token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_token(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
        :param str uuid: UUID of the namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Token
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.get_token_with_http_info(uuid, **kwargs) # noqa: E501
def get_token_with_http_info(self, uuid, **kwargs): # noqa: E501
"""Get token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_token_with_http_info(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
        :param str uuid: UUID of the token (required)
        :param _return_http_data_only: return the response data only,
                                       without the status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If a
                                 single number is provided, it will be the
                                 total request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["uuid"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_token" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'uuid' is set
if self.api_client.client_side_validation and (
"uuid" not in local_var_params
or local_var_params["uuid"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `uuid` when calling `get_token`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "uuid" in local_var_params:
path_params["uuid"] = local_var_params["uuid"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/users/tokens/{uuid}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1Token", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_user(self, **kwargs): # noqa: E501
"""Get current user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_user(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If a
                                 single number is provided, it will be the
                                 total request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: V1User
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.get_user_with_http_info(**kwargs) # noqa: E501
def get_user_with_http_info(self, **kwargs): # noqa: E501
"""Get current user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_user_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
        :param _return_http_data_only: return the response data only,
                                       without the status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If a
                                 single number is provided, it will be the
                                 total request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: tuple(V1User, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = []
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_user" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/users",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1User", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def list_tokens(self, **kwargs): # noqa: E501
"""List tokens # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_tokens(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
        :param str query: Query to filter the search.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If a
                                 single number is provided, it will be the
                                 total request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: V1ListTokenResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.list_tokens_with_http_info(**kwargs) # noqa: E501
def list_tokens_with_http_info(self, **kwargs): # noqa: E501
"""List tokens # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_tokens_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
        :param str query: Query to filter the search.
        :param _return_http_data_only: return the response data only,
                                       without the status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If a
                                 single number is provided, it will be the
                                 total request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: tuple(V1ListTokenResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["offset", "limit", "sort", "query"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_tokens" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
collection_formats = {}
path_params = {}
query_params = []
if (
"offset" in local_var_params and local_var_params["offset"] is not None
): # noqa: E501
query_params.append(("offset", local_var_params["offset"])) # noqa: E501
if (
"limit" in local_var_params and local_var_params["limit"] is not None
): # noqa: E501
query_params.append(("limit", local_var_params["limit"])) # noqa: E501
if (
"sort" in local_var_params and local_var_params["sort"] is not None
): # noqa: E501
query_params.append(("sort", local_var_params["sort"])) # noqa: E501
if (
"query" in local_var_params and local_var_params["query"] is not None
): # noqa: E501
query_params.append(("query", local_var_params["query"])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/users/tokens",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1ListTokenResponse", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
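    # Pagination sketch (hedged): given `api = UsersV1Api(client)` as in the
    # sketch above; the `sort` value and the `results` attribute of
    # V1ListTokenResponse are assumptions, not checked against the Polyaxon
    # query grammar or model definitions.
    #
    #   page = api.list_tokens(offset=0, limit=20, sort="-created_at")
    #   for token in page.results:
    #       print(token)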
def patch_token(self, token_uuid, body, **kwargs): # noqa: E501
"""Patch token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_token(token_uuid, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str token_uuid: UUID (required)
:param V1Token body: Token body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If a
                                 single number is provided, it will be the
                                 total request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: V1Token
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.patch_token_with_http_info(token_uuid, body, **kwargs) # noqa: E501
def patch_token_with_http_info(self, token_uuid, body, **kwargs): # noqa: E501
"""Patch token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_token_with_http_info(token_uuid, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str token_uuid: UUID (required)
:param V1Token body: Token body (required)
        :param _return_http_data_only: return the response data only,
                                       without the status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If a
                                 single number is provided, it will be the
                                 total request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["token_uuid", "body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_token" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'token_uuid' is set
if self.api_client.client_side_validation and (
"token_uuid" not in local_var_params
or local_var_params["token_uuid"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `token_uuid` when calling `patch_token`"
) # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `body` when calling `patch_token`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "token_uuid" in local_var_params:
path_params["token.uuid"] = local_var_params["token_uuid"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/users/tokens/{token.uuid}",
"PATCH",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1Token", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
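    # Partial-update sketch (hedged): assumes V1Token exposes a `name`
    # attribute; the field name and the `token_uuid` value are illustrative.
    #
    #   import polyaxon_sdk
    #   body = polyaxon_sdk.V1Token(name="ci-token")
    #   api.patch_token(token_uuid, body)  # PATCH /api/v1/users/tokens/{token.uuid}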
def patch_user(self, body, **kwargs): # noqa: E501
"""Patch current user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_user(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1User body: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If a
                                 single number is provided, it will be the
                                 total request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: V1User
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.patch_user_with_http_info(body, **kwargs) # noqa: E501
def patch_user_with_http_info(self, body, **kwargs): # noqa: E501
"""Patch current user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_user_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1User body: (required)
        :param _return_http_data_only: return the response data only,
                                       without the status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If a
                                 single number is provided, it will be the
                                 total request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: tuple(V1User, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_user" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `body` when calling `patch_user`"
) # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/users",
"PATCH",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1User", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def update_token(self, token_uuid, body, **kwargs): # noqa: E501
"""Update token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_token(token_uuid, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str token_uuid: UUID (required)
:param V1Token body: Token body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If a
                                 single number is provided, it will be the
                                 total request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: V1Token
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.update_token_with_http_info(
token_uuid, body, **kwargs
) # noqa: E501
def update_token_with_http_info(self, token_uuid, body, **kwargs): # noqa: E501
"""Update token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_token_with_http_info(token_uuid, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str token_uuid: UUID (required)
:param V1Token body: Token body (required)
        :param _return_http_data_only: return the response data only,
                                       without the status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If a
                                 single number is provided, it will be the
                                 total request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["token_uuid", "body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_token" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'token_uuid' is set
if self.api_client.client_side_validation and (
"token_uuid" not in local_var_params
or local_var_params["token_uuid"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `token_uuid` when calling `update_token`"
) # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `body` when calling `update_token`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "token_uuid" in local_var_params:
path_params["token.uuid"] = local_var_params["token_uuid"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/users/tokens/{token.uuid}",
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1Token", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def update_user(self, body, **kwargs): # noqa: E501
"""Update current user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1User body: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
        :param _request_timeout: timeout setting for this request. If a
                                 single number is provided, it will be the
                                 total request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: V1User
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.update_user_with_http_info(body, **kwargs) # noqa: E501
def update_user_with_http_info(self, body, **kwargs): # noqa: E501
"""Update current user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1User body: (required)
        :param _return_http_data_only: return the response data only,
                                       without the status code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If a
                                 single number is provided, it will be the
                                 total request timeout. It can also be a pair
                                 (tuple) of (connection, read) timeouts.
:return: tuple(V1User, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_user" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `body` when calling `update_user`"
) # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/users",
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1User", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
| 39.867399
| 89
| 0.569815
|
from __future__ import absolute_import
import re
import six
from polyaxon_sdk.api_client import ApiClient
from polyaxon_sdk.exceptions import ApiTypeError, ApiValueError
class UsersV1Api(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_token(self, body, **kwargs):
kwargs["_return_http_data_only"] = True
return self.create_token_with_http_info(body, **kwargs)
def create_token_with_http_info(self, body, **kwargs):
local_var_params = locals()
all_params = ["body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_token" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None
):
raise ApiValueError(
"Missing the required parameter `body` when calling `create_token`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
header_params[
"Content-Type"
] = self.api_client.select_header_content_type(
["application/json"]
)
auth_settings = ["ApiKey"]
return self.api_client.call_api(
"/api/v1/users/tokens",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1Token",
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
),
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def delete_token(self, uuid, **kwargs):
kwargs["_return_http_data_only"] = True
return self.delete_token_with_http_info(uuid, **kwargs)
def delete_token_with_http_info(self, uuid, **kwargs):
local_var_params = locals()
all_params = ["uuid"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_token" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
if self.api_client.client_side_validation and (
"uuid" not in local_var_params
or local_var_params["uuid"] is None
):
raise ApiValueError(
"Missing the required parameter `uuid` when calling `delete_token`"
)
collection_formats = {}
path_params = {}
if "uuid" in local_var_params:
path_params["uuid"] = local_var_params["uuid"]
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
auth_settings = ["ApiKey"]
return self.api_client.call_api(
"/api/v1/users/tokens/{uuid}",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
),
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_token(self, uuid, **kwargs):
kwargs["_return_http_data_only"] = True
return self.get_token_with_http_info(uuid, **kwargs)
def get_token_with_http_info(self, uuid, **kwargs):
local_var_params = locals()
all_params = ["uuid"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_token" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
if self.api_client.client_side_validation and (
"uuid" not in local_var_params
or local_var_params["uuid"] is None
):
raise ApiValueError(
"Missing the required parameter `uuid` when calling `get_token`"
)
collection_formats = {}
path_params = {}
if "uuid" in local_var_params:
path_params["uuid"] = local_var_params["uuid"]
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
auth_settings = ["ApiKey"]
return self.api_client.call_api(
"/api/v1/users/tokens/{uuid}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1Token",
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
),
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_user(self, **kwargs):
kwargs["_return_http_data_only"] = True
return self.get_user_with_http_info(**kwargs)
def get_user_with_http_info(self, **kwargs):
local_var_params = locals()
all_params = []
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_user" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
auth_settings = ["ApiKey"]
return self.api_client.call_api(
"/api/v1/users",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1User",
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
),
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def list_tokens(self, **kwargs):
kwargs["_return_http_data_only"] = True
return self.list_tokens_with_http_info(**kwargs)
def list_tokens_with_http_info(self, **kwargs):
local_var_params = locals()
all_params = ["offset", "limit", "sort", "query"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_tokens" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
collection_formats = {}
path_params = {}
query_params = []
if (
"offset" in local_var_params and local_var_params["offset"] is not None
):
query_params.append(("offset", local_var_params["offset"]))
if (
"limit" in local_var_params and local_var_params["limit"] is not None
):
query_params.append(("limit", local_var_params["limit"]))
if (
"sort" in local_var_params and local_var_params["sort"] is not None
):
query_params.append(("sort", local_var_params["sort"]))
if (
"query" in local_var_params and local_var_params["query"] is not None
):
query_params.append(("query", local_var_params["query"]))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
auth_settings = ["ApiKey"]
return self.api_client.call_api(
"/api/v1/users/tokens",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1ListTokenResponse",
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
),
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def patch_token(self, token_uuid, body, **kwargs):
kwargs["_return_http_data_only"] = True
return self.patch_token_with_http_info(token_uuid, body, **kwargs)
def patch_token_with_http_info(self, token_uuid, body, **kwargs):
local_var_params = locals()
all_params = ["token_uuid", "body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_token" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
if self.api_client.client_side_validation and (
"token_uuid" not in local_var_params
or local_var_params["token_uuid"] is None
):
raise ApiValueError(
"Missing the required parameter `token_uuid` when calling `patch_token`"
)
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None
):
raise ApiValueError(
"Missing the required parameter `body` when calling `patch_token`"
)
collection_formats = {}
path_params = {}
if "token_uuid" in local_var_params:
path_params["token.uuid"] = local_var_params["token_uuid"]
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
header_params[
"Content-Type"
] = self.api_client.select_header_content_type(
["application/json"]
)
auth_settings = ["ApiKey"]
return self.api_client.call_api(
"/api/v1/users/tokens/{token.uuid}",
"PATCH",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1Token",
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
),
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def patch_user(self, body, **kwargs):
kwargs["_return_http_data_only"] = True
return self.patch_user_with_http_info(body, **kwargs)
def patch_user_with_http_info(self, body, **kwargs):
local_var_params = locals()
all_params = ["body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_user" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None
):
raise ApiValueError(
"Missing the required parameter `body` when calling `patch_user`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
header_params[
"Content-Type"
] = self.api_client.select_header_content_type(
["application/json"]
)
auth_settings = ["ApiKey"]
return self.api_client.call_api(
"/api/v1/users",
"PATCH",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1User",
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
),
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def update_token(self, token_uuid, body, **kwargs):
kwargs["_return_http_data_only"] = True
return self.update_token_with_http_info(
token_uuid, body, **kwargs
)
def update_token_with_http_info(self, token_uuid, body, **kwargs):
local_var_params = locals()
all_params = ["token_uuid", "body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_token" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
if self.api_client.client_side_validation and (
"token_uuid" not in local_var_params
or local_var_params["token_uuid"] is None
):
raise ApiValueError(
"Missing the required parameter `token_uuid` when calling `update_token`"
)
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None
):
raise ApiValueError(
"Missing the required parameter `body` when calling `update_token`"
)
collection_formats = {}
path_params = {}
if "token_uuid" in local_var_params:
path_params["token.uuid"] = local_var_params["token_uuid"]
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
header_params[
"Content-Type"
] = self.api_client.select_header_content_type(
["application/json"]
)
auth_settings = ["ApiKey"]
return self.api_client.call_api(
"/api/v1/users/tokens/{token.uuid}",
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1Token",
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
),
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def update_user(self, body, **kwargs):
kwargs["_return_http_data_only"] = True
return self.update_user_with_http_info(body, **kwargs)
def update_user_with_http_info(self, body, **kwargs):
local_var_params = locals()
all_params = ["body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_user" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None
):
raise ApiValueError(
"Missing the required parameter `body` when calling `update_user`"
)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
)
header_params[
"Content-Type"
] = self.api_client.select_header_content_type(
["application/json"]
)
auth_settings = ["ApiKey"]
return self.api_client.call_api(
"/api/v1/users",
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1User",
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
),
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
| true
| true
|
790321021ac2785106da737587b334ddfd60d1c0
| 6,688
|
py
|
Python
|
unified_planning/engines/parallel.py
|
aiplan4eu/unified-planning
|
d2fd18baa3a2110595e5dfdc3f55254df72c3016
|
[
"Apache-2.0"
] | 9
|
2022-02-18T14:51:58.000Z
|
2022-03-31T06:02:43.000Z
|
unified_planning/engines/parallel.py
|
aiplan4eu/unified-planning
|
d2fd18baa3a2110595e5dfdc3f55254df72c3016
|
[
"Apache-2.0"
] | 37
|
2022-02-01T10:44:38.000Z
|
2022-03-31T09:13:42.000Z
|
unified_planning/engines/parallel.py
|
aiplan4eu/unified-planning
|
d2fd18baa3a2110595e5dfdc3f55254df72c3016
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 AIPlan4EU project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
import unified_planning as up
import unified_planning.engines as engines
from unified_planning.plans import Plan
from unified_planning.model import ProblemKind
from unified_planning.exceptions import UPUsageError
from unified_planning.engines.results import LogLevel, PlanGenerationResultStatus, Result, ValidationResult, PlanGenerationResult
from typing import IO, Callable, Dict, List, Optional, Tuple, Type, cast
from fractions import Fraction
from multiprocessing import Process, Queue
class Parallel(engines.engine.Engine,
engines.mixins.OneshotPlannerMixin,
engines.mixins.PlanValidatorMixin):
"""Create a parallel instance of multiple Engines."""
def __init__(self, engines: List[Tuple[Type[engines.engine.Engine], Dict[str, str]]]):
self.engines = engines
@property
def name(self) -> str:
return 'Parallel'
@staticmethod
def supports(problem_kind: 'ProblemKind') -> bool:
        # The supported features depend on the actual engines
return True
def _run_parallel(self, fname, *args) -> List[Result]:
signaling_queue: Queue = Queue()
processes = []
for idx, (engine_class, opts) in enumerate(self.engines):
options = opts
_p = Process(name=str(idx),
target=_run,
args=(idx, engine_class, options,
signaling_queue, fname, *args))
processes.append(_p)
_p.start()
processes_alive = len(processes)
results: List[Result] = []
definitive_result_found: bool = False
while True:
if processes_alive == 0: # Every planner gave a result
break
(idx, res) = signaling_queue.get(block=True)
processes_alive -= 1
if isinstance(res, BaseException):
raise res
else:
assert isinstance(res, Result)
                # If the planner is sure about the result (the result is
                # optimal, the problem is proven unsolvable, or optimality
                # is not required), exit the loop
if res.is_definitive_result(*args):
definitive_result_found = True
break
else:
results.append(res)
for p in processes:
p.terminate()
if definitive_result_found: # A planner found a definitive result
return [res]
return results
def _solve(self, problem: 'up.model.AbstractProblem',
callback: Optional[Callable[['up.engines.results.PlanGenerationResult'], None]] = None,
timeout: Optional[float] = None,
output_stream: Optional[IO[str]] = None) -> 'up.engines.results.PlanGenerationResult':
for engine, _ in self.engines:
assert issubclass(engine, engines.mixins.OneshotPlannerMixin)
if not engine.supports(problem.kind):
raise UPUsageError('Parallel engines cannot solve this kind of problem!')
if callback is not None:
warnings.warn('Parallel engines do not support the callback system.', UserWarning)
if output_stream is not None:
warnings.warn('Parallel engines do not support the output stream system.', UserWarning)
final_reports = self._run_parallel('solve', problem, None, timeout, None)
        # Result statuses listed in the order we prefer them
        result_order: List[PlanGenerationResultStatus] = [
            PlanGenerationResultStatus.SOLVED_OPTIMALLY,
PlanGenerationResultStatus.UNSOLVABLE_PROVEN,
PlanGenerationResultStatus.SOLVED_SATISFICING,
PlanGenerationResultStatus.UNSOLVABLE_INCOMPLETELY,
PlanGenerationResultStatus.TIMEOUT,
PlanGenerationResultStatus.MEMOUT,
PlanGenerationResultStatus.INTERNAL_ERROR,
PlanGenerationResultStatus.UNSUPPORTED_PROBLEM]
final_result: Optional[PlanGenerationResult] = None
result_found: bool = False
for ro in result_order:
if result_found:
break
for r in final_reports:
pgr = cast(PlanGenerationResult, r)
if pgr.status == ro:
result_found = True
final_result = pgr
break
logs = [up.engines.LogMessage(LogLevel.INFO, str(fr)) for fr in final_reports]
        # If no result was produced by any planner, create a default one
if final_result is None:
return up.engines.PlanGenerationResult(PlanGenerationResultStatus.UNSOLVABLE_INCOMPLETELY,
None, self.name, log_messages=logs)
new_plan = problem.normalize_plan(final_result.plan) if final_result.plan is not None else None
if final_result.log_messages is not None:
logs = final_result.log_messages + logs
return up.engines.results.PlanGenerationResult(
final_result.status,
new_plan,
final_result.engine_name,
final_result.metrics,
logs
)
def _validate(self, problem: 'up.model.AbstractProblem',
plan: Plan) -> 'up.engines.results.ValidationResult':
for engine, _ in self.engines:
assert issubclass(engine, engines.mixins.PlanValidatorMixin)
if not engine.supports(problem.kind):
raise UPUsageError('Parallel engines cannot validate this kind of problem!')
return cast(ValidationResult, self._run_parallel('validate', problem, plan)[0])
def _run(idx: int, EngineClass: type, options: Dict[str, str], signaling_queue: Queue, fname: str, *args):
with EngineClass(**options) as s:
try:
local_res = getattr(s, fname)(*args)
except Exception as ex:
signaling_queue.put((idx, ex))
return
signaling_queue.put((idx, local_res))
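# Construction sketch (hedged): `FirstEngine` and `SecondEngine` are
# hypothetical Engine subclasses -- the real classes depend on the installed
# planners -- and `solve` is assumed to be the public entry point that the
# Engine base class routes to `_solve`.
#
#   parallel = Parallel([(FirstEngine, {}), (SecondEngine, {"opt": "value"})])
#   result = parallel.solve(problem)  # the first definitive result wins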
| 44.885906
| 170
| 0.638906
|
import warnings
import unified_planning as up
import unified_planning.engines as engines
from unified_planning.plans import Plan
from unified_planning.model import ProblemKind
from unified_planning.exceptions import UPUsageError
from unified_planning.engines.results import LogLevel, PlanGenerationResultStatus, Result, ValidationResult, PlanGenerationResult
from typing import IO, Callable, Dict, List, Optional, Tuple, Type, cast
from fractions import Fraction
from multiprocessing import Process, Queue
class Parallel(engines.engine.Engine,
engines.mixins.OneshotPlannerMixin,
engines.mixins.PlanValidatorMixin):
def __init__(self, engines: List[Tuple[Type[engines.engine.Engine], Dict[str, str]]]):
self.engines = engines
@property
def name(self) -> str:
return 'Parallel'
@staticmethod
def supports(problem_kind: 'ProblemKind') -> bool:
return True
def _run_parallel(self, fname, *args) -> List[Result]:
signaling_queue: Queue = Queue()
processes = []
for idx, (engine_class, opts) in enumerate(self.engines):
options = opts
_p = Process(name=str(idx),
target=_run,
args=(idx, engine_class, options,
signaling_queue, fname, *args))
processes.append(_p)
_p.start()
processes_alive = len(processes)
results: List[Result] = []
definitive_result_found: bool = False
while True:
if processes_alive == 0:
break
(idx, res) = signaling_queue.get(block=True)
processes_alive -= 1
if isinstance(res, BaseException):
raise res
else:
assert isinstance(res, Result)
if res.is_definitive_result(*args):
definitive_result_found = True
break
else:
results.append(res)
for p in processes:
p.terminate()
if definitive_result_found:
return [res]
return results
def _solve(self, problem: 'up.model.AbstractProblem',
callback: Optional[Callable[['up.engines.results.PlanGenerationResult'], None]] = None,
timeout: Optional[float] = None,
output_stream: Optional[IO[str]] = None) -> 'up.engines.results.PlanGenerationResult':
for engine, _ in self.engines:
assert issubclass(engine, engines.mixins.OneshotPlannerMixin)
if not engine.supports(problem.kind):
raise UPUsageError('Parallel engines cannot solve this kind of problem!')
if callback is not None:
warnings.warn('Parallel engines do not support the callback system.', UserWarning)
if output_stream is not None:
warnings.warn('Parallel engines do not support the output stream system.', UserWarning)
final_reports = self._run_parallel('solve', problem, None, timeout, None)
result_order: List[PlanGenerationResultStatus] = [
PlanGenerationResultStatus.SOLVED_OPTIMALLY,
PlanGenerationResultStatus.UNSOLVABLE_PROVEN,
PlanGenerationResultStatus.SOLVED_SATISFICING,
PlanGenerationResultStatus.UNSOLVABLE_INCOMPLETELY,
PlanGenerationResultStatus.TIMEOUT,
PlanGenerationResultStatus.MEMOUT,
PlanGenerationResultStatus.INTERNAL_ERROR,
PlanGenerationResultStatus.UNSUPPORTED_PROBLEM]
final_result: Optional[PlanGenerationResult] = None
result_found: bool = False
for ro in result_order:
if result_found:
break
for r in final_reports:
pgr = cast(PlanGenerationResult, r)
if pgr.status == ro:
result_found = True
final_result = pgr
break
logs = [up.engines.LogMessage(LogLevel.INFO, str(fr)) for fr in final_reports]
if final_result is None:
return up.engines.PlanGenerationResult(PlanGenerationResultStatus.UNSOLVABLE_INCOMPLETELY,
None, self.name, log_messages=logs)
new_plan = problem.normalize_plan(final_result.plan) if final_result.plan is not None else None
if final_result.log_messages is not None:
logs = final_result.log_messages + logs
return up.engines.results.PlanGenerationResult(
final_result.status,
new_plan,
final_result.engine_name,
final_result.metrics,
logs
)
def _validate(self, problem: 'up.model.AbstractProblem',
plan: Plan) -> 'up.engines.results.ValidationResult':
for engine, _ in self.engines:
assert issubclass(engine, engines.mixins.PlanValidatorMixin)
if not engine.supports(problem.kind):
raise UPUsageError('Parallel engines cannot validate this kind of problem!')
return cast(ValidationResult, self._run_parallel('validate', problem, plan)[0])
def _run(idx: int, EngineClass: type, options: Dict[str, str], signaling_queue: Queue, fname: str, *args):
with EngineClass(**options) as s:
try:
local_res = getattr(s, fname)(*args)
except Exception as ex:
signaling_queue.put((idx, ex))
return
signaling_queue.put((idx, local_res))
| true
| true
|
790322b05cee3400c76b80845682fea10cefc3b3
| 351
|
py
|
Python
|
prompt412/round-101/c.py
|
honux77/algorithm
|
2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee
|
[
"MIT"
] | 2
|
2019-02-08T01:23:07.000Z
|
2020-11-19T12:23:52.000Z
|
prompt412/round-101/c.py
|
honux77/algorithm
|
2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee
|
[
"MIT"
] | null | null | null |
prompt412/round-101/c.py
|
honux77/algorithm
|
2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee
|
[
"MIT"
] | null | null | null |
def solve(n):
    # Read n (name, height) pairs
    a = []
    for _ in range(n):
        name, h = input().split()
        h = float(h)
        a.append((name, h))
    # Sort tallest first; the stable sort preserves input order on ties
    a.sort(key = lambda t: t[1], reverse=True)
    m = a[0][1]
    # Print every contestant sharing the maximum height
    # (note: the loop variable reuses the name n)
    for n, h in a:
        if h != m: break
        print(n, end = " ")
    print()
while True:
    n = int(input())
    if n == 0: break
    solve(n)
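# Illustrative session: for the input
#   3
#   alice 1.70
#   bob 1.82
#   carol 1.82
#   0
# the stable descending sort orders bob and carol first, so the program
# prints "bob carol" (all tallest contestants, input order kept on ties),
# and the trailing 0 terminates the loop.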
| 18.473684
| 46
| 0.433048
|
def solve(n):
a = []
for _ in range(n):
name, h = input().split()
h = float(h)
a.append((name, h))
a.sort(key = lambda t: t[1], reverse=True)
m = a[0][1]
for n, h in a:
if h != m: break
print(n, end = " ")
print()
while True:
n = int(input())
if n == 0: break
solve(n)
| true
| true
|
79032397aef89eafe997e1c467dfa1ed5f356ebb
| 745
|
py
|
Python
|
test/unittests/test_UrbanQTotal.py
|
rajadain/gwlf-e
|
ba2fb9dbc08a3d7a4ced4b83b6f0f1307814e2a3
|
[
"Apache-2.0"
] | null | null | null |
test/unittests/test_UrbanQTotal.py
|
rajadain/gwlf-e
|
ba2fb9dbc08a3d7a4ced4b83b6f0f1307814e2a3
|
[
"Apache-2.0"
] | null | null | null |
test/unittests/test_UrbanQTotal.py
|
rajadain/gwlf-e
|
ba2fb9dbc08a3d7a4ced4b83b6f0f1307814e2a3
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from .VariableUnitTest import VariableUnitTest
from gwlfe.MultiUse_Fxns.Discharge import UrbanQTotal
class TestUrbanQTotal(VariableUnitTest):
def test_UrbanQTotal(self):
z = self.z
np.testing.assert_array_almost_equal(
UrbanQTotal.UrbanQTotal_f(z.NYrs, z.DaysMonth, z.NRur, z.NUrb, z.Temp, z.InitSnow_0, z.Prec, z.Area,
z.CNI_0,
z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA),
UrbanQTotal.UrbanQTotal(z.NYrs, z.DaysMonth, z.NRur, z.NUrb, z.Temp, z.InitSnow_0, z.Prec, z.Area, z.CNI_0,
z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA), decimal=7)
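# Note (hedged): the test asserts that UrbanQTotal_f agrees with the
# reference UrbanQTotal to 7 decimal places on the shared fixture `self.z`
# provided by VariableUnitTest; UrbanQTotal_f is presumably an optimized
# rewrite of the same discharge computation, though this file does not say so.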
| 43.823529
| 119
| 0.606711
|
import numpy as np
from .VariableUnitTest import VariableUnitTest
from gwlfe.MultiUse_Fxns.Discharge import UrbanQTotal
class TestUrbanQTotal(VariableUnitTest):
def test_UrbanQTotal(self):
z = self.z
np.testing.assert_array_almost_equal(
UrbanQTotal.UrbanQTotal_f(z.NYrs, z.DaysMonth, z.NRur, z.NUrb, z.Temp, z.InitSnow_0, z.Prec, z.Area,
z.CNI_0,
z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA),
UrbanQTotal.UrbanQTotal(z.NYrs, z.DaysMonth, z.NRur, z.NUrb, z.Temp, z.InitSnow_0, z.Prec, z.Area, z.CNI_0,
z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA), decimal=7)
| true
| true
|
790323f724e852cdcf7d4d9d3e4d89703473f768
| 3,725
|
py
|
Python
|
panel/routes/server.py
|
emilio2hd/pz-panel
|
6b53f465b2c041e963e2b75e48b1612549ad6fea
|
[
"MIT"
] | null | null | null |
panel/routes/server.py
|
emilio2hd/pz-panel
|
6b53f465b2c041e963e2b75e48b1612549ad6fea
|
[
"MIT"
] | null | null | null |
panel/routes/server.py
|
emilio2hd/pz-panel
|
6b53f465b2c041e963e2b75e48b1612549ad6fea
|
[
"MIT"
] | null | null | null |
import glob
import time
from os import path
from flask import Blueprint, jsonify, current_app, request, Response, json
from flask_login import login_required
from .. import pz_server_state
from ..services.power_actions_service import is_valid_power_action, execute_action
from ..services.server_options_service import read_config, save_config, prepared_config_to_view, formatted_config_lines
from ..services.server_status_service import get_server_status
from ..utils.resources_functions import server_resources
server_blueprint = Blueprint('server', __name__, url_prefix='/server')
@server_blueprint.route('/status')
@login_required
def status():
rcon_host = current_app.config['RCON_HOST']
rcon_password = current_app.config['RCON_PASSWORD']
server_state, players = get_server_status(rcon_host, rcon_password)
return jsonify(
server_state=server_state,
online_players=players,
server_resources=server_resources()
)
@server_blueprint.route('/power-actions', methods=['POST'])
@login_required
def power_actions():
request_data = request.get_json()
pz_user_home = current_app.config["PZ_USER_HOME"]
power_action = request_data.get("power_action", None)
if not is_valid_power_action(power_action):
return jsonify(error="Unknown action"), 400
if not execute_action(power_action, pz_user_home):
return '', 500
return jsonify(server_state=pz_server_state.state)
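# Hedged client-side sketch for the endpoint above; the host, port, and
# "restart" value are illustrative (valid actions are whatever
# is_valid_power_action accepts):
#
#   import requests
#   requests.post("http://localhost:5000/server/power-actions",
#                 json={"power_action": "restart"})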
def get_config(pz_server_config):
config = read_config(pz_server_config)
return {
"WorkshopItems": config["WorkshopItems"],
"Mods": config["Mods"]
}
@server_blueprint.route('/options')
@login_required
def list_workshop_items():
export_config = get_config(current_app.config['PZ_SERVER_CONFIG'])
return jsonify(
WorkshopItems=prepared_config_to_view(export_config["WorkshopItems"]),
Mods=prepared_config_to_view(export_config["Mods"])
)
@server_blueprint.route('/options/export')
@login_required
def export_server_config():
export_config = get_config(current_app.config['PZ_SERVER_CONFIG'])
return current_app.response_class(
formatted_config_lines(export_config),
mimetype='text/event-stream',
headers={"Content-Disposition": "attachment;filename=server_config.ini"}
)
@server_blueprint.route('/options', methods=['POST'])
@login_required
def save_items():
request_data = request.get_json()
config = save_config(current_app.config['PZ_SERVER_CONFIG'], request_data)
export_config = {
"WorkshopItems": prepared_config_to_view(config["WorkshopItems"]),
"Mods": prepared_config_to_view(config["Mods"])
}
return jsonify(export_config)
@server_blueprint.route('/log')
@login_required
def listen_log():
def followLog(serverLogsDir):
logFilePattern = "*_DebugLog-server.txt"
logFiles = glob.glob(path.join(serverLogsDir, logFilePattern))
if not logFiles:
yield 'data: {}\n\n'.format(
json.dumps({"error": True, "errorMessage": "No log file found"})
)
return
logFiles.sort(reverse=True)
with open(logFiles[0]) as serverLogFile:
try:
while True:
line = serverLogFile.readline()
if not line:
continue
time.sleep(0.01)
yield 'data: {}\n\n'.format(
json.dumps({"log": line.strip()})
)
finally:
pass
serverLogsDir = current_app.config['PZ_SERVER_LOGS_DIR']
return Response(followLog(serverLogsDir), mimetype='text/event-stream')
| 29.8
| 119
| 0.68698
|
import glob
import time
from os import path
from flask import Blueprint, jsonify, current_app, request, Response, json
from flask_login import login_required
from .. import pz_server_state
from ..services.power_actions_service import is_valid_power_action, execute_action
from ..services.server_options_service import read_config, save_config, prepared_config_to_view, formatted_config_lines
from ..services.server_status_service import get_server_status
from ..utils.resources_functions import server_resources
server_blueprint = Blueprint('server', __name__, url_prefix='/server')
@server_blueprint.route('/status')
@login_required
def status():
rcon_host = current_app.config['RCON_HOST']
rcon_password = current_app.config['RCON_PASSWORD']
server_state, players = get_server_status(rcon_host, rcon_password)
return jsonify(
server_state=server_state,
online_players=players,
server_resources=server_resources()
)
@server_blueprint.route('/power-actions', methods=['POST'])
@login_required
def power_actions():
request_data = request.get_json()
pz_user_home = current_app.config["PZ_USER_HOME"]
power_action = request_data.get("power_action", None)
if not is_valid_power_action(power_action):
return jsonify(error="Unknown action"), 400
if not execute_action(power_action, pz_user_home):
return '', 500
return jsonify(server_state=pz_server_state.state)
def get_config(pz_server_config):
config = read_config(pz_server_config)
return {
"WorkshopItems": config["WorkshopItems"],
"Mods": config["Mods"]
}
@server_blueprint.route('/options')
@login_required
def list_workshop_items():
export_config = get_config(current_app.config['PZ_SERVER_CONFIG'])
return jsonify(
WorkshopItems=prepared_config_to_view(export_config["WorkshopItems"]),
Mods=prepared_config_to_view(export_config["Mods"])
)
@server_blueprint.route('/options/export')
@login_required
def export_server_config():
export_config = get_config(current_app.config['PZ_SERVER_CONFIG'])
return current_app.response_class(
formatted_config_lines(export_config),
mimetype='text/event-stream',
headers={"Content-Disposition": "attachment;filename=server_config.ini"}
)
@server_blueprint.route('/options', methods=['POST'])
@login_required
def save_items():
request_data = request.get_json()
config = save_config(current_app.config['PZ_SERVER_CONFIG'], request_data)
export_config = {
"WorkshopItems": prepared_config_to_view(config["WorkshopItems"]),
"Mods": prepared_config_to_view(config["Mods"])
}
return jsonify(export_config)
@server_blueprint.route('/log')
@login_required
def listen_log():
def followLog(serverLogsDir):
logFilePattern = "*_DebugLog-server.txt"
logFiles = glob.glob(path.join(serverLogsDir, logFilePattern))
if not logFiles:
yield 'data: {}\n\n'.format(
json.dumps({"error": True, "errorMessage": "No log file found"})
)
return
logFiles.sort(reverse=True)
with open(logFiles[0]) as serverLogFile:
try:
while True:
line = serverLogFile.readline()
if not line:
continue
time.sleep(0.01)
yield 'data: {}\n\n'.format(
json.dumps({"log": line.strip()})
)
finally:
pass
serverLogsDir = current_app.config['PZ_SERVER_LOGS_DIR']
return Response(followLog(serverLogsDir), mimetype='text/event-stream')
| true
| true
|
790324f66d285f888aac7461cc4ad401e26f05c1
| 1,939
|
py
|
Python
|
setup.py
|
fserena/kg-search
|
1f71ff6b90534720bf041a8a87b32b964d5471da
|
[
"Apache-2.0"
] | 1
|
2022-03-19T07:04:28.000Z
|
2022-03-19T07:04:28.000Z
|
setup.py
|
fserena/kg-search
|
1f71ff6b90534720bf041a8a87b32b964d5471da
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
fserena/kg-search
|
1f71ff6b90534720bf041a8a87b32b964d5471da
|
[
"Apache-2.0"
] | null | null | null |
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Ontology Engineering Group
http://www.oeg-upm.net/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2016 Ontology Engineering Group.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import json
from setuptools import setup, find_packages
__author__ = 'Fernando Serena'
with open("kg_search/metadata.json", 'r') as stream:
metadata = json.load(stream)
setup(
name="kg-search",
version=metadata['version'],
author=metadata['author'],
author_email=metadata['email'],
description=metadata['description'],
license="Apache 2",
keywords=["knowledge graph", "wikidata"],
url=metadata['github'],
download_url="https://github.com/fserena/kg-search/tarball/{}".format(metadata['version']),
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
install_requires=['Flask', 'Flask-Cache', 'gunicorn', 'futures', 'requests', 'urllib3', 'rdflib==4.2.0',
'python-dateutil', 'pyld', 'rdflib-jsonld', 'shortuuid', 'wikipedia==1.4.0'],
classifiers=[],
package_dir={'kg_search': 'kg_search'},
package_data={'kg_search': ['metadata.json']},
scripts=['kg-search']
)
| 39.571429
| 108
| 0.581743
|
import json
from setuptools import setup, find_packages
__author__ = 'Fernando Serena'
with open("kg_search/metadata.json", 'r') as stream:
metadata = json.load(stream)
setup(
name="kg-search",
version=metadata['version'],
author=metadata['author'],
author_email=metadata['email'],
description=metadata['description'],
license="Apache 2",
keywords=["knowledge graph", "wikidata"],
url=metadata['github'],
download_url="https://github.com/fserena/kg-search/tarball/{}".format(metadata['version']),
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
install_requires=['Flask', 'Flask-Cache', 'gunicorn', 'futures', 'requests', 'urllib3', 'rdflib==4.2.0',
'python-dateutil', 'pyld', 'rdflib-jsonld', 'shortuuid', 'wikipedia==1.4.0'],
classifiers=[],
package_dir={'kg_search': 'kg_search'},
package_data={'kg_search': ['metadata.json']},
scripts=['kg-search']
)
| true
| true
|
7903279d69766bd89a827f59c2ccea42083baa16
| 5,727
|
py
|
Python
|
tools/license_header.py
|
zhoujqhappy/mxnet-1.3.0-dist-nccl
|
efd4f887c576e5deec3177e69fd1d928bbc999b0
|
[
"Apache-2.0"
] | 399
|
2017-05-30T05:12:48.000Z
|
2022-01-29T05:53:08.000Z
|
tools/license_header.py
|
zhoujqhappy/mxnet-1.3.0-dist-nccl
|
efd4f887c576e5deec3177e69fd1d928bbc999b0
|
[
"Apache-2.0"
] | 58
|
2017-05-30T23:25:32.000Z
|
2019-11-18T09:30:54.000Z
|
tools/license_header.py
|
zhoujqhappy/mxnet-1.3.0-dist-nccl
|
efd4f887c576e5deec3177e69fd1d928bbc999b0
|
[
"Apache-2.0"
] | 107
|
2017-05-30T05:53:22.000Z
|
2021-06-24T02:43:31.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add or check license header
Usage:
- add the default license header to source files that do not contain a valid
license:
python license_header.py add
- check if every file has a license header
python license_header.py check
"""
import re
import os
import argparse
# the default apache license
_LICENSE = """Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License."""
# if a file contains any str in the list, then consider it has been licensed
_LICENSE_PATTERNS = ['Licensed to the Apache Software Foundation']
# the folders or files that will be ignored
_WHITE_LIST = ['R-package/',
'cub/',
'dlpack/',
'dmlc-core/',
'mshadow/',
'nnvm',
'ps-lite',
'src/operator/mkl/',
'cmake/Modules/FindJeMalloc.cmake',
'src/operator/special_functions-inl.h',
'src/operator/nn/pool.h',
'src/operator/contrib/psroi_pooling-inl.h',
'src/operator/contrib/nn/deformable_im2col.h',
'example/speech-demo/io_func/convert2kaldi.py',
'example/speech-demo/decode_mxnet.sh',
'example/image-classification/predict-cpp/image-classification-predict.cc',
'src/operator/contrib/ctc_include/',
'cmake/Modules/FindJeMalloc.cmake']
# language extensions and the corresponding comment mark
_LANGS = {'.cc':'*', '.h':'*', '.cu':'*', '.cuh':'*', '.py':'#',
'.pm':'#', '.scala':'*', '.cc':'*', '.sh':'#', '.cmake':'#',
'.java':'*', '.sh':'#', '.cpp':'*', '.hpp':'*', '.c':'*',
'.bat':'rem', '.pl':'#'}
# Previous license header, which will be removed
_OLD_LICENSE = re.compile('.*Copyright.*by Contributors')
def _has_license(lines):
return any([any([p in l.decode('utf-8') for p in _LICENSE_PATTERNS]) for l in lines])
def _get_license(comment_mark):
if comment_mark == '*':
body = '/*\n'
else:
body = ''
for l in _LICENSE.split('\n'):
if comment_mark == '*':
body += ' '
body += comment_mark
if len(l):
body += ' ' + l
body += '\n'
if comment_mark == '*':
body += ' */\n'
body += '\n'
return body
def _valid_file(fname, verbose=False):
if any([l in fname for l in _WHITE_LIST]):
if verbose:
print('skip ' + fname + ', it matches the white list')
return False
_, ext = os.path.splitext(fname)
if ext not in _LANGS:
if verbose:
print('skip ' + fname + ', unknown file extension')
return False
return True
def process_file(fname, action, verbose=True):
if not _valid_file(fname, verbose):
return True
with open(fname, 'rb') as f:
lines = f.readlines()
if not lines:
return True
if _has_license(lines):
return True
elif action == 'check':
return False
_, ext = os.path.splitext(fname)
with open(fname, 'wb') as f:
# shebang line
if lines[0].startswith(b'#!'):
f.write(lines[0].rstrip()+b'\n\n')
del lines[0]
f.write(str.encode(_get_license(_LANGS[ext])))
for l in lines:
f.write(l.rstrip()+b'\n')
print('added license header to ' + fname)
return False
def process_folder(root, action):
excepts = []
for root, _, files in os.walk(root):
for f in files:
fname = os.path.normpath(os.path.join(root, f))
if not process_file(fname, action):
excepts.append(fname)
if action == 'check' and excepts:
raise Exception('The following files do not contain a valid license, '+
'you can use `python tools/license_header.py add` to add'+
'them automatically', excepts)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Add or check source license header')
parser.add_argument(
'action', nargs=1, type=str,
choices=['add', 'check'], default='add',
help = 'add or check')
args = parser.parse_args()
process_folder(os.path.join(os.path.dirname(__file__), '..'), args.action[0])
| 35.134969
| 90
| 0.624411
|
import re
import os
import argparse
_LICENSE = """Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License."""
_LICENSE_PATTERNS = ['Licensed to the Apache Software Foundation']
_WHITE_LIST = ['R-package/',
'cub/',
'dlpack/',
'dmlc-core/',
'mshadow/',
'nnvm',
'ps-lite',
'src/operator/mkl/',
'cmake/Modules/FindJeMalloc.cmake',
'src/operator/special_functions-inl.h',
'src/operator/nn/pool.h',
'src/operator/contrib/psroi_pooling-inl.h',
'src/operator/contrib/nn/deformable_im2col.h',
'example/speech-demo/io_func/convert2kaldi.py',
'example/speech-demo/decode_mxnet.sh',
'example/image-classification/predict-cpp/image-classification-predict.cc',
'src/operator/contrib/ctc_include/',
'cmake/Modules/FindJeMalloc.cmake']
_LANGS = {'.cc':'*', '.h':'*', '.cu':'*', '.cuh':'*', '.py':'#',
'.pm':'#', '.scala':'*', '.cc':'*', '.sh':'#', '.cmake':'#',
'.java':'*', '.sh':'#', '.cpp':'*', '.hpp':'*', '.c':'*',
'.bat':'rem', '.pl':'#'}
_OLD_LICENSE = re.compile('.*Copyright.*by Contributors')
def _has_license(lines):
return any([any([p in l.decode('utf-8') for p in _LICENSE_PATTERNS]) for l in lines])
def _get_license(comment_mark):
if comment_mark == '*':
body = '/*\n'
else:
body = ''
for l in _LICENSE.split('\n'):
if comment_mark == '*':
body += ' '
body += comment_mark
if len(l):
body += ' ' + l
body += '\n'
if comment_mark == '*':
body += ' */\n'
body += '\n'
return body
def _valid_file(fname, verbose=False):
if any([l in fname for l in _WHITE_LIST]):
if verbose:
print('skip ' + fname + ', it matches the white list')
return False
_, ext = os.path.splitext(fname)
if ext not in _LANGS:
if verbose:
print('skip ' + fname + ', unknown file extension')
return False
return True
def process_file(fname, action, verbose=True):
if not _valid_file(fname, verbose):
return True
with open(fname, 'rb') as f:
lines = f.readlines()
if not lines:
return True
if _has_license(lines):
return True
elif action == 'check':
return False
_, ext = os.path.splitext(fname)
with open(fname, 'wb') as f:
if lines[0].startswith(b'#!'):
f.write(lines[0].rstrip()+b'\n\n')
del lines[0]
f.write(str.encode(_get_license(_LANGS[ext])))
for l in lines:
f.write(l.rstrip()+b'\n')
print('added license header to ' + fname)
return False
def process_folder(root, action):
excepts = []
for root, _, files in os.walk(root):
for f in files:
fname = os.path.normpath(os.path.join(root, f))
if not process_file(fname, action):
excepts.append(fname)
if action == 'check' and excepts:
raise Exception('The following files do not contain a valid license, '+
'you can use `python tools/license_header.py add` to add'+
'them automatically', excepts)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Add or check source license header')
parser.add_argument(
'action', nargs=1, type=str,
choices=['add', 'check'], default='add',
help = 'add or check')
args = parser.parse_args()
process_folder(os.path.join(os.path.dirname(__file__), '..'), args.action[0])
| true
| true
|
7903286da4685a99816f9e4c3cd9c781984ed661
| 2,376
|
py
|
Python
|
demos/python/tutorial/tutorial_modules/tutorial_2_send_ble_commands/ble_command_load_group.py
|
JKlingPhotos/OpenGoPro
|
6f01e0d2212e840af3650cbdbf8a648467eed89a
|
[
"MIT"
] | null | null | null |
demos/python/tutorial/tutorial_modules/tutorial_2_send_ble_commands/ble_command_load_group.py
|
JKlingPhotos/OpenGoPro
|
6f01e0d2212e840af3650cbdbf8a648467eed89a
|
[
"MIT"
] | 1
|
2022-02-03T09:00:45.000Z
|
2022-02-04T09:28:34.000Z
|
demos/python/tutorial/tutorial_modules/tutorial_2_send_ble_commands/ble_command_load_group.py
|
JKlingPhotos/OpenGoPro
|
6f01e0d2212e840af3650cbdbf8a648467eed89a
|
[
"MIT"
] | null | null | null |
# ble_command_load_group.py/Open GoPro, Version 2.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Wed, Sep 1, 2021 5:05:57 PM
import sys
import asyncio
import logging
import argparse
from typing import Optional
from binascii import hexlify
from bleak import BleakClient
from tutorial_modules import GOPRO_BASE_UUID, connect_ble
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
async def main(identifier: Optional[str]) -> None:
# Synchronization event to wait until notification response is received
event = asyncio.Event()
# UUIDs to write to and receive responses from
COMMAND_REQ_UUID = GOPRO_BASE_UUID.format("0072")
COMMAND_RSP_UUID = GOPRO_BASE_UUID.format("0073")
response_uuid = COMMAND_RSP_UUID
client: BleakClient
def notification_handler(handle: int, data: bytes) -> None:
logger.info(f'Received response at {handle=}: {hexlify(data, ":")!r}')
# If this is the correct handle and the status is success, the command was a success
if client.services.characteristics[handle].uuid == response_uuid and data[2] == 0x00:
logger.info("Command sent successfully")
# Anything else is unexpected. This shouldn't happen
else:
logger.error("Unexpected response")
# Notify the writer
event.set()
client = await connect_ble(notification_handler, identifier)
# Write to command request BleUUID to load the video preset group
logger.info("Loading the video preset group...")
event.clear()
await client.write_gatt_char(COMMAND_REQ_UUID, bytearray([0x04, 0x3E, 0x02, 0x03, 0xE8]))
await event.wait() # Wait to receive the notification response
await client.disconnect()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Connect to a GoPro camera, then change the Preset Group to Video."
)
parser.add_argument(
"-i",
"--identifier",
type=str,
help="Last 4 digits of GoPro serial number, which is the last 4 digits of the default camera SSID. If not used, first discovered GoPro will be connected to",
default=None,
)
args = parser.parse_args()
try:
asyncio.run(main(args.identifier))
except:
sys.exit(-1)
else:
sys.exit(0)
| 33
| 165
| 0.694444
|
import sys
import asyncio
import logging
import argparse
from typing import Optional
from binascii import hexlify
from bleak import BleakClient
from tutorial_modules import GOPRO_BASE_UUID, connect_ble
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
async def main(identifier: Optional[str]) -> None:
event = asyncio.Event()
COMMAND_REQ_UUID = GOPRO_BASE_UUID.format("0072")
COMMAND_RSP_UUID = GOPRO_BASE_UUID.format("0073")
response_uuid = COMMAND_RSP_UUID
client: BleakClient
def notification_handler(handle: int, data: bytes) -> None:
logger.info(f'Received response at {handle=}: {hexlify(data, ":")!r}')
if client.services.characteristics[handle].uuid == response_uuid and data[2] == 0x00:
logger.info("Command sent successfully")
else:
logger.error("Unexpected response")
# Notify the writer
event.set()
client = await connect_ble(notification_handler, identifier)
# Write to command request BleUUID to load the video preset group
logger.info("Loading the video preset group...")
event.clear()
await client.write_gatt_char(COMMAND_REQ_UUID, bytearray([0x04, 0x3E, 0x02, 0x03, 0xE8]))
await event.wait() # Wait to receive the notification response
await client.disconnect()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Connect to a GoPro camera, then change the Preset Group to Video."
)
parser.add_argument(
"-i",
"--identifier",
type=str,
help="Last 4 digits of GoPro serial number, which is the last 4 digits of the default camera SSID. If not used, first discovered GoPro will be connected to",
default=None,
)
args = parser.parse_args()
try:
asyncio.run(main(args.identifier))
except:
sys.exit(-1)
else:
sys.exit(0)
| true
| true
|
790328d2d53e6cdc77e4c7f4348a684c609ecd09
| 20,950
|
py
|
Python
|
tests/hosting/test_server.py
|
DaeunYim/pgtoolsservice
|
b7e548718d797883027b2caee2d4722810b33c0f
|
[
"MIT"
] | 33
|
2019-05-27T13:04:35.000Z
|
2022-03-17T13:33:05.000Z
|
tests/hosting/test_server.py
|
DaeunYim/pgtoolsservice
|
b7e548718d797883027b2caee2d4722810b33c0f
|
[
"MIT"
] | 31
|
2019-06-10T01:55:47.000Z
|
2022-03-09T07:27:49.000Z
|
tests/hosting/test_server.py
|
DaeunYim/pgtoolsservice
|
b7e548718d797883027b2caee2d4722810b33c0f
|
[
"MIT"
] | 25
|
2019-05-13T18:39:24.000Z
|
2021-11-16T03:07:33.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import io
from queue import Queue
import time
import unittest
import unittest.mock as mock
from ossdbtoolsservice.hosting.json_rpc_server import (
JSONRPCServer,
IncomingMessageConfiguration,
NotificationContext, RequestContext
)
from ossdbtoolsservice.hosting.json_message import JSONRPCMessage, JSONRPCMessageType
from ossdbtoolsservice.hosting.json_reader import JSONRPCReader
from ossdbtoolsservice.hosting.json_writer import JSONRPCWriter
import tests.utils as utils
class JSONRPCServerTests(unittest.TestCase):
def test_handler_init(self):
# If: I create a Handler class
handler = JSONRPCServer.Handler('class', 'handler')
# Then: The values should be available
self.assertEqual(handler.class_, 'class')
self.assertEqual(handler.handler, 'handler')
def test_server_init(self):
# Setup: Create objects to init the server with
input_stream = io.BytesIO()
output_stream = io.BytesIO()
logger = utils.get_mock_logger()
# If: I create a server
server = JSONRPCServer(input_stream, output_stream, logger=logger)
# Then: The state should be initialized as defined
self.assertIsInstance(server.writer, JSONRPCWriter)
self.assertIsInstance(server.reader, JSONRPCReader)
self.assertIs(server._logger, logger)
self.assertEqual(server._version, '0')
self.assertFalse(server._stop_requested)
# ... The output queue should be empty
self.assertIsInstance(server._output_queue, Queue)
self.assertTrue(server._output_queue.all_tasks_done)
self.assertDictEqual(server._notification_handlers, {})
self.assertListEqual(server._shutdown_handlers, [])
# ... The threads shouldn't be assigned yet
self.assertIsNone(server._output_consumer)
self.assertIsNone(server._input_consumer)
# ... The built-in handlers should be assigned
self.assertTrue('echo' in server._request_handlers)
self.assertIsNotNone(server._request_handlers['echo'])
self.assertTrue('version' in server._request_handlers)
self.assertIsNotNone(server._request_handlers['version'].handler)
self.assertTrue('shutdown' in server._request_handlers)
self.assertIsNotNone(server._request_handlers['shutdown'].handler)
self.assertTrue('exit' in server._request_handlers)
self.assertIsNotNone(server._request_handlers['exit'].handler)
def test_add_shutdown_handler(self):
# If: I add a shutdown handler
handler = mock.MagicMock()
server = JSONRPCServer(None, None)
server.add_shutdown_handler(handler)
# Then: The shutdown handlers should contain the handler
self.assertTrue(handler in server._shutdown_handlers)
def test_set_request_handler(self):
# If: I add a request handler
params = IncomingMessageConfiguration('test/test', int)
handler = mock.MagicMock()
server = JSONRPCServer(None, None)
server.set_request_handler(params, handler)
# Then: The request handler should contain the handler
self.assertTrue(params.method in server._request_handlers)
self.assertIsNotNone(server._request_handlers[params.method])
self.assertIs(server._request_handlers[params.method].class_, int)
self.assertIs(server._request_handlers[params.method].handler, handler)
def test_set_notification_handler(self):
# If: I add a notification handler
params = IncomingMessageConfiguration('test/test', int)
handler = mock.MagicMock()
server = JSONRPCServer(None, None)
server.set_notification_handler(params, handler)
        # Then: The notification handler should contain the handler
self.assertTrue(params.method in server._notification_handlers)
self.assertIsNotNone(server._notification_handlers[params.method])
self.assertIs(server._notification_handlers[params.method].class_, int)
self.assertIs(server._notification_handlers[params.method].handler, handler)
# BUILT-IN HANDLER TESTS ###############################################
@staticmethod
def test_echo_request():
# If: I send a request for an echo
rc = utils.MockRequestContext()
params = {}
JSONRPCServer._handle_echo_request(rc, params)
# Then: The params should have been echoed back
rc.send_response.assert_called_once_with(params)
rc.send_notification.assert_not_called()
rc.send_error.assert_not_called()
@staticmethod
def test_version_request():
# If: I send a request for the version
rc = utils.MockRequestContext()
server = JSONRPCServer(None, None)
server._handle_version_request(rc, None)
# Then: I should get a response
rc.send_response.assert_called_once_with(server._version)
rc.send_error.assert_not_called()
rc.send_notification.assert_not_called()
def test_shutdown_request(self):
# If: I send a request for the service to shutdown
rc = utils.MockRequestContext()
handler = mock.MagicMock()
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server.add_shutdown_handler(handler)
server._handle_shutdown_request(rc, None)
# Then:
# ... The server should be shutting down
self.assertTrue(server._stop_requested)
# ... The shutdown handler should be called
handler.assert_called_once()
# RequestContext TESTS #################################################
def test_request_context_init_test(self):
# If: I create a request context
queue = Queue()
message = JSONRPCMessage.from_dictionary({'id': '123', 'method': 'test/text/', 'params': {}})
rc = RequestContext(message, queue)
# Then: The internal state should be set up correctly
self.assertIs(rc._message, message)
self.assertIs(rc._queue, queue)
def test_request_context_send_response(self):
# Setup: Create a request context
queue = Queue()
in_message = JSONRPCMessage.from_dictionary({'id': '123', 'method': 'test/text/', 'params': {}})
rc = RequestContext(in_message, queue)
# If: I send a response via the response handler
params = {}
rc.send_response(params)
# Then:
# ... There should be a message in the outbound queue
self.assertTrue(queue.not_empty)
out_message = queue.get_nowait()
self.assertIsInstance(out_message, JSONRPCMessage)
# .. The message must be a response with the proper id
self.assertEqual(out_message.message_type, JSONRPCMessageType.ResponseSuccess)
self.assertEqual(out_message.message_id, '123')
self.assertEqual(out_message.message_result, params)
def test_request_context_send_notification(self):
# Setup: Create a request context
queue = Queue()
in_message = JSONRPCMessage.from_dictionary({'id': '123', 'method': 'test/text/', 'params': {}})
rc = RequestContext(in_message, queue)
# If: I send a notification
params = {}
method = 'test/test'
rc.send_notification(method, params)
# Then:
# ... There should be a message in the outbound queue
self.assertTrue(queue.not_empty)
out_message = queue.get_nowait()
self.assertIsInstance(out_message, JSONRPCMessage)
# .. The message must be a response with the proper id
self.assertEqual(out_message.message_type, JSONRPCMessageType.Notification)
self.assertIsNone(out_message.message_id)
self.assertEqual(out_message.message_params, params)
def test_request_context_send_error(self):
# Setup: Create a request context
queue = Queue()
in_message = JSONRPCMessage.from_dictionary({'id': '123', 'method': 'test/text/', 'params': {}})
rc = RequestContext(in_message, queue)
# If: I send an error
params = {}
rc.send_error(params)
# Then:
# ... There should be a message in the outbound queue
self.assertTrue(queue.not_empty)
out_message = queue.get_nowait()
self.assertIsInstance(out_message, JSONRPCMessage)
# .. The message must be a response with the proper id
self.assertEqual(out_message.message_type, JSONRPCMessageType.ResponseError)
self.assertEqual(out_message.message_id, '123')
self.assertIsInstance(out_message.message_error, dict)
self.assertIs(out_message.message_error['message'], params)
# DISPATCHER TESTS #####################################################
@staticmethod
def test_dispatch_response_success():
# TODO: Replace with robust logic once response routing is implemented
# If: I dispatch a response message
message = JSONRPCMessage.create_response('123', {})
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server._dispatch_message(message)
# Then: Nothing should have happened
@staticmethod
def test_dispatch_response_error():
# TODO: Replace with robust logic once error routing is implemented
# If: I dispatch an error message
message = JSONRPCMessage.create_error('123', 0, message='', data={})
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server._dispatch_message(message)
# Then: Nothing should have happened
@staticmethod
def test_dispatch_invalid():
# If: I dispatch an invalid message
message = JSONRPCMessage('invalidType')
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server._dispatch_message(message)
# Then: Nothing should have happened
@staticmethod
def test_dispatch_request_no_handler():
# If: I dispatch a message that has no handler
logger = utils.get_mock_logger()
message = JSONRPCMessage.create_request('123', 'non_existent', {})
server = JSONRPCServer(None, None, logger=logger)
server._dispatch_message(message)
# Then:
# ... Nothing should have happened
# TODO: Capture that an error was sent
# ... A warning should have been logged
logger.warn.assert_called_once()
def test_dispatch_request_none_class(self):
# Setup: Create a server with a single handler that has none for the deserialization class
config = IncomingMessageConfiguration('test/test', None)
handler = mock.MagicMock()
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server.set_request_handler(config, handler)
# If: I dispatch a message that has none set for the deserialization class
params = {}
message = JSONRPCMessage.create_request('123', 'test/test', params)
server._dispatch_message(message)
# Then:
# ... The handler should have been called
handler.assert_called_once()
# ... The parameters to the handler should have been a request context and params
self.assertIsInstance(handler.mock_calls[0][1][0], RequestContext)
self.assertIs(handler.mock_calls[0][1][0]._queue, server._output_queue)
self.assertIs(handler.mock_calls[0][1][0]._message, message)
self.assertIs(handler.mock_calls[0][1][1], params)
def test_dispatch_request_normal(self):
        # Setup: Create a server with a single handler that uses _TestParams as the deserialization class
config = IncomingMessageConfiguration('test/test', _TestParams)
handler = mock.MagicMock()
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server.set_request_handler(config, handler)
        # If: I dispatch a message whose params will be deserialized into _TestParams
params = {}
message = JSONRPCMessage.create_request('123', 'test/test', params)
server._dispatch_message(message)
# Then:
# ... The handler should have been called
handler.assert_called_once()
# ... The parameters to the handler should have been a request context and params
self.assertIsInstance(handler.mock_calls[0][1][0], RequestContext)
self.assertIs(handler.mock_calls[0][1][0]._queue, server._output_queue)
self.assertIs(handler.mock_calls[0][1][0]._message, message)
self.assertIsInstance(handler.mock_calls[0][1][1], _TestParams)
@staticmethod
def test_dispatch_notification_no_handler():
# If: I dispatch a message that has no handler
logger = utils.get_mock_logger()
message = JSONRPCMessage.create_notification('non_existent', {})
server = JSONRPCServer(None, None, logger=logger)
server._dispatch_message(message)
# Then:
# ... Nothing should have happened
# TODO: Capture that an error was sent
# ... A warning should have been logged
logger.warn.assert_called_once()
def test_dispatch_notification_none_class(self):
# Setup: Create a server with a single handler that has none for the deserialization class
config = IncomingMessageConfiguration('test/test', None)
handler = mock.MagicMock()
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server.set_notification_handler(config, handler)
# If: I dispatch a message that has none set for the deserialization class
params = {}
message = JSONRPCMessage.create_notification('test/test', params)
server._dispatch_message(message)
# Then:
# ... The handler should have been called
handler.assert_called_once()
# ... The parameters to the handler should have been a request context and params
self.assertIsInstance(handler.mock_calls[0][1][0], NotificationContext)
self.assertIs(handler.mock_calls[0][1][0]._queue, server._output_queue)
self.assertIs(handler.mock_calls[0][1][1], params)
def test_dispatch_notification_normal(self):
        # Setup: Create a server with a single handler that uses _TestParams as the deserialization class
config = IncomingMessageConfiguration('test/test', _TestParams)
handler = mock.MagicMock()
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server.set_notification_handler(config, handler)
        # If: I dispatch a message whose params will be deserialized into _TestParams
params = {}
message = JSONRPCMessage.create_notification('test/test', params)
server._dispatch_message(message)
# Then:
# ... The handler should have been called
handler.assert_called_once()
# ... The parameters to the handler should have been a request context and params
self.assertIsInstance(handler.mock_calls[0][1][0], NotificationContext)
self.assertIs(handler.mock_calls[0][1][0]._queue, server._output_queue)
self.assertIsInstance(handler.mock_calls[0][1][1], _TestParams)
    # NotificationContext TESTS ###########################################
def test_notification_context_init_test(self):
# If: I create a notification context
queue = Queue()
nc = NotificationContext(queue)
# Then: The internal state should be set up correctly
self.assertIs(nc._queue, queue)
def test_notification_context_send(self):
# Setup: Create a request context
queue = Queue()
nc = NotificationContext(queue)
# If: I send a response via the response handler
method = 'test/test'
params = {}
nc.send_notification(method, params)
# Then:
# ... There should be a message in the outbound queue
self.assertTrue(queue.not_empty)
out_message = queue.get_nowait()
self.assertIsInstance(out_message, JSONRPCMessage)
# .. The message must be a response with the proper id
self.assertEqual(out_message.message_type, JSONRPCMessageType.Notification)
self.assertIsNone(out_message.message_id)
self.assertEqual(out_message.message_params, params)
self.assertEqual(out_message.message_method, method)
# END-TO-END TESTS #####################################################
def test_request_enqueued(self):
# Setup: Create empty io streams
input_stream = io.BytesIO()
output_stream = io.BytesIO()
# If: I submit an outbound request
test_client = JSONRPCServer(input_stream, output_stream)
test_client.send_request('test/test', {'test': 'test'})
# Then:
# ... There should be one request in the outbound queue
request = test_client._output_queue.get()
# ... The queued message should match the request we sent
self.assertEqual(request.message_method, 'test/test')
self.assertDictEqual(request.message_params, {'test': 'test'})
def test_notification_enqueued(self):
# Setup: Create empty io streams
input_stream = io.BytesIO()
output_stream = io.BytesIO()
# If: I submit an outbound request
test_client = JSONRPCServer(input_stream, output_stream)
test_client.send_notification('test/test', {'test': 'test'})
# Then:
# ... There should be one request in the outbound queue
request = test_client._output_queue.get()
# ... The queued message should match the request we sent
self.assertEqual(request.message_method, 'test/test')
self.assertDictEqual(request.message_params, {'test': 'test'})
def test_reads_message(self):
# Setup:
# ... Create an input stream with a single message
input_stream = io.BytesIO(b'Content-Length: 30\r\n\r\n{"method":"test", "params":{}}')
output_stream = io.BytesIO()
# ... Create a server that uses the input and output streams
server = JSONRPCServer(input_stream, output_stream, logger=utils.get_mock_logger())
# ... Patch the server to not dispatch a message
dispatch_mock = mock.MagicMock()
server._dispatch_message = dispatch_mock
# If: I start the server, run it for a bit, and stop it
# TODO: Remove explicit sleep and add spin-locks
server.start()
time.sleep(1)
server.stop()
server.wait_for_exit()
# Then: The dispatch method should have been called
expected_output = JSONRPCMessage.from_dictionary({"method": "test", "params": {}})
dispatch_mock.assert_called_once()
self.assertDictEqual(dispatch_mock.mock_calls[0][1][0].dictionary, expected_output.dictionary)
# Teardown: All background threads should be shut down.
self.assertFalse(server._input_consumer.isAlive())
self.assertFalse(server._output_consumer.isAlive())
def test_read_multiple_messages(self):
# Setup:
# ... Create an input stream with two messages
test_bytes = b'Content-Length: 30\r\n\r\n{"method":"test", "params":{}}'
input_stream = io.BytesIO(test_bytes + test_bytes)
output_stream = io.BytesIO()
# ... Create a server that uses the input and output streams
server = JSONRPCServer(input_stream, output_stream, logger=utils.get_mock_logger())
# ... Patch the server to not dispatch a message
dispatch_mock = mock.MagicMock()
server._dispatch_message = dispatch_mock
# If: I start the server, run it for a bit, and stop it
server.start()
time.sleep(1)
server.stop()
server.wait_for_exit()
# Then: The dispatch method should have been called twice
expected_output = JSONRPCMessage.from_dictionary({"method": "test", "params": {}})
self.assertEqual(len(dispatch_mock.mock_calls), 2)
self.assertDictEqual(dispatch_mock.mock_calls[0][1][0].dictionary, expected_output.dictionary)
self.assertDictEqual(dispatch_mock.mock_calls[1][1][0].dictionary, expected_output.dictionary)
# Teardown: All background threads should be shut down.
self.assertFalse(server._input_consumer.isAlive())
self.assertFalse(server._output_consumer.isAlive())
class _TestParams:
@classmethod
def from_dict(cls, dictionary):
return _TestParams()
def __init__(self):
pass
if __name__ == '__main__':
unittest.main()
| 41.403162
| 104
| 0.664916
|
import io
from queue import Queue
import time
import unittest
import unittest.mock as mock
from ossdbtoolsservice.hosting.json_rpc_server import (
JSONRPCServer,
IncomingMessageConfiguration,
NotificationContext, RequestContext
)
from ossdbtoolsservice.hosting.json_message import JSONRPCMessage, JSONRPCMessageType
from ossdbtoolsservice.hosting.json_reader import JSONRPCReader
from ossdbtoolsservice.hosting.json_writer import JSONRPCWriter
import tests.utils as utils
class JSONRPCServerTests(unittest.TestCase):
def test_handler_init(self):
handler = JSONRPCServer.Handler('class', 'handler')
self.assertEqual(handler.class_, 'class')
self.assertEqual(handler.handler, 'handler')
def test_server_init(self):
input_stream = io.BytesIO()
output_stream = io.BytesIO()
logger = utils.get_mock_logger()
server = JSONRPCServer(input_stream, output_stream, logger=logger)
self.assertIsInstance(server.writer, JSONRPCWriter)
self.assertIsInstance(server.reader, JSONRPCReader)
self.assertIs(server._logger, logger)
self.assertEqual(server._version, '0')
self.assertFalse(server._stop_requested)
self.assertIsInstance(server._output_queue, Queue)
self.assertTrue(server._output_queue.all_tasks_done)
self.assertDictEqual(server._notification_handlers, {})
self.assertListEqual(server._shutdown_handlers, [])
self.assertIsNone(server._output_consumer)
self.assertIsNone(server._input_consumer)
# ... The built-in handlers should be assigned
self.assertTrue('echo' in server._request_handlers)
self.assertIsNotNone(server._request_handlers['echo'])
self.assertTrue('version' in server._request_handlers)
self.assertIsNotNone(server._request_handlers['version'].handler)
self.assertTrue('shutdown' in server._request_handlers)
self.assertIsNotNone(server._request_handlers['shutdown'].handler)
self.assertTrue('exit' in server._request_handlers)
self.assertIsNotNone(server._request_handlers['exit'].handler)
def test_add_shutdown_handler(self):
# If: I add a shutdown handler
handler = mock.MagicMock()
server = JSONRPCServer(None, None)
server.add_shutdown_handler(handler)
# Then: The shutdown handlers should contain the handler
self.assertTrue(handler in server._shutdown_handlers)
def test_set_request_handler(self):
# If: I add a request handler
params = IncomingMessageConfiguration('test/test', int)
handler = mock.MagicMock()
server = JSONRPCServer(None, None)
server.set_request_handler(params, handler)
# Then: The request handler should contain the handler
self.assertTrue(params.method in server._request_handlers)
self.assertIsNotNone(server._request_handlers[params.method])
self.assertIs(server._request_handlers[params.method].class_, int)
self.assertIs(server._request_handlers[params.method].handler, handler)
def test_set_notification_handler(self):
# If: I add a notification handler
params = IncomingMessageConfiguration('test/test', int)
handler = mock.MagicMock()
server = JSONRPCServer(None, None)
server.set_notification_handler(params, handler)
        # Then: The notification handler should contain the handler
self.assertTrue(params.method in server._notification_handlers)
self.assertIsNotNone(server._notification_handlers[params.method])
self.assertIs(server._notification_handlers[params.method].class_, int)
self.assertIs(server._notification_handlers[params.method].handler, handler)
# BUILT-IN HANDLER TESTS ###############################################
@staticmethod
def test_echo_request():
# If: I send a request for an echo
rc = utils.MockRequestContext()
params = {}
JSONRPCServer._handle_echo_request(rc, params)
# Then: The params should have been echoed back
rc.send_response.assert_called_once_with(params)
rc.send_notification.assert_not_called()
rc.send_error.assert_not_called()
@staticmethod
def test_version_request():
# If: I send a request for the version
rc = utils.MockRequestContext()
server = JSONRPCServer(None, None)
server._handle_version_request(rc, None)
# Then: I should get a response
rc.send_response.assert_called_once_with(server._version)
rc.send_error.assert_not_called()
rc.send_notification.assert_not_called()
def test_shutdown_request(self):
# If: I send a request for the service to shutdown
rc = utils.MockRequestContext()
handler = mock.MagicMock()
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server.add_shutdown_handler(handler)
server._handle_shutdown_request(rc, None)
# Then:
# ... The server should be shutting down
self.assertTrue(server._stop_requested)
# ... The shutdown handler should be called
handler.assert_called_once()
# RequestContext TESTS #################################################
def test_request_context_init_test(self):
# If: I create a request context
queue = Queue()
message = JSONRPCMessage.from_dictionary({'id': '123', 'method': 'test/text/', 'params': {}})
rc = RequestContext(message, queue)
# Then: The internal state should be set up correctly
self.assertIs(rc._message, message)
self.assertIs(rc._queue, queue)
def test_request_context_send_response(self):
# Setup: Create a request context
queue = Queue()
in_message = JSONRPCMessage.from_dictionary({'id': '123', 'method': 'test/text/', 'params': {}})
rc = RequestContext(in_message, queue)
# If: I send a response via the response handler
params = {}
rc.send_response(params)
# Then:
# ... There should be a message in the outbound queue
self.assertTrue(queue.not_empty)
out_message = queue.get_nowait()
self.assertIsInstance(out_message, JSONRPCMessage)
# .. The message must be a response with the proper id
self.assertEqual(out_message.message_type, JSONRPCMessageType.ResponseSuccess)
self.assertEqual(out_message.message_id, '123')
self.assertEqual(out_message.message_result, params)
def test_request_context_send_notification(self):
# Setup: Create a request context
queue = Queue()
in_message = JSONRPCMessage.from_dictionary({'id': '123', 'method': 'test/text/', 'params': {}})
rc = RequestContext(in_message, queue)
# If: I send a notification
params = {}
method = 'test/test'
rc.send_notification(method, params)
# Then:
# ... There should be a message in the outbound queue
self.assertTrue(queue.not_empty)
out_message = queue.get_nowait()
self.assertIsInstance(out_message, JSONRPCMessage)
# .. The message must be a response with the proper id
self.assertEqual(out_message.message_type, JSONRPCMessageType.Notification)
self.assertIsNone(out_message.message_id)
self.assertEqual(out_message.message_params, params)
def test_request_context_send_error(self):
# Setup: Create a request context
queue = Queue()
in_message = JSONRPCMessage.from_dictionary({'id': '123', 'method': 'test/text/', 'params': {}})
rc = RequestContext(in_message, queue)
# If: I send an error
params = {}
rc.send_error(params)
# Then:
# ... There should be a message in the outbound queue
self.assertTrue(queue.not_empty)
out_message = queue.get_nowait()
self.assertIsInstance(out_message, JSONRPCMessage)
# .. The message must be a response with the proper id
self.assertEqual(out_message.message_type, JSONRPCMessageType.ResponseError)
self.assertEqual(out_message.message_id, '123')
self.assertIsInstance(out_message.message_error, dict)
self.assertIs(out_message.message_error['message'], params)
# DISPATCHER TESTS #####################################################
@staticmethod
def test_dispatch_response_success():
# TODO: Replace with robust logic once response routing is implemented
# If: I dispatch a response message
message = JSONRPCMessage.create_response('123', {})
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server._dispatch_message(message)
# Then: Nothing should have happened
@staticmethod
def test_dispatch_response_error():
# TODO: Replace with robust logic once error routing is implemented
# If: I dispatch an error message
message = JSONRPCMessage.create_error('123', 0, message='', data={})
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server._dispatch_message(message)
# Then: Nothing should have happened
@staticmethod
def test_dispatch_invalid():
# If: I dispatch an invalid message
message = JSONRPCMessage('invalidType')
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server._dispatch_message(message)
# Then: Nothing should have happened
@staticmethod
def test_dispatch_request_no_handler():
# If: I dispatch a message that has no handler
logger = utils.get_mock_logger()
message = JSONRPCMessage.create_request('123', 'non_existent', {})
server = JSONRPCServer(None, None, logger=logger)
server._dispatch_message(message)
# Then:
# ... Nothing should have happened
# TODO: Capture that an error was sent
# ... A warning should have been logged
logger.warn.assert_called_once()
def test_dispatch_request_none_class(self):
# Setup: Create a server with a single handler that has none for the deserialization class
config = IncomingMessageConfiguration('test/test', None)
handler = mock.MagicMock()
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server.set_request_handler(config, handler)
# If: I dispatch a message that has none set for the deserialization class
params = {}
message = JSONRPCMessage.create_request('123', 'test/test', params)
server._dispatch_message(message)
# Then:
# ... The handler should have been called
handler.assert_called_once()
# ... The parameters to the handler should have been a request context and params
self.assertIsInstance(handler.mock_calls[0][1][0], RequestContext)
self.assertIs(handler.mock_calls[0][1][0]._queue, server._output_queue)
self.assertIs(handler.mock_calls[0][1][0]._message, message)
self.assertIs(handler.mock_calls[0][1][1], params)
def test_dispatch_request_normal(self):
        # Setup: Create a server with a single handler that uses _TestParams as the deserialization class
config = IncomingMessageConfiguration('test/test', _TestParams)
handler = mock.MagicMock()
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server.set_request_handler(config, handler)
        # If: I dispatch a message whose params will be deserialized into _TestParams
params = {}
message = JSONRPCMessage.create_request('123', 'test/test', params)
server._dispatch_message(message)
# Then:
# ... The handler should have been called
handler.assert_called_once()
# ... The parameters to the handler should have been a request context and params
self.assertIsInstance(handler.mock_calls[0][1][0], RequestContext)
self.assertIs(handler.mock_calls[0][1][0]._queue, server._output_queue)
self.assertIs(handler.mock_calls[0][1][0]._message, message)
self.assertIsInstance(handler.mock_calls[0][1][1], _TestParams)
@staticmethod
def test_dispatch_notification_no_handler():
# If: I dispatch a message that has no handler
logger = utils.get_mock_logger()
message = JSONRPCMessage.create_notification('non_existent', {})
server = JSONRPCServer(None, None, logger=logger)
server._dispatch_message(message)
# Then:
# ... Nothing should have happened
# TODO: Capture that an error was sent
# ... A warning should have been logged
logger.warn.assert_called_once()
def test_dispatch_notification_none_class(self):
# Setup: Create a server with a single handler that has none for the deserialization class
config = IncomingMessageConfiguration('test/test', None)
handler = mock.MagicMock()
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server.set_notification_handler(config, handler)
# If: I dispatch a message that has none set for the deserialization class
params = {}
message = JSONRPCMessage.create_notification('test/test', params)
server._dispatch_message(message)
# Then:
# ... The handler should have been called
handler.assert_called_once()
# ... The parameters to the handler should have been a request context and params
self.assertIsInstance(handler.mock_calls[0][1][0], NotificationContext)
self.assertIs(handler.mock_calls[0][1][0]._queue, server._output_queue)
self.assertIs(handler.mock_calls[0][1][1], params)
def test_dispatch_notification_normal(self):
        # Setup: Create a server with a single handler that uses _TestParams as the deserialization class
config = IncomingMessageConfiguration('test/test', _TestParams)
handler = mock.MagicMock()
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server.set_notification_handler(config, handler)
        # If: I dispatch a message whose params will be deserialized into _TestParams
params = {}
message = JSONRPCMessage.create_notification('test/test', params)
server._dispatch_message(message)
# Then:
# ... The handler should have been called
handler.assert_called_once()
# ... The parameters to the handler should have been a request context and params
self.assertIsInstance(handler.mock_calls[0][1][0], NotificationContext)
self.assertIs(handler.mock_calls[0][1][0]._queue, server._output_queue)
self.assertIsInstance(handler.mock_calls[0][1][1], _TestParams)
    # NotificationContext TESTS ###########################################
def test_notification_context_init_test(self):
# If: I create a notification context
queue = Queue()
nc = NotificationContext(queue)
# Then: The internal state should be set up correctly
self.assertIs(nc._queue, queue)
def test_notification_context_send(self):
# Setup: Create a request context
queue = Queue()
nc = NotificationContext(queue)
# If: I send a response via the response handler
method = 'test/test'
params = {}
nc.send_notification(method, params)
# Then:
# ... There should be a message in the outbound queue
self.assertTrue(queue.not_empty)
out_message = queue.get_nowait()
self.assertIsInstance(out_message, JSONRPCMessage)
# .. The message must be a response with the proper id
self.assertEqual(out_message.message_type, JSONRPCMessageType.Notification)
self.assertIsNone(out_message.message_id)
self.assertEqual(out_message.message_params, params)
self.assertEqual(out_message.message_method, method)
# END-TO-END TESTS #####################################################
def test_request_enqueued(self):
# Setup: Create empty io streams
input_stream = io.BytesIO()
output_stream = io.BytesIO()
# If: I submit an outbound request
test_client = JSONRPCServer(input_stream, output_stream)
test_client.send_request('test/test', {'test': 'test'})
# Then:
# ... There should be one request in the outbound queue
request = test_client._output_queue.get()
# ... The queued message should match the request we sent
self.assertEqual(request.message_method, 'test/test')
self.assertDictEqual(request.message_params, {'test': 'test'})
def test_notification_enqueued(self):
# Setup: Create empty io streams
input_stream = io.BytesIO()
output_stream = io.BytesIO()
# If: I submit an outbound request
test_client = JSONRPCServer(input_stream, output_stream)
test_client.send_notification('test/test', {'test': 'test'})
# Then:
# ... There should be one request in the outbound queue
request = test_client._output_queue.get()
# ... The queued message should match the request we sent
self.assertEqual(request.message_method, 'test/test')
self.assertDictEqual(request.message_params, {'test': 'test'})
def test_reads_message(self):
# Setup:
# ... Create an input stream with a single message
input_stream = io.BytesIO(b'Content-Length: 30\r\n\r\n{"method":"test", "params":{}}')
output_stream = io.BytesIO()
# ... Create a server that uses the input and output streams
server = JSONRPCServer(input_stream, output_stream, logger=utils.get_mock_logger())
# ... Patch the server to not dispatch a message
dispatch_mock = mock.MagicMock()
server._dispatch_message = dispatch_mock
# If: I start the server, run it for a bit, and stop it
# TODO: Remove explicit sleep and add spin-locks
server.start()
time.sleep(1)
server.stop()
server.wait_for_exit()
# Then: The dispatch method should have been called
expected_output = JSONRPCMessage.from_dictionary({"method": "test", "params": {}})
dispatch_mock.assert_called_once()
self.assertDictEqual(dispatch_mock.mock_calls[0][1][0].dictionary, expected_output.dictionary)
# Teardown: All background threads should be shut down.
self.assertFalse(server._input_consumer.isAlive())
self.assertFalse(server._output_consumer.isAlive())
def test_read_multiple_messages(self):
# Setup:
# ... Create an input stream with two messages
test_bytes = b'Content-Length: 30\r\n\r\n{"method":"test", "params":{}}'
input_stream = io.BytesIO(test_bytes + test_bytes)
output_stream = io.BytesIO()
# ... Create a server that uses the input and output streams
server = JSONRPCServer(input_stream, output_stream, logger=utils.get_mock_logger())
# ... Patch the server to not dispatch a message
dispatch_mock = mock.MagicMock()
server._dispatch_message = dispatch_mock
# If: I start the server, run it for a bit, and stop it
server.start()
time.sleep(1)
server.stop()
server.wait_for_exit()
# Then: The dispatch method should have been called twice
expected_output = JSONRPCMessage.from_dictionary({"method": "test", "params": {}})
self.assertEqual(len(dispatch_mock.mock_calls), 2)
self.assertDictEqual(dispatch_mock.mock_calls[0][1][0].dictionary, expected_output.dictionary)
self.assertDictEqual(dispatch_mock.mock_calls[1][1][0].dictionary, expected_output.dictionary)
# Teardown: All background threads should be shut down.
        self.assertFalse(server._input_consumer.is_alive())
        self.assertFalse(server._output_consumer.is_alive())
class _TestParams:
@classmethod
def from_dict(cls, dictionary):
return _TestParams()
def __init__(self):
pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
790328e46c96c024cf64b5cf09694459179e5a14
| 1,696
|
py
|
Python
|
sa/profiles/DCN/DCWS/get_version.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
sa/profiles/DCN/DCWS/get_version.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
sa/profiles/DCN/DCWS/get_version.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ---------------------------------------------------------------------
# Vendor: DCN
# OS: DCWS
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetversion import IGetVersion
class Script(BaseScript):
name = "DCN.DCWS.get_version"
cache = True
interface = IGetVersion
rx_platform = re.compile(r"\s*(?P<platform>\S+)\s+Device.", re.MULTILINE)
rx_ver = re.compile(r"^\s*Soft[Ww]are\s+Version\s+(?P<version>\S+)\n", re.MULTILINE)
rx_bver = re.compile(r"^\s*Boot[Rr]om\s+Version\s+(?P<bversion>\S+)\n", re.MULTILINE)
rx_hver = re.compile(r"^\s*Hard[Ww]are\s+Version\s+(?P<hversion>\S+)\n", re.MULTILINE)
rx_serial = re.compile(r"^\s*Serial\s+No\s+(?P<serial>\S+)\n", re.MULTILINE)
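    # A minimal sketch of the kind of "show version" output these patterns are
    # written against (hypothetical lines, for illustration only):
    #   DCWS-6028 Device.
    #   SoftWare Version 7.x.x.x
    #   BootRom Version 1.x.x
    #   HardWare Version 1.x
    #   Serial No ABC0123456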
def execute(self):
ver = self.cli("show version", cached=True)
match = self.re_search(self.rx_platform, ver)
vmatch = self.re_search(self.rx_ver, ver)
bmatch = self.re_search(self.rx_bver, ver)
hmatch = self.re_search(self.rx_hver, ver)
smatch = self.re_search(self.rx_serial, ver)
return {
"vendor": "DCN",
"platform": match.group("platform"),
"version": vmatch.group("version"),
"attributes": {
"Bootrom version": bmatch.group("bversion"),
"HW version": hmatch.group("hversion"),
"Serial Number": smatch.group("serial"),
},
}
| 37.688889
| 90
| 0.533019
|
import re
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetversion import IGetVersion
class Script(BaseScript):
name = "DCN.DCWS.get_version"
cache = True
interface = IGetVersion
rx_platform = re.compile(r"\s*(?P<platform>\S+)\s+Device.", re.MULTILINE)
rx_ver = re.compile(r"^\s*Soft[Ww]are\s+Version\s+(?P<version>\S+)\n", re.MULTILINE)
rx_bver = re.compile(r"^\s*Boot[Rr]om\s+Version\s+(?P<bversion>\S+)\n", re.MULTILINE)
rx_hver = re.compile(r"^\s*Hard[Ww]are\s+Version\s+(?P<hversion>\S+)\n", re.MULTILINE)
rx_serial = re.compile(r"^\s*Serial\s+No\s+(?P<serial>\S+)\n", re.MULTILINE)
def execute(self):
ver = self.cli("show version", cached=True)
match = self.re_search(self.rx_platform, ver)
vmatch = self.re_search(self.rx_ver, ver)
bmatch = self.re_search(self.rx_bver, ver)
hmatch = self.re_search(self.rx_hver, ver)
smatch = self.re_search(self.rx_serial, ver)
return {
"vendor": "DCN",
"platform": match.group("platform"),
"version": vmatch.group("version"),
"attributes": {
"Bootrom version": bmatch.group("bversion"),
"HW version": hmatch.group("hversion"),
"Serial Number": smatch.group("serial"),
},
}
| true
| true
|
79032ad483b099f9c6ebdd662967e19e2229d7fb
| 9,122
|
py
|
Python
|
sdks/python/apache_beam/testing/benchmarks/nexmark/nexmark_launcher.py
|
shitanshu-google/beam
|
9cd959f61d377874ee1839c2de4bb8f65a948ecc
|
[
"Apache-2.0"
] | 3
|
2020-08-28T17:47:26.000Z
|
2021-08-17T06:38:58.000Z
|
sdks/python/apache_beam/testing/benchmarks/nexmark/nexmark_launcher.py
|
shitanshu-google/beam
|
9cd959f61d377874ee1839c2de4bb8f65a948ecc
|
[
"Apache-2.0"
] | 5
|
2020-11-13T19:06:10.000Z
|
2021-11-10T19:56:12.000Z
|
sdks/python/apache_beam/testing/benchmarks/nexmark/nexmark_launcher.py
|
shitanshu-google/beam
|
9cd959f61d377874ee1839c2de4bb8f65a948ecc
|
[
"Apache-2.0"
] | 1
|
2021-10-05T20:53:52.000Z
|
2021-10-05T20:53:52.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Nexmark launcher.
The Nexmark suite is a series of queries (streaming pipelines) performed
on a simulation of auction events. The launcher orchestrates the generation
and parsing of streaming events and the running of queries.
Model
- Person: Author of an auction or a bid.
- Auction: Item under auction.
- Bid: A bid for an item under auction.
Events
- Create Person
- Create Auction
- Create Bid
Queries
- Query0: Pass through (send and receive auction events).
Usage
- DirectRunner
python nexmark_launcher.py \
--query/q <query number> \
--project <project id> \
--loglevel=DEBUG (optional) \
--wait_until_finish_duration <time_in_ms> \
--streaming
- DataflowRunner
python nexmark_launcher.py \
--query/q <query number> \
--project <project id> \
--region <GCE region> \
--loglevel=DEBUG (optional) \
--wait_until_finish_duration <time_in_ms> \
--streaming \
--sdk_location <apache_beam tar.gz> \
--staging_location=gs://... \
--temp_location=gs://
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import argparse
import logging
import sys
import uuid
from google.cloud import pubsub
import apache_beam as beam
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TestOptions
from apache_beam.testing.benchmarks.nexmark.nexmark_util import Command
from apache_beam.testing.benchmarks.nexmark.queries import query0
from apache_beam.testing.benchmarks.nexmark.queries import query1
from apache_beam.testing.benchmarks.nexmark.queries import query2
class NexmarkLauncher(object):
def __init__(self):
self.parse_args()
self.uuid = str(uuid.uuid4())
self.topic_name = self.args.topic_name + self.uuid
self.subscription_name = self.args.subscription_name + self.uuid
publish_client = pubsub.Client(project=self.project)
topic = publish_client.topic(self.topic_name)
if topic.exists():
logging.info('deleting topic %s', self.topic_name)
topic.delete()
logging.info('creating topic %s', self.topic_name)
topic.create()
sub = topic.subscription(self.subscription_name)
if sub.exists():
      logging.info('deleting sub %s', self.subscription_name)
sub.delete()
    logging.info('creating sub %s', self.subscription_name)
sub.create()
def parse_args(self):
parser = argparse.ArgumentParser()
parser.add_argument(
'--query',
'-q',
type=int,
action='append',
required=True,
choices=[0, 1, 2],
help='Query to run')
parser.add_argument(
'--subscription_name',
type=str,
help='Pub/Sub subscription to read from')
parser.add_argument(
'--topic_name', type=str, help='Pub/Sub topic to read from')
parser.add_argument(
'--loglevel',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default='INFO',
help='Set logging level to debug')
parser.add_argument(
'--input',
type=str,
required=True,
help='Path to the data file containing nexmark events.')
self.args, self.pipeline_args = parser.parse_known_args()
logging.basicConfig(
level=getattr(logging, self.args.loglevel, None),
format='(%(threadName)-10s) %(message)s')
self.pipeline_options = PipelineOptions(self.pipeline_args)
logging.debug('args, pipeline_args: %s, %s', self.args, self.pipeline_args)
# Usage with Dataflow requires a project to be supplied.
self.project = self.pipeline_options.view_as(GoogleCloudOptions).project
if self.project is None:
parser.print_usage()
print(sys.argv[0] + ': error: argument --project is required')
sys.exit(1)
# Pub/Sub is currently available for use only in streaming pipelines.
self.streaming = self.pipeline_options.view_as(StandardOptions).streaming
if self.streaming is None:
parser.print_usage()
print(sys.argv[0] + ': error: argument --streaming is required')
sys.exit(1)
# wait_until_finish ensures that the streaming job is canceled.
self.wait_until_finish_duration = (
self.pipeline_options.view_as(TestOptions).wait_until_finish_duration)
if self.wait_until_finish_duration is None:
parser.print_usage()
print(sys.argv[0] + ': error: argument --wait_until_finish_duration is required') # pylint: disable=line-too-long
sys.exit(1)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
self.pipeline_options.view_as(SetupOptions).save_main_session = True
def generate_events(self):
publish_client = pubsub.Client(project=self.project)
topic = publish_client.topic(self.topic_name)
sub = topic.subscription(self.subscription_name)
logging.info('Generating auction events to topic %s', topic.name)
if self.args.input.startswith('gs://'):
from apache_beam.io.gcp.gcsfilesystem import GCSFileSystem
fs = GCSFileSystem(self.pipeline_options)
with fs.open(self.args.input) as infile:
for line in infile:
topic.publish(line)
else:
with open(self.args.input) as infile:
for line in infile:
topic.publish(line)
logging.info('Finished event generation.')
# Read from PubSub into a PCollection.
if self.args.subscription_name:
raw_events = self.pipeline | 'ReadPubSub' >> beam.io.ReadFromPubSub(
subscription=sub.full_name)
else:
raw_events = self.pipeline | 'ReadPubSub' >> beam.io.ReadFromPubSub(
topic=topic.full_name)
return raw_events
def run_query(self, query, query_args, query_errors):
try:
self.parse_args()
self.pipeline = beam.Pipeline(options=self.pipeline_options)
raw_events = self.generate_events()
query.load(raw_events, query_args)
result = self.pipeline.run()
job_duration = (
self.pipeline_options.view_as(TestOptions).wait_until_finish_duration)
if self.pipeline_options.view_as(StandardOptions).runner == 'DataflowRunner': # pylint: disable=line-too-long
result.wait_until_finish(duration=job_duration)
result.cancel()
else:
result.wait_until_finish()
except Exception as exc:
query_errors.append(str(exc))
raise
def cleanup(self):
publish_client = pubsub.Client(project=self.project)
topic = publish_client.topic(self.topic_name)
if topic.exists():
topic.delete()
sub = topic.subscription(self.subscription_name)
if sub.exists():
sub.delete()
def run(self):
queries = {
0: query0,
1: query1,
2: query2, # TODO(mariagh): Add more queries.
}
# TODO(mariagh): Move to a config file.
query_args = {2: {'auction_id': 'a1003'}}
query_errors = []
for i in self.args.query:
self.parse_args()
logging.info('Running query %d', i)
# The DirectRunner is the default runner, and it needs
# special handling to cancel streaming jobs.
launch_from_direct_runner = self.pipeline_options.view_as(
StandardOptions).runner in [None, 'DirectRunner']
query_duration = self.pipeline_options.view_as(TestOptions).wait_until_finish_duration # pylint: disable=line-too-long
if launch_from_direct_runner:
command = Command(
self.run_query, args=[queries[i], query_args.get(i), query_errors])
command.run(timeout=query_duration // 1000)
      else:
        try:
          self.run_query(queries[i], query_args.get(i), query_errors)
        except Exception:
          # run_query has already recorded the error message in query_errors.
          pass
if query_errors:
logging.error('Query failed with %s', ', '.join(query_errors))
else:
logging.info('Queries run: %s', self.args.query)
if __name__ == '__main__':
launcher = NexmarkLauncher()
launcher.run()
launcher.cleanup()
| 34.164794
| 125
| 0.691515
|
from __future__ import absolute_import
from __future__ import print_function
import argparse
import logging
import sys
import uuid
from google.cloud import pubsub
import apache_beam as beam
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TestOptions
from apache_beam.testing.benchmarks.nexmark.nexmark_util import Command
from apache_beam.testing.benchmarks.nexmark.queries import query0
from apache_beam.testing.benchmarks.nexmark.queries import query1
from apache_beam.testing.benchmarks.nexmark.queries import query2
class NexmarkLauncher(object):
def __init__(self):
self.parse_args()
self.uuid = str(uuid.uuid4())
self.topic_name = self.args.topic_name + self.uuid
self.subscription_name = self.args.subscription_name + self.uuid
publish_client = pubsub.Client(project=self.project)
topic = publish_client.topic(self.topic_name)
if topic.exists():
logging.info('deleting topic %s', self.topic_name)
topic.delete()
logging.info('creating topic %s', self.topic_name)
topic.create()
sub = topic.subscription(self.subscription_name)
if sub.exists():
      logging.info('deleting sub %s', self.subscription_name)
sub.delete()
    logging.info('creating sub %s', self.subscription_name)
sub.create()
def parse_args(self):
parser = argparse.ArgumentParser()
parser.add_argument(
'--query',
'-q',
type=int,
action='append',
required=True,
choices=[0, 1, 2],
help='Query to run')
parser.add_argument(
'--subscription_name',
type=str,
help='Pub/Sub subscription to read from')
parser.add_argument(
'--topic_name', type=str, help='Pub/Sub topic to read from')
parser.add_argument(
'--loglevel',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default='INFO',
help='Set logging level to debug')
parser.add_argument(
'--input',
type=str,
required=True,
help='Path to the data file containing nexmark events.')
self.args, self.pipeline_args = parser.parse_known_args()
logging.basicConfig(
level=getattr(logging, self.args.loglevel, None),
format='(%(threadName)-10s) %(message)s')
self.pipeline_options = PipelineOptions(self.pipeline_args)
logging.debug('args, pipeline_args: %s, %s', self.args, self.pipeline_args)
self.project = self.pipeline_options.view_as(GoogleCloudOptions).project
if self.project is None:
parser.print_usage()
print(sys.argv[0] + ': error: argument --project is required')
sys.exit(1)
self.streaming = self.pipeline_options.view_as(StandardOptions).streaming
if self.streaming is None:
parser.print_usage()
print(sys.argv[0] + ': error: argument --streaming is required')
sys.exit(1)
self.wait_until_finish_duration = (
self.pipeline_options.view_as(TestOptions).wait_until_finish_duration)
if self.wait_until_finish_duration is None:
parser.print_usage()
print(sys.argv[0] + ': error: argument --wait_until_finish_duration is required')
sys.exit(1)
# workflow rely on global context (e.g., a module imported at module level).
self.pipeline_options.view_as(SetupOptions).save_main_session = True
def generate_events(self):
publish_client = pubsub.Client(project=self.project)
topic = publish_client.topic(self.topic_name)
sub = topic.subscription(self.subscription_name)
logging.info('Generating auction events to topic %s', topic.name)
if self.args.input.startswith('gs://'):
from apache_beam.io.gcp.gcsfilesystem import GCSFileSystem
fs = GCSFileSystem(self.pipeline_options)
with fs.open(self.args.input) as infile:
for line in infile:
topic.publish(line)
else:
with open(self.args.input) as infile:
for line in infile:
topic.publish(line)
logging.info('Finished event generation.')
# Read from PubSub into a PCollection.
if self.args.subscription_name:
raw_events = self.pipeline | 'ReadPubSub' >> beam.io.ReadFromPubSub(
subscription=sub.full_name)
else:
raw_events = self.pipeline | 'ReadPubSub' >> beam.io.ReadFromPubSub(
topic=topic.full_name)
return raw_events
def run_query(self, query, query_args, query_errors):
try:
self.parse_args()
self.pipeline = beam.Pipeline(options=self.pipeline_options)
raw_events = self.generate_events()
query.load(raw_events, query_args)
result = self.pipeline.run()
job_duration = (
self.pipeline_options.view_as(TestOptions).wait_until_finish_duration)
if self.pipeline_options.view_as(StandardOptions).runner == 'DataflowRunner': # pylint: disable=line-too-long
result.wait_until_finish(duration=job_duration)
result.cancel()
else:
result.wait_until_finish()
except Exception as exc:
query_errors.append(str(exc))
raise
def cleanup(self):
publish_client = pubsub.Client(project=self.project)
topic = publish_client.topic(self.topic_name)
if topic.exists():
topic.delete()
sub = topic.subscription(self.subscription_name)
if sub.exists():
sub.delete()
def run(self):
queries = {
0: query0,
1: query1,
2: query2, # TODO(mariagh): Add more queries.
}
# TODO(mariagh): Move to a config file.
query_args = {2: {'auction_id': 'a1003'}}
query_errors = []
for i in self.args.query:
self.parse_args()
logging.info('Running query %d', i)
# The DirectRunner is the default runner, and it needs
# special handling to cancel streaming jobs.
launch_from_direct_runner = self.pipeline_options.view_as(
StandardOptions).runner in [None, 'DirectRunner']
query_duration = self.pipeline_options.view_as(TestOptions).wait_until_finish_duration # pylint: disable=line-too-long
if launch_from_direct_runner:
command = Command(
self.run_query, args=[queries[i], query_args.get(i), query_errors])
command.run(timeout=query_duration // 1000)
else:
try:
          self.run_query(queries[i], query_args.get(i), query_errors)
        except Exception:
          pass
if query_errors:
logging.error('Query failed with %s', ', '.join(query_errors))
else:
logging.info('Queries run: %s', self.args.query)
if __name__ == '__main__':
launcher = NexmarkLauncher()
launcher.run()
launcher.cleanup()
| true
| true
|
79032cdf6c2c38fdab80a0f0def42921f387d137
| 14,378
|
py
|
Python
|
release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/FabricSheetType.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 182
|
2017-06-27T02:26:15.000Z
|
2022-03-30T18:53:43.000Z
|
release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/FabricSheetType.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 28
|
2017-06-27T13:38:23.000Z
|
2022-03-15T11:19:44.000Z
|
release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/FabricSheetType.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 67
|
2017-06-28T09:43:59.000Z
|
2022-03-20T21:17:10.000Z
|
class FabricSheetType(ElementType,IDisposable):
""" Represents a fabric sheet type,used in the generation of fabric wires. """
@staticmethod
def CreateDefaultFabricSheetType(ADoc):
"""
CreateDefaultFabricSheetType(ADoc: Document) -> ElementId
Creates a new FabricSheetType object with a default name.
ADoc: The document.
Returns: The newly created type id.
"""
pass
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def GetReinforcementRoundingManager(self):
"""
GetReinforcementRoundingManager(self: FabricSheetType) -> FabricRoundingManager
Returns an object for managing reinforcement rounding override settings.
Returns: The rounding manager.
"""
pass
def GetWireItem(self,wireIndex,direction):
"""
GetWireItem(self: FabricSheetType,wireIndex: int,direction: WireDistributionDirection) -> FabricWireItem
Gets the Wire stored in the FabricSheetType at the associated index.
wireIndex: Item index in the Fabric Sheet
direction: Wire distribution direction of the inquired item
Returns: Fabric wire Item
"""
pass
def IsCustom(self):
"""
IsCustom(self: FabricSheetType) -> bool
Verifies if the type is Custom Fabric Sheet
Returns: True if Layout is set on Custom and if the wireArr is not null
"""
pass
def IsValidMajorLapSplice(self,majorLapSplice):
"""
IsValidMajorLapSplice(self: FabricSheetType,majorLapSplice: float) -> bool
Identifies if the input value is valid to be applied as the major lap splice
value for this FabricSheetType.
"""
pass
def IsValidMinorLapSplice(self,minorLapSplice):
"""
IsValidMinorLapSplice(self: FabricSheetType,minorLapSplice: float) -> bool
Identifies if the input value is valid to be applied as the minor lap splice
value for this FabricSheetType.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def SetLayoutAsCustomPattern(self,minorStartOverhang,minorEndOverhang,majorStartOverhang,majorEndOverhang,minorFabricWireItems,majorFabricWireItems):
""" SetLayoutAsCustomPattern(self: FabricSheetType,minorStartOverhang: float,minorEndOverhang: float,majorStartOverhang: float,majorEndOverhang: float,minorFabricWireItems: IList[FabricWireItem],majorFabricWireItems: IList[FabricWireItem]) """
pass
def SetMajorLayoutAsActualSpacing(self,overallWidth,minorStartOverhang,spacing):
"""
SetMajorLayoutAsActualSpacing(self: FabricSheetType,overallWidth: float,minorStartOverhang: float,spacing: float)
Sets the major layout pattern as ActualSpacing,while specifying the needed
parameters for this pattern.
overallWidth: The entire width of the wire sheet in the minor direction.
minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor
direction.
spacing: The distance between the wires in the major direction.
"""
pass
def SetMajorLayoutAsFixedNumber(self,overallWidth,minorStartOverhang,minorEndOverhang,numberOfWires):
"""
SetMajorLayoutAsFixedNumber(self: FabricSheetType,overallWidth: float,minorStartOverhang: float,minorEndOverhang: float,numberOfWires: int)
Sets the major layout pattern as FixedNumber,while specifying the needed
parameters for this pattern.
overallWidth: The entire width of the wire sheet in the minor direction.
minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor
direction.
minorEndOverhang: The distance from the last wire to the edge of the sheet in the minor direction.
numberOfWires: The number of the wires to set in the major direction.
"""
pass
def SetMajorLayoutAsMaximumSpacing(self,overallWidth,minorStartOverhang,minorEndOverhang,spacing):
"""
SetMajorLayoutAsMaximumSpacing(self: FabricSheetType,overallWidth: float,minorStartOverhang: float,minorEndOverhang: float,spacing: float)
Sets the major layout pattern as MaximumSpacing,while specifying the needed
parameters for this pattern.
overallWidth: The entire width of the wire sheet in the minor direction.
minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor
direction.
minorEndOverhang: The distance from the last wire to the edge of the sheet in the minor direction.
spacing: The distance between the wires in the major direction.
"""
pass
def SetMajorLayoutAsNumberWithSpacing(self,overallWidth,minorStartOverhang,numberOfWires,spacing):
"""
SetMajorLayoutAsNumberWithSpacing(self: FabricSheetType,overallWidth: float,minorStartOverhang: float,numberOfWires: int,spacing: float)
Sets the major layout pattern as NumberWithSpacing,while specifying the needed
parameters for this pattern.
overallWidth: The entire width of the wire sheet in the minor direction.
minorStartOverhang: The distance from the edge of the sheet to the first wire in the minor
direction.
numberOfWires: The number of the wires to set in the major direction.
spacing: The distance between the wires in the major direction.
"""
pass
def SetMinorLayoutAsActualSpacing(self,overallLength,majorStartOverhang,spacing):
"""
SetMinorLayoutAsActualSpacing(self: FabricSheetType,overallLength: float,majorStartOverhang: float,spacing: float)
Sets the minor layout pattern as ActualSpacing,while specifying the needed
parameters for this pattern.
overallLength: The entire length of the wire sheet in the major direction.
majorStartOverhang: The distance from the edge of the sheet to the first wire in the major
direction.
spacing: The distance between the wires in the minor direction.
"""
pass
def SetMinorLayoutAsFixedNumber(self,overallLength,majorStartOverhang,majorEndOverhang,numberOfWires):
"""
SetMinorLayoutAsFixedNumber(self: FabricSheetType,overallLength: float,majorStartOverhang: float,majorEndOverhang: float,numberOfWires: int)
Sets the major layout pattern as FixedNumber,while specifying the needed
parameters for this pattern.
overallLength: The entire length of the wire sheet in the major direction.
majorStartOverhang: The distance from the edge of the sheet to the first wire in the major
direction.
majorEndOverhang: The distance from the last wire to the edge of the sheet in the major direction.
numberOfWires: The number of the wires to set in the minor direction.
"""
pass
def SetMinorLayoutAsMaximumSpacing(self,overallLength,majorStartOverhang,majorEndOverhang,spacing):
"""
SetMinorLayoutAsMaximumSpacing(self: FabricSheetType,overallLength: float,majorStartOverhang: float,majorEndOverhang: float,spacing: float)
Sets the major layout pattern as MaximumSpacing,while specifying the needed
parameters for this pattern.
overallLength: The entire length of the wire sheet in the major direction.
majorStartOverhang: The distance from the edge of the sheet to the first wire in the major
direction.
majorEndOverhang: The distance from the last wire to the edge of the sheet in the major direction.
spacing: The distance between the wires in the minor direction.
"""
pass
def SetMinorLayoutAsNumberWithSpacing(self,overallLength,majorStartOverhang,numberOfWires,spacing):
"""
SetMinorLayoutAsNumberWithSpacing(self: FabricSheetType,overallLength: float,majorStartOverhang: float,numberOfWires: int,spacing: float)
Sets the major layout pattern as NumberWithSpacing,while specifying the needed
parameters for this pattern.
overallLength: The entire length of the wire sheet in the major direction.
majorStartOverhang: The distance from the edge of the sheet to the first wire in the major
direction.
numberOfWires: The number of wires in the minor direction.
spacing: The distance between the wires in the minor direction.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
MajorDirectionWireType=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The id of the FabricWireType to be used in the major direction.
Get: MajorDirectionWireType(self: FabricSheetType) -> ElementId
Set: MajorDirectionWireType(self: FabricSheetType)=value
"""
MajorEndOverhang=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The distance from the edge of the sheet to the last wire (measured in the major direction).
Get: MajorEndOverhang(self: FabricSheetType) -> float
"""
MajorLapSpliceLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The lap splice length in the major direction.
Get: MajorLapSpliceLength(self: FabricSheetType) -> float
Set: MajorLapSpliceLength(self: FabricSheetType)=value
"""
MajorLayoutPattern=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The layout pattern in the major direction.
Get: MajorLayoutPattern(self: FabricSheetType) -> FabricSheetLayoutPattern
"""
MajorNumberOfWires=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The number of wires used in the major direction (includes the first and last wires).
Get: MajorNumberOfWires(self: FabricSheetType) -> int
"""
MajorReinforcementArea=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The area of fabric divided by the spacing of the wire in the major direction.
Get: MajorReinforcementArea(self: FabricSheetType) -> float
"""
MajorSpacing=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The spacing between the wires in the major direction (not including the overhangs).
Get: MajorSpacing(self: FabricSheetType) -> float
"""
MajorStartOverhang=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The distance from the edge of the sheet to the first wire (measured in the major direction).
Get: MajorStartOverhang(self: FabricSheetType) -> float
"""
Material=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The id of the material assigned to wires.
Get: Material(self: FabricSheetType) -> ElementId
Set: Material(self: FabricSheetType)=value
"""
MinorDirectionWireType=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The id of the FabricWireType to be used in the minor direction.
Get: MinorDirectionWireType(self: FabricSheetType) -> ElementId
Set: MinorDirectionWireType(self: FabricSheetType)=value
"""
MinorEndOverhang=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The distance from the edge of the sheet to the last wire (measured in the minor direction).
Get: MinorEndOverhang(self: FabricSheetType) -> float
"""
MinorLapSpliceLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The lap splice length in the minor direction.
Get: MinorLapSpliceLength(self: FabricSheetType) -> float
Set: MinorLapSpliceLength(self: FabricSheetType)=value
"""
MinorLayoutPattern=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The layout pattern in the minor direction.
Get: MinorLayoutPattern(self: FabricSheetType) -> FabricSheetLayoutPattern
"""
MinorNumberOfWires=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The number of wires used in the minor direction (includes the 1st and last wires).
Get: MinorNumberOfWires(self: FabricSheetType) -> int
"""
MinorReinforcementArea=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The area of fabric divided by the spacing of the wire in the minor direction.
Get: MinorReinforcementArea(self: FabricSheetType) -> float
"""
MinorSpacing=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The spacing between the wires in the minor direction (not including the overhangs).
Get: MinorSpacing(self: FabricSheetType) -> float
"""
MinorStartOverhang=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The distance from the edge of the sheet to the first wire (measured in the minor direction).
Get: MinorStartOverhang(self: FabricSheetType) -> float
"""
OverallLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The length of the wire sheet (including overhangs) in the major direction.
Get: OverallLength(self: FabricSheetType) -> float
"""
OverallWidth=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The length of the wire sheet (including overhangs) in the minor direction.
Get: OverallWidth(self: FabricSheetType) -> float
"""
SheetMass=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The sheet mass.
Get: SheetMass(self: FabricSheetType) -> float
Set: SheetMass(self: FabricSheetType)=value
"""
SheetMassUnit=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The sheet mass per area unit.
Get: SheetMassUnit(self: FabricSheetType) -> float
"""
| 26.237226
| 246
| 0.728265
|
class FabricSheetType(ElementType,IDisposable):
@staticmethod
def CreateDefaultFabricSheetType(ADoc):
pass
def Dispose(self):
pass
def getBoundingBox(self,*args):
pass
def GetReinforcementRoundingManager(self):
pass
def GetWireItem(self,wireIndex,direction):
pass
def IsCustom(self):
pass
def IsValidMajorLapSplice(self,majorLapSplice):
pass
def IsValidMinorLapSplice(self,minorLapSplice):
pass
def ReleaseUnmanagedResources(self,*args):
pass
def setElementType(self,*args):
pass
def SetLayoutAsCustomPattern(self,minorStartOverhang,minorEndOverhang,majorStartOverhang,majorEndOverhang,minorFabricWireItems,majorFabricWireItems):
pass
def SetMajorLayoutAsActualSpacing(self,overallWidth,minorStartOverhang,spacing):
pass
def SetMajorLayoutAsFixedNumber(self,overallWidth,minorStartOverhang,minorEndOverhang,numberOfWires):
pass
def SetMajorLayoutAsMaximumSpacing(self,overallWidth,minorStartOverhang,minorEndOverhang,spacing):
pass
def SetMajorLayoutAsNumberWithSpacing(self,overallWidth,minorStartOverhang,numberOfWires,spacing):
pass
def SetMinorLayoutAsActualSpacing(self,overallLength,majorStartOverhang,spacing):
pass
def SetMinorLayoutAsFixedNumber(self,overallLength,majorStartOverhang,majorEndOverhang,numberOfWires):
pass
def SetMinorLayoutAsMaximumSpacing(self,overallLength,majorStartOverhang,majorEndOverhang,spacing):
pass
def SetMinorLayoutAsNumberWithSpacing(self,overallLength,majorStartOverhang,numberOfWires,spacing):
pass
def __enter__(self,*args):
pass
def __exit__(self,*args):
pass
def __init__(self,*args):
pass
MajorDirectionWireType=property(lambda self: object(),lambda self,v: None,lambda self: None)
MajorEndOverhang=property(lambda self: object(),lambda self,v: None,lambda self: None)
MajorLapSpliceLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
MajorLayoutPattern=property(lambda self: object(),lambda self,v: None,lambda self: None)
MajorNumberOfWires=property(lambda self: object(),lambda self,v: None,lambda self: None)
MajorReinforcementArea=property(lambda self: object(),lambda self,v: None,lambda self: None)
MajorSpacing=property(lambda self: object(),lambda self,v: None,lambda self: None)
MajorStartOverhang=property(lambda self: object(),lambda self,v: None,lambda self: None)
Material=property(lambda self: object(),lambda self,v: None,lambda self: None)
MinorDirectionWireType=property(lambda self: object(),lambda self,v: None,lambda self: None)
MinorEndOverhang=property(lambda self: object(),lambda self,v: None,lambda self: None)
MinorLapSpliceLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
MinorLayoutPattern=property(lambda self: object(),lambda self,v: None,lambda self: None)
MinorNumberOfWires=property(lambda self: object(),lambda self,v: None,lambda self: None)
MinorReinforcementArea=property(lambda self: object(),lambda self,v: None,lambda self: None)
MinorSpacing=property(lambda self: object(),lambda self,v: None,lambda self: None)
MinorStartOverhang=property(lambda self: object(),lambda self,v: None,lambda self: None)
OverallLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
OverallWidth=property(lambda self: object(),lambda self,v: None,lambda self: None)
SheetMass=property(lambda self: object(),lambda self,v: None,lambda self: None)
SheetMassUnit=property(lambda self: object(),lambda self,v: None,lambda self: None)
| true
| true
|
79032df596fc0850146cc2b93ec8d69e99711771
| 9,721
|
py
|
Python
|
flywheel_bids/curate_bids.py
|
AndysWorth/bids-client
|
6c613581e63662d79189a9ead677189cc978c4d0
|
[
"MIT"
] | null | null | null |
flywheel_bids/curate_bids.py
|
AndysWorth/bids-client
|
6c613581e63662d79189a9ead677189cc978c4d0
|
[
"MIT"
] | null | null | null |
flywheel_bids/curate_bids.py
|
AndysWorth/bids-client
|
6c613581e63662d79189a9ead677189cc978c4d0
|
[
"MIT"
] | null | null | null |
import argparse
import logging
import json
import os
import tempfile
import sys
import re
import flywheel
from .supporting_files import bidsify_flywheel, utils, templates
from .supporting_files.project_tree import get_project_tree
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('curate-bids')
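# The project-level template file is excluded from curation in curate_bids_tree().
# NOTE: the pattern below is an assumed placeholder for that file name; the exact
# regex used upstream may differ.
PROJECT_TEMPLATE_FILE_NAME_REGEX = re.compile(r'.*project-template\.json$')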
def clear_meta_info(context, template):
if 'info' in context and template.namespace in context['info']:
del context['info'][template.namespace]
def format_validation_error(err):
path = '/'.join(err.path)
if path:
return path + ' ' + err.message
return err.message
def validate_meta_info(container, template):
""" Validate meta information
Adds 'BIDS.NA' if no BIDS info present
Adds 'BIDS.valid' and 'BIDS.error_message'
to communicate to user if values are valid
Currently, validation is only checking if
mandatory properties are non-empty strings
Could add the following checks:
Are the values alpha numeric?
"""
# Get namespace
namespace = template.namespace
# If 'info' is NOT in container, then must not
# have matched to a template, create 'info'
# field with object {'BIDS': 'NA'}
if 'info' not in container:
container['info'] = {namespace: 'NA'}
# if the namespace ('BIDS') is NOT in 'info',
# then must not have matched to a template,
# add {'BIDS': 'NA'} to the meta info
elif namespace not in container['info']:
container['info'][namespace] = 'NA'
# If already assigned BIDS 'NA', then break
elif container['info'][namespace] == 'NA':
pass
# Otherwise, iterate over keys within container
else:
valid = True
error_message = ''
# Find template
templateName = container['info'][namespace].get('template')
if templateName:
templateDef = template.definitions.get(templateName)
if templateDef:
errors = template.validate(templateDef, container['info'][namespace])
if errors:
valid = False
error_message = '\n'.join([format_validation_error(err) for err in errors])
else:
valid = False
error_message += 'Unknown template: %s. ' % templateName
# Assign 'valid' and 'error_message' values
container['info'][namespace]['valid'] = valid
container['info'][namespace]['error_message'] = error_message
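# A minimal sketch of the effect of validate_meta_info() on a container, assuming
# the default 'BIDS' namespace and a hypothetical template name 'anat_file':
#
#   container = {'info': {'BIDS': {'template': 'anat_file', 'Filename': ''}}}
#   validate_meta_info(container, template)
#   # container['info']['BIDS']['valid']         -> False (empty mandatory value)
#   # container['info']['BIDS']['error_message'] -> one line per validation error
#
# A container with no 'info' key at all simply ends up as {'info': {'BIDS': 'NA'}}.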
def update_meta_info(fw, context):
""" Update file information
"""
# Modify file
if context['container_type'] == 'file':
# Modify acquisition file
if context['parent_container_type'] == 'acquisition':
fw.set_acquisition_file_info(
context['acquisition']['id'],
context['file']['name'],
context['file']['info']
)
# Modify project file
elif context['parent_container_type'] == 'project':
fw.set_project_file_info(
context['project']['id'],
context['file']['name'],
context['file']['info']
)
# Modify session file
elif context['parent_container_type'] == 'session':
fw.set_session_file_info(
context['session']['id'],
context['file']['name'],
context['file']['info']
)
else:
logger.info('Cannot determine file parent container type: ' + context['parent_container_type'])
# Modify project
elif context['container_type'] == 'project':
fw.replace_project_info(context['project']['id'], context['project']['info'])
# Modify session
elif context['container_type'] == 'session':
fw.replace_session_info(context['session']['id'], context['session']['info'])
# Modify acquisition
elif context['container_type'] == 'acquisition':
fw.replace_acquisition_info(context['acquisition']['id'], context['acquisition']['info'])
# Cannot determine container type
else:
logger.info('Cannot determine container type: ' + context['container_type'])
def curate_bids_dir(fw, project_id, session_id=None, reset=False, template_file=None, session_only=False):
"""
fw: Flywheel client
project_id: project id of project to curate
session_id: The optional session id to curate
reset: Whether or not to reset bids info before curation
template_file: The template file to use
session_only: If true, then only curate the provided session
"""
project = get_project_tree(fw, project_id, session_id=session_id, session_only=session_only)
curate_bids_tree(fw, project, reset, template_file, True)
def curate_bids_tree(fw, project, reset=False, template_file=None, update=True):
# Get project
project_files = project.get('files', [])
# Get template (for now, just use default)
template = templates.DEFAULT_TEMPLATE
# Check for project file
if not template_file:
template_filename = utils.find_custom_template(project_files)
if template_filename:
fd, path = tempfile.mkstemp('.json')
os.close(fd)
logger.info('Using project template: {0}'.format(template_filename))
fw.download_file_from_project(project['id'], template_filename, path)
template_file = path
if template_file:
template = templates.loadTemplate(template_file)
##
# Curation is now a 3-pass process
# 1. Do initial template matching and updating
# 2. Perform any path resolutions
# 3. Send updates to server
##
# 1. Do initial template matching and updating
for context in project.context_iter():
ctype = context['container_type']
parent_ctype = context['parent_container_type']
if reset:
clear_meta_info(context[ctype], template)
elif context[ctype].get('info',{}).get('BIDS') == 'NA':
continue
if ctype == 'project':
bidsify_flywheel.process_matching_templates(context, template)
# Validate meta information
# TODO: Improve the validator to understand what is valid for dataset_description file...
# validate_meta_info(context['project'])
elif ctype == 'session':
bidsify_flywheel.process_matching_templates(context, template)
# Add run_counter
context['run_counters'] = utils.RunCounterMap()
elif ctype == 'acquisition':
bidsify_flywheel.process_matching_templates(context, template)
elif ctype == 'file':
if parent_ctype == 'project' and PROJECT_TEMPLATE_FILE_NAME_REGEX.search(context['file']['name']):
# Don't BIDSIFY project template
continue
# Process matching
context['file'] = bidsify_flywheel.process_matching_templates(context, template)
# Validate meta information
validate_meta_info(context['file'], template)
# 2. Perform any path resolutions
session = None
for context in project.context_iter():
# Resolution
bidsify_flywheel.process_resolvers(context, template)
# 3. Send updates to server
if update:
for context in project.context_iter():
ctype = context['container_type']
node = context[ctype]
if node.is_dirty():
update_meta_info(fw, context)
def main_with_args(api_key, session_id, reset, session_only):
### Prep
# Check API key - raises Error if key is invalid
fw = flywheel.Flywheel(api_key)
if session_id:
project_id = utils.get_project_id_from_session_id(fw, session_id)
else:
print('Session id is required!')
sys.exit(1)
### Curate BIDS project
curate_bids_dir(fw, project_id, session_id, reset=reset, session_only=session_only)
def main():
### Read in arguments
parser = argparse.ArgumentParser(description='BIDS Curation')
parser.add_argument('--api-key', dest='api_key', action='store',
required=True, help='API key')
parser.add_argument('-p', dest='project_label', action='store',
required=False, default=None, help='Project Label on Flywheel instance')
parser.add_argument('--session', dest='session_id', action='store',
required=False, default=None, help='Session ID, used to look up project if project label is not readily available')
parser.add_argument('--reset', dest='reset', action='store_true',
default=False, help='Reset BIDS data before running')
parser.add_argument('--session-only', dest='session_only', action='store_true',
default=False, help='Only curate the session identified by --session')
parser.add_argument('--template-file', dest='template_file', action='store',
default=None, help='Template file to use')
args = parser.parse_args()
### Prep
# Check API key - raises Error if key is invalid
fw = flywheel.Flywheel(args.api_key)
# Get project id from label
if args.project_label:
project_id = utils.validate_project_label(fw, args.project_label)
elif args.session_id:
project_id = utils.get_project_id_from_session_id(fw, args.session_id)
else:
print('Either project label or session id is required!')
sys.exit(1)
### Curate BIDS project
curate_bids_dir(fw, project_id, args.session_id, reset=args.reset, template_file=args.template_file, session_only=args.session_only)
if __name__ == '__main__':
main()
| 36.82197
| 136
| 0.640778
|
import argparse
import logging
import json
import os
import tempfile
import sys
import re
import flywheel
from .supporting_files import bidsify_flywheel, utils, templates
from .supporting_files.project_tree import get_project_tree
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('curate-bids')
def clear_meta_info(context, template):
if 'info' in context and template.namespace in context['info']:
del context['info'][template.namespace]
def format_validation_error(err):
path = '/'.join(err.path)
if path:
return path + ' ' + err.message
return err.message
def validate_meta_info(container, template):
namespace = template.namespace
if 'info' not in container:
container['info'] = {namespace: 'NA'}
elif namespace not in container['info']:
container['info'][namespace] = 'NA'
elif container['info'][namespace] == 'NA':
pass
else:
valid = True
error_message = ''
templateName = container['info'][namespace].get('template')
if templateName:
templateDef = template.definitions.get(templateName)
if templateDef:
errors = template.validate(templateDef, container['info'][namespace])
if errors:
valid = False
error_message = '\n'.join([format_validation_error(err) for err in errors])
else:
valid = False
error_message += 'Unknown template: %s. ' % templateName
container['info'][namespace]['valid'] = valid
container['info'][namespace]['error_message'] = error_message
def update_meta_info(fw, context):
if context['container_type'] == 'file':
if context['parent_container_type'] == 'acquisition':
fw.set_acquisition_file_info(
context['acquisition']['id'],
context['file']['name'],
context['file']['info']
)
elif context['parent_container_type'] == 'project':
fw.set_project_file_info(
context['project']['id'],
context['file']['name'],
context['file']['info']
)
elif context['parent_container_type'] == 'session':
fw.set_session_file_info(
context['session']['id'],
context['file']['name'],
context['file']['info']
)
else:
logger.info('Cannot determine file parent container type: ' + context['parent_container_type'])
elif context['container_type'] == 'project':
fw.replace_project_info(context['project']['id'], context['project']['info'])
elif context['container_type'] == 'session':
fw.replace_session_info(context['session']['id'], context['session']['info'])
elif context['container_type'] == 'acquisition':
fw.replace_acquisition_info(context['acquisition']['id'], context['acquisition']['info'])
else:
logger.info('Cannot determine container type: ' + context['container_type'])
def curate_bids_dir(fw, project_id, session_id=None, reset=False, template_file=None, session_only=False):
project = get_project_tree(fw, project_id, session_id=session_id, session_only=session_only)
curate_bids_tree(fw, project, reset, template_file, True)
def curate_bids_tree(fw, project, reset=False, template_file=None, update=True):
project_files = project.get('files', [])
template = templates.DEFAULT_TEMPLATE
if not template_file:
template_filename = utils.find_custom_template(project_files)
if template_filename:
fd, path = tempfile.mkstemp('.json')
os.close(fd)
logger.info('Using project template: {0}'.format(template_filename))
fw.download_file_from_project(project['id'], template_filename, path)
template_file = path
if template_file:
template = templates.loadTemplate(template_file)
for context in project.context_iter():
ctype = context['container_type']
parent_ctype = context['parent_container_type']
if reset:
clear_meta_info(context[ctype], template)
elif context[ctype].get('info',{}).get('BIDS') == 'NA':
continue
if ctype == 'project':
bidsify_flywheel.process_matching_templates(context, template)
elif ctype == 'session':
bidsify_flywheel.process_matching_templates(context, template)
context['run_counters'] = utils.RunCounterMap()
elif ctype == 'acquisition':
bidsify_flywheel.process_matching_templates(context, template)
elif ctype == 'file':
if parent_ctype == 'project' and PROJECT_TEMPLATE_FILE_NAME_REGEX.search(context['file']['name']):
continue
# Process matching
context['file'] = bidsify_flywheel.process_matching_templates(context, template)
# Validate meta information
validate_meta_info(context['file'], template)
# 2. Perform any path resolutions
session = None
for context in project.context_iter():
# Resolution
bidsify_flywheel.process_resolvers(context, template)
# 3. Send updates to server
if update:
for context in project.context_iter():
ctype = context['container_type']
node = context[ctype]
if node.is_dirty():
update_meta_info(fw, context)
def main_with_args(api_key, session_id, reset, session_only):
### Prep
# Check API key - raises Error if key is invalid
fw = flywheel.Flywheel(api_key)
if session_id:
project_id = utils.get_project_id_from_session_id(fw, session_id)
else:
print('Session id is required!')
sys.exit(1)
### Curate BIDS project
curate_bids_dir(fw, project_id, session_id, reset=reset, session_only=session_only)
def main():
### Read in arguments
parser = argparse.ArgumentParser(description='BIDS Curation')
parser.add_argument('--api-key', dest='api_key', action='store',
required=True, help='API key')
parser.add_argument('-p', dest='project_label', action='store',
required=False, default=None, help='Project Label on Flywheel instance')
parser.add_argument('--session', dest='session_id', action='store',
required=False, default=None, help='Session ID, used to look up project if project label is not readily available')
parser.add_argument('--reset', dest='reset', action='store_true',
default=False, help='Reset BIDS data before running')
parser.add_argument('--session-only', dest='session_only', action='store_true',
default=False, help='Only curate the session identified by --session')
parser.add_argument('--template-file', dest='template_file', action='store',
default=None, help='Template file to use')
args = parser.parse_args()
### Prep
# Check API key - raises Error if key is invalid
fw = flywheel.Flywheel(args.api_key)
# Get project id from label
if args.project_label:
project_id = utils.validate_project_label(fw, args.project_label)
elif args.session_id:
project_id = utils.get_project_id_from_session_id(fw, args.session_id)
else:
print('Either project label or session id is required!')
sys.exit(1)
### Curate BIDS project
curate_bids_dir(fw, project_id, args.session_id, reset=args.reset, template_file=args.template_file, session_only=args.session_only)
if __name__ == '__main__':
main()
| true
| true
|
79032ebd5c06411c65fe7b6b75516feadbaf0fde
| 7,939
|
py
|
Python
|
exp/inference/inference_dir.py
|
ericwang0701/Graphonomy
|
1942bd41723ec48e5133f932082a49d1c17050ad
|
[
"MIT"
] | null | null | null |
exp/inference/inference_dir.py
|
ericwang0701/Graphonomy
|
1942bd41723ec48e5133f932082a49d1c17050ad
|
[
"MIT"
] | null | null | null |
exp/inference/inference_dir.py
|
ericwang0701/Graphonomy
|
1942bd41723ec48e5133f932082a49d1c17050ad
|
[
"MIT"
] | null | null | null |
import socket
import timeit
import numpy as np
from PIL import Image
from datetime import datetime
import os
import sys
from collections import OrderedDict
sys.path.append('./')
# PyTorch includes
import torch
from torch.autograd import Variable
from torchvision import transforms
import cv2
# Custom includes
from networks import deeplab_xception_transfer, graph
from dataloaders import custom_transforms as tr
#
import argparse
import torch.nn.functional as F
label_colours = [(0,0,0)
, (128,0,0), (255,0,0), (0,85,0), (170,0,51), (255,85,0), (0,0,85), (0,119,221), (85,85,0), (0,85,85), (85,51,0), (52,86,128), (0,128,0)
, (0,0,255), (51,170,221), (0,255,255), (85,255,170), (170,255,85), (255,255,0), (255,170,0)]
def flip(x, dim):
indices = [slice(None)] * x.dim()
indices[dim] = torch.arange(x.size(dim) - 1, -1, -1,
dtype=torch.long, device=x.device)
return x[tuple(indices)]
def flip_cihp(tail_list):
'''
:param tail_list: tail_list size is 1 x n_class x h x w
:return:
'''
# tail_list = tail_list[0]
tail_list_rev = [None] * 20
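    # The first 14 channels have no left/right counterpart and are copied as-is;
    # the remaining pairs are swapped because a horizontal flip exchanges left and
    # right body parts (in the CIHP label set, 14/15, 16/17 and 18/19 are the
    # left/right arm, leg and shoe channels respectively).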
for xx in range(14):
tail_list_rev[xx] = tail_list[xx].unsqueeze(0)
tail_list_rev[14] = tail_list[15].unsqueeze(0)
tail_list_rev[15] = tail_list[14].unsqueeze(0)
tail_list_rev[16] = tail_list[17].unsqueeze(0)
tail_list_rev[17] = tail_list[16].unsqueeze(0)
tail_list_rev[18] = tail_list[19].unsqueeze(0)
tail_list_rev[19] = tail_list[18].unsqueeze(0)
return torch.cat(tail_list_rev,dim=0)
def decode_labels(mask, num_images=1, num_classes=20):
"""Decode batch of segmentation masks.
Args:
mask: result of inference after taking argmax.
num_images: number of images to decode from the batch.
num_classes: number of classes to predict (including background).
Returns:
A batch with num_images RGB images of the same size as the input.
"""
n, h, w = mask.shape
assert (n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (
n, num_images)
outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
for i in range(num_images):
img = Image.new('RGB', (len(mask[i, 0]), len(mask[i])))
pixels = img.load()
for j_, j in enumerate(mask[i, :, :]):
for k_, k in enumerate(j):
if k < num_classes:
pixels[k_, j_] = label_colours[k]
outputs[i] = np.array(img)
return outputs
def read_img(img_path):
_img = Image.open(img_path).convert('RGB') # return is RGB pic
return _img
def img_transform(img, transform=None):
sample = {'image': img, 'label': 0}
sample = transform(sample)
return sample
def get_img_paths(imgs_dir):
img_paths = []
for dirpath, dirnames, filenames in os.walk(imgs_dir):
for filename in [f for f in filenames if f.endswith('.png') or f.endswith('.PNG') or f.endswith('.jpg') or f.endswith('.JPG') or f.endswith('.jpeg') or f.endswith('.JPEG')]:
img_paths.append(os.path.join(dirpath,filename))
img_paths.sort()
return img_paths
def inference(net, img_path='', output_path='./', output_name='f', use_gpu=True):
    '''
    Run multi-scale, flip-averaged human parsing on a single image.
    :param net: the Graphonomy network (already on the GPU when use_gpu is set)
    :param img_path: path of the input image
    :param output_path: directory where the colorized parsing map is saved
    :param output_name: basename (without extension) of the output PNG
    :return:
    '''
# adj
adj2_ = torch.from_numpy(graph.cihp2pascal_nlp_adj).float()
adj2_test = adj2_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 20).cuda().transpose(2, 3)
adj1_ = Variable(torch.from_numpy(graph.preprocess_adj(graph.pascal_graph)).float())
adj3_test = adj1_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 7).cuda()
cihp_adj = graph.preprocess_adj(graph.cihp_graph)
adj3_ = Variable(torch.from_numpy(cihp_adj).float())
adj1_test = adj3_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 20).cuda()
# multi-scale
scale_list = [1, 0.5, 0.75, 1.25, 1.5, 1.75]
img = read_img(img_path)
testloader_list = []
testloader_flip_list = []
for pv in scale_list:
composed_transforms_ts = transforms.Compose([
tr.Scale_only_img(pv),
tr.Normalize_xception_tf_only_img(),
tr.ToTensor_only_img()])
composed_transforms_ts_flip = transforms.Compose([
tr.Scale_only_img(pv),
tr.HorizontalFlip_only_img(),
tr.Normalize_xception_tf_only_img(),
tr.ToTensor_only_img()])
testloader_list.append(img_transform(img, composed_transforms_ts))
# print(img_transform(img, composed_transforms_ts))
testloader_flip_list.append(img_transform(img, composed_transforms_ts_flip))
# print(testloader_list)
start_time = timeit.default_timer()
# One testing epoch
net.eval()
# 1 0.5 0.75 1.25 1.5 1.75 ; flip:
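    # For each scale the batch holds the image and its horizontal flip; the flipped
    # prediction is mapped back with flip_cihp() (swapping left/right channels) and
    # flip() (undoing the spatial flip), and the two are averaged. Predictions from
    # all scales are resized to the size of the first (scale 1.0) input and summed
    # into outputs_final before taking the argmax.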
for iii, sample_batched in enumerate(zip(testloader_list, testloader_flip_list)):
inputs, labels = sample_batched[0]['image'], sample_batched[0]['label']
inputs_f, _ = sample_batched[1]['image'], sample_batched[1]['label']
inputs = inputs.unsqueeze(0)
inputs_f = inputs_f.unsqueeze(0)
inputs = torch.cat((inputs, inputs_f), dim=0)
if iii == 0:
_, _, h, w = inputs.size()
# assert inputs.size() == inputs_f.size()
# Forward pass of the mini-batch
inputs = Variable(inputs, requires_grad=False)
with torch.no_grad():
if use_gpu >= 0:
inputs = inputs.cuda()
# outputs = net.forward(inputs)
outputs = net.forward(inputs, adj1_test.cuda(), adj3_test.cuda(), adj2_test.cuda())
outputs = (outputs[0] + flip(flip_cihp(outputs[1]), dim=-1)) / 2
outputs = outputs.unsqueeze(0)
if iii > 0:
outputs = F.upsample(outputs, size=(h, w), mode='bilinear', align_corners=True)
outputs_final = outputs_final + outputs
else:
outputs_final = outputs.clone()
################ plot pic
predictions = torch.max(outputs_final, 1)[1]
results = predictions.cpu().numpy()
vis_res = decode_labels(results)
parsing_im = Image.fromarray(vis_res[0])
parsing_im.save(output_path+'/{}.png'.format(output_name))
#we don't need the gray image
#cv2.imwrite(output_path+'/{}_gray.png'.format(output_name), results[0, :, :])
end_time = timeit.default_timer()
print('time used for the multi-scale image inference' + ' is :' + str(end_time - start_time))
if __name__ == '__main__':
'''argparse begin'''
parser = argparse.ArgumentParser()
# parser.add_argument('--loadmodel',default=None,type=str)
parser.add_argument('--loadmodel', default='', type=str)
parser.add_argument('--imgs_dir', default='', type=str)
parser.add_argument('--output_dir', default='', type=str)
parser.add_argument('--use_gpu', default=1, type=int)
opts = parser.parse_args()
net = deeplab_xception_transfer.deeplab_xception_transfer_projection_savemem(n_classes=20,
hidden_layers=128,
source_classes=7, )
if not opts.loadmodel == '':
x = torch.load(opts.loadmodel)
net.load_source_model(x)
print('load model:', opts.loadmodel)
else:
print('no model load !!!!!!!!')
raise RuntimeError('No model!!!!')
    if opts.use_gpu > 0:
net.cuda()
use_gpu = True
else:
use_gpu = False
raise RuntimeError('must use the gpu!!!!')
img_paths = get_img_paths(opts.imgs_dir)
for idx, path in enumerate(img_paths):
filename = os.path.splitext(os.path.basename(path))[0]
output_name = filename +"_seg"
inference(net=net, img_path=path, output_path=opts.output_dir , output_name=output_name, use_gpu=use_gpu)
| 36.417431
| 181
| 0.623504
|
import socket
import timeit
import numpy as np
from PIL import Image
from datetime import datetime
import os
import sys
from collections import OrderedDict
sys.path.append('./')
import torch
from torch.autograd import Variable
from torchvision import transforms
import cv2
from networks import deeplab_xception_transfer, graph
from dataloaders import custom_transforms as tr
import argparse
import torch.nn.functional as F
label_colours = [(0,0,0)
, (128,0,0), (255,0,0), (0,85,0), (170,0,51), (255,85,0), (0,0,85), (0,119,221), (85,85,0), (0,85,85), (85,51,0), (52,86,128), (0,128,0)
, (0,0,255), (51,170,221), (0,255,255), (85,255,170), (170,255,85), (255,255,0), (255,170,0)]
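# Reverse a tensor along the given dimension (equivalent to torch.flip(x, [dim])).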
def flip(x, dim):
indices = [slice(None)] * x.dim()
indices[dim] = torch.arange(x.size(dim) - 1, -1, -1,
dtype=torch.long, device=x.device)
return x[tuple(indices)]
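# Swap the left/right paired channels (arms, legs, shoes) of a 20-channel CIHP
# prediction so that a horizontally flipped output lines up with the original.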
def flip_cihp(tail_list):
tail_list_rev = [None] * 20
for xx in range(14):
tail_list_rev[xx] = tail_list[xx].unsqueeze(0)
tail_list_rev[14] = tail_list[15].unsqueeze(0)
tail_list_rev[15] = tail_list[14].unsqueeze(0)
tail_list_rev[16] = tail_list[17].unsqueeze(0)
tail_list_rev[17] = tail_list[16].unsqueeze(0)
tail_list_rev[18] = tail_list[19].unsqueeze(0)
tail_list_rev[19] = tail_list[18].unsqueeze(0)
return torch.cat(tail_list_rev,dim=0)
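# Convert a batch of (h, w) class-index masks into RGB visualizations using
# the CIHP palette defined above.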
def decode_labels(mask, num_images=1, num_classes=20):
n, h, w = mask.shape
    assert (n >= num_images), 'Batch size %d should be greater than or equal to the number of images to save %d.' % (
        n, num_images)
outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
for i in range(num_images):
img = Image.new('RGB', (len(mask[i, 0]), len(mask[i])))
pixels = img.load()
for j_, j in enumerate(mask[i, :, :]):
for k_, k in enumerate(j):
if k < num_classes:
pixels[k_, j_] = label_colours[k]
outputs[i] = np.array(img)
return outputs
def read_img(img_path):
_img = Image.open(img_path).convert('RGB')
return _img
def img_transform(img, transform=None):
sample = {'image': img, 'label': 0}
sample = transform(sample)
return sample
def get_img_paths(imgs_dir):
img_paths = []
for dirpath, dirnames, filenames in os.walk(imgs_dir):
        for filename in [f for f in filenames if f.endswith(('.png', '.PNG', '.jpg', '.JPG', '.jpeg', '.JPEG'))]:
img_paths.append(os.path.join(dirpath,filename))
img_paths.sort()
return img_paths
def inference(net, img_path='', output_path='./', output_name='f', use_gpu=True):
adj2_ = torch.from_numpy(graph.cihp2pascal_nlp_adj).float()
adj2_test = adj2_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 20).cuda().transpose(2, 3)
adj1_ = Variable(torch.from_numpy(graph.preprocess_adj(graph.pascal_graph)).float())
adj3_test = adj1_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 7).cuda()
cihp_adj = graph.preprocess_adj(graph.cihp_graph)
adj3_ = Variable(torch.from_numpy(cihp_adj).float())
adj1_test = adj3_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 20).cuda()
scale_list = [1, 0.5, 0.75, 1.25, 1.5, 1.75]
img = read_img(img_path)
testloader_list = []
testloader_flip_list = []
for pv in scale_list:
composed_transforms_ts = transforms.Compose([
tr.Scale_only_img(pv),
tr.Normalize_xception_tf_only_img(),
tr.ToTensor_only_img()])
composed_transforms_ts_flip = transforms.Compose([
tr.Scale_only_img(pv),
tr.HorizontalFlip_only_img(),
tr.Normalize_xception_tf_only_img(),
tr.ToTensor_only_img()])
testloader_list.append(img_transform(img, composed_transforms_ts))
testloader_flip_list.append(img_transform(img, composed_transforms_ts_flip))
start_time = timeit.default_timer()
net.eval()
for iii, sample_batched in enumerate(zip(testloader_list, testloader_flip_list)):
inputs, labels = sample_batched[0]['image'], sample_batched[0]['label']
inputs_f, _ = sample_batched[1]['image'], sample_batched[1]['label']
inputs = inputs.unsqueeze(0)
inputs_f = inputs_f.unsqueeze(0)
inputs = torch.cat((inputs, inputs_f), dim=0)
if iii == 0:
_, _, h, w = inputs.size()
inputs = Variable(inputs, requires_grad=False)
with torch.no_grad():
if use_gpu >= 0:
inputs = inputs.cuda()
outputs = net.forward(inputs, adj1_test.cuda(), adj3_test.cuda(), adj2_test.cuda())
outputs = (outputs[0] + flip(flip_cihp(outputs[1]), dim=-1)) / 2
outputs = outputs.unsqueeze(0)
if iii > 0:
outputs = F.upsample(outputs, size=(h, w), mode='bilinear', align_corners=True)
outputs_final = outputs_final + outputs
else:
outputs_final = outputs.clone()
    predictions = torch.max(outputs_final, 1)[1]
    results = predictions.cpu().numpy()
    vis_res = decode_labels(results)
    parsing_im = Image.fromarray(vis_res[0])
    parsing_im.save(output_path+'/{}.png'.format(output_name))
end_time = timeit.default_timer()
    print('time used for the multi-scale image inference is: ' + str(end_time - start_time))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# parser.add_argument('--loadmodel',default=None,type=str)
parser.add_argument('--loadmodel', default='', type=str)
parser.add_argument('--imgs_dir', default='', type=str)
parser.add_argument('--output_dir', default='', type=str)
parser.add_argument('--use_gpu', default=1, type=int)
opts = parser.parse_args()
net = deeplab_xception_transfer.deeplab_xception_transfer_projection_savemem(n_classes=20,
hidden_layers=128,
source_classes=7, )
if not opts.loadmodel == '':
x = torch.load(opts.loadmodel)
net.load_source_model(x)
print('load model:', opts.loadmodel)
else:
print('no model load !!!!!!!!')
raise RuntimeError('No model!!!!')
    if opts.use_gpu > 0:
net.cuda()
use_gpu = True
else:
use_gpu = False
raise RuntimeError('must use the gpu!!!!')
img_paths = get_img_paths(opts.imgs_dir)
for idx, path in enumerate(img_paths):
filename = os.path.splitext(os.path.basename(path))[0]
output_name = filename +"_seg"
inference(net=net, img_path=path, output_path=opts.output_dir , output_name=output_name, use_gpu=use_gpu)
| true
| true
|
79032f18745a632a75505068cc5317f7be76e766
| 4,662
|
py
|
Python
|
app/main.py
|
ntmk/battlesnake-2019-pixelated
|
cd589c51c892943a37c1c594848524fb6667bf87
|
[
"MIT"
] | 1
|
2019-11-20T18:17:23.000Z
|
2019-11-20T18:17:23.000Z
|
app/main.py
|
ntmk/battlesnake-pixelated-2019
|
cd589c51c892943a37c1c594848524fb6667bf87
|
[
"MIT"
] | 1
|
2019-03-08T23:16:23.000Z
|
2019-07-13T15:32:39.000Z
|
app/main.py
|
ntmk/battlesnake-pixelated-2019
|
cd589c51c892943a37c1c594848524fb6667bf87
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import bottle
import os, json
from .utils import distance, neighbours, direction
from .defensive import find_my_tail, trouble, find_enemy_tail, eat_food, find_my_tail_emergency
from .snake import Snake
from .gameboard import GameBoard
SAFTEY = 0
SNAKE = 1
FOOD = 3
DANGER = 5
def move_response(move):
assert move in ['up', 'down', 'left', 'right'], \
"Move must be one of [up, down, left, right]"
return bottle.HTTPResponse(
status=200,
headers={
"Content-Type": "application/json"
},
body=json.dumps({
"move": move
})
)
def init(data):
"""
Initialize grid and update cell values\n
    @param data -> JSON response from Bottle\n
    @return game_id -> Game id for debugging purposes when displaying grid\n
@return grid -> Grid with updated cell values\n
@return food -> Sorted array of food by closest to charlie\n
@return charlie -> My snake\n
@return enemies -> Array of all enemy snakes\n
@return check_food -> Secondary grid to look ahead when eating food
"""
food = []
enemies = []
grid = GameBoard(data['board']['height'], data['board']['width'])
check_food = GameBoard(data['board']['height'], data['board']['width'])
charlie = Snake(data['you'])
for i in data['board']['food']:
food.append([i['x'], i['y']])
grid.set_cell([i['x'], i['y']], FOOD)
check_food.set_cell([i['x'], i['y']], FOOD)
for snake in data['board']['snakes']:
snake = Snake(snake)
for coord in snake.coords:
grid.set_cell(coord, SNAKE)
check_food.set_cell(coord, SNAKE)
if snake.health < 100 and snake.length > 2 and data['turn'] >= 3:
grid.set_cell(snake.tail, SAFTEY)
check_food.set_cell(snake.tail, SAFTEY)
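        # Mark cells adjacent to the heads of equal-or-longer enemy snakes as
        # dangerous to steer pathfinding away from head-to-head losses.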
if snake.id != charlie.id:
for neighbour in neighbours(snake.head, grid, 0, snake.coords, [1]):
if snake.length >= charlie.length:
grid.set_cell(neighbour, DANGER)
check_food.set_cell(neighbour, DANGER)
enemies.append(snake)
food = sorted(food, key = lambda p: distance(p, charlie.head))
game_id = data['game']['id']
# print("turn is {}".format(data['turn']))
return game_id, grid, food, charlie, enemies, check_food
@bottle.post('/ping')
def ping():
return bottle.HTTPResponse(
status=200,
headers={
"Content-Type": "application/json"
},
body=json.dumps({})
)
@bottle.post('/start')
def start():
return bottle.HTTPResponse(
status=200,
headers={
"Content-Type": "application/json"
},
body=json.dumps({
"color": '#002080',
'headType': 'pixel',
'tailType': 'pixel'
})
)
@bottle.post('/move')
def move():
data = bottle.request.json
game_id, grid, food, charlie, enemies, check_food = init(data)
# grid.display_game(game_id)
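    # Strategy cascade: eat food while small, hungry or outnumbered; otherwise
    # chase our own tail, then an enemy tail, then any reachable free space.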
if len(enemies) > 2 or charlie.length <= 25 or charlie.health <= 60:
path = eat_food(charlie, grid, food, check_food)
if path:
# print('eat path {}'.format(path))
return move_response(direction(path[0], path[1]))
if charlie.length >= 3:
path = find_my_tail(charlie, grid)
if path:
# print('find my tail path {}'.format(path))
return move_response(direction(path[0], path[1]))
if not path:
path = find_enemy_tail(charlie, enemies, grid)
if path:
# print('find enemy tail path {}'.format(path))
return move_response(direction(path[0], path[1]))
    # if our length is greater than the threshold and no other path was available
if charlie.length >= 3:
path = find_my_tail_emergency(charlie, grid)
if path:
# print('find my tail emergency path {}'.format(path))
return move_response(direction(path[0], path[1]))
# Choose a random free space if no available enemy tail
if not path:
path = trouble(charlie, grid)
if path:
# print('trouble path {}'.format(path))
return move_response(direction(path[0], path[1]))
@bottle.post('/end')
def end():
return bottle.HTTPResponse(
status=200,
headers={
"Content-Type": "application/json"
},
body=json.dumps({})
)
application = bottle.default_app()
if __name__ == '__main__':
bottle.run(application, host=os.getenv('IP', '0.0.0.0'), port=os.getenv('PORT', '8080'), quiet = True)
| 29.506329
| 103
| 0.586015
|
import bottle
import os, json
from .utils import distance, neighbours, direction
from .defensive import find_my_tail, trouble, find_enemy_tail, eat_food, find_my_tail_emergency
from .snake import Snake
from .gameboard import GameBoard
SAFTEY = 0
SNAKE = 1
FOOD = 3
DANGER = 5
def move_response(move):
assert move in ['up', 'down', 'left', 'right'], \
"Move must be one of [up, down, left, right]"
return bottle.HTTPResponse(
status=200,
headers={
"Content-Type": "application/json"
},
body=json.dumps({
"move": move
})
)
def init(data):
food = []
enemies = []
grid = GameBoard(data['board']['height'], data['board']['width'])
check_food = GameBoard(data['board']['height'], data['board']['width'])
charlie = Snake(data['you'])
for i in data['board']['food']:
food.append([i['x'], i['y']])
grid.set_cell([i['x'], i['y']], FOOD)
check_food.set_cell([i['x'], i['y']], FOOD)
for snake in data['board']['snakes']:
snake = Snake(snake)
for coord in snake.coords:
grid.set_cell(coord, SNAKE)
check_food.set_cell(coord, SNAKE)
if snake.health < 100 and snake.length > 2 and data['turn'] >= 3:
grid.set_cell(snake.tail, SAFTEY)
check_food.set_cell(snake.tail, SAFTEY)
if snake.id != charlie.id:
for neighbour in neighbours(snake.head, grid, 0, snake.coords, [1]):
if snake.length >= charlie.length:
grid.set_cell(neighbour, DANGER)
check_food.set_cell(neighbour, DANGER)
enemies.append(snake)
food = sorted(food, key = lambda p: distance(p, charlie.head))
game_id = data['game']['id']
return game_id, grid, food, charlie, enemies, check_food
@bottle.post('/ping')
def ping():
return bottle.HTTPResponse(
status=200,
headers={
"Content-Type": "application/json"
},
body=json.dumps({})
)
@bottle.post('/start')
def start():
return bottle.HTTPResponse(
status=200,
headers={
"Content-Type": "application/json"
},
body=json.dumps({
"color": '#002080',
'headType': 'pixel',
'tailType': 'pixel'
})
)
@bottle.post('/move')
def move():
data = bottle.request.json
game_id, grid, food, charlie, enemies, check_food = init(data)
if len(enemies) > 2 or charlie.length <= 25 or charlie.health <= 60:
path = eat_food(charlie, grid, food, check_food)
if path:
return move_response(direction(path[0], path[1]))
if charlie.length >= 3:
path = find_my_tail(charlie, grid)
if path:
return move_response(direction(path[0], path[1]))
if not path:
path = find_enemy_tail(charlie, enemies, grid)
if path:
return move_response(direction(path[0], path[1]))
    if charlie.length >= 3:
        path = find_my_tail_emergency(charlie, grid)
if path:
return move_response(direction(path[0], path[1]))
if not path:
path = trouble(charlie, grid)
if path:
return move_response(direction(path[0], path[1]))
@bottle.post('/end')
def end():
return bottle.HTTPResponse(
status=200,
headers={
"Content-Type": "application/json"
},
body=json.dumps({})
)
application = bottle.default_app()
if __name__ == '__main__':
bottle.run(application, host=os.getenv('IP', '0.0.0.0'), port=os.getenv('PORT', '8080'), quiet = True)
| true
| true
|
7903304b2bafccb55d17a46f82882ca7708ef2ae
| 107
|
py
|
Python
|
lab_assignment/lab_bla/linux_mac/sample/matrix_transpose.py
|
caru1613/introduction_to_python_TEAMLAB_MOOC
|
e0ac95f7a6b889e7d18b7bdaaab49820e73d5477
|
[
"MIT"
] | null | null | null |
lab_assignment/lab_bla/linux_mac/sample/matrix_transpose.py
|
caru1613/introduction_to_python_TEAMLAB_MOOC
|
e0ac95f7a6b889e7d18b7bdaaab49820e73d5477
|
[
"MIT"
] | null | null | null |
lab_assignment/lab_bla/linux_mac/sample/matrix_transpose.py
|
caru1613/introduction_to_python_TEAMLAB_MOOC
|
e0ac95f7a6b889e7d18b7bdaaab49820e73d5477
|
[
"MIT"
] | null | null | null |
matrix_a = [[1,2,3], [4,5,6]]
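# zip(*matrix_a) regroups the i-th element of every row into column tuples,
# so the comprehension below builds the transpose of matrix_a.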
result = [ [ element for element in t] for t in zip(*matrix_a)]
print(result)
| 35.666667
| 63
| 0.635514
|
matrix_a = [[1,2,3], [4,5,6]]
result = [ [ element for element in t] for t in zip(*matrix_a)]
print(result)
| true
| true
|
79033105a332cf5f0ab126fa06b0320675587b9b
| 11,198
|
py
|
Python
|
google/ads/googleads/v4/services/services/ad_group_service/transports/grpc.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v4/services/services/ad_group_service/transports/grpc.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v4/services/services/ad_group_service/transports/grpc.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v4.resources.types import ad_group
from google.ads.googleads.v4.services.types import ad_group_service
from .base import AdGroupServiceTransport, DEFAULT_CLIENT_INFO
class AdGroupServiceGrpcTransport(AdGroupServiceTransport):
"""gRPC backend transport for AdGroupService.
Service to manage ad groups.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_ad_group(
self,
) -> Callable[[ad_group_service.GetAdGroupRequest], ad_group.AdGroup]:
r"""Return a callable for the get ad group method over gRPC.
Returns the requested ad group in full detail.
Returns:
Callable[[~.GetAdGroupRequest],
~.AdGroup]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_ad_group" not in self._stubs:
self._stubs["get_ad_group"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v4.services.AdGroupService/GetAdGroup",
request_serializer=ad_group_service.GetAdGroupRequest.serialize,
response_deserializer=ad_group.AdGroup.deserialize,
)
return self._stubs["get_ad_group"]
@property
def mutate_ad_groups(
self,
) -> Callable[
[ad_group_service.MutateAdGroupsRequest],
ad_group_service.MutateAdGroupsResponse,
]:
r"""Return a callable for the mutate ad groups method over gRPC.
Creates, updates, or removes ad groups. Operation
statuses are returned.
Returns:
Callable[[~.MutateAdGroupsRequest],
~.MutateAdGroupsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_ad_groups" not in self._stubs:
self._stubs["mutate_ad_groups"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v4.services.AdGroupService/MutateAdGroups",
request_serializer=ad_group_service.MutateAdGroupsRequest.serialize,
response_deserializer=ad_group_service.MutateAdGroupsResponse.deserialize,
)
return self._stubs["mutate_ad_groups"]
__all__ = ("AdGroupServiceGrpcTransport",)
| 41.169118
| 90
| 0.621361
|
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
from google import auth
from google.auth import credentials
from google.auth.transport.grpc import SslCredentials
import grpc
from google.ads.googleads.v4.resources.types import ad_group
from google.ads.googleads.v4.services.types import ad_group_service
from .base import AdGroupServiceTransport, DEFAULT_CLIENT_INFO
class AdGroupServiceGrpcTransport(AdGroupServiceTransport):
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
credentials = False
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {}
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
return self._grpc_channel
@property
def get_ad_group(
self,
) -> Callable[[ad_group_service.GetAdGroupRequest], ad_group.AdGroup]:
if "get_ad_group" not in self._stubs:
self._stubs["get_ad_group"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v4.services.AdGroupService/GetAdGroup",
request_serializer=ad_group_service.GetAdGroupRequest.serialize,
response_deserializer=ad_group.AdGroup.deserialize,
)
return self._stubs["get_ad_group"]
@property
def mutate_ad_groups(
self,
) -> Callable[
[ad_group_service.MutateAdGroupsRequest],
ad_group_service.MutateAdGroupsResponse,
]:
if "mutate_ad_groups" not in self._stubs:
self._stubs["mutate_ad_groups"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v4.services.AdGroupService/MutateAdGroups",
request_serializer=ad_group_service.MutateAdGroupsRequest.serialize,
response_deserializer=ad_group_service.MutateAdGroupsResponse.deserialize,
)
return self._stubs["mutate_ad_groups"]
__all__ = ("AdGroupServiceGrpcTransport",)
| true
| true
|
790331921901efd91310267d0e6875aef4916335
| 1,990
|
py
|
Python
|
score_system.py
|
charlieconneely/countdown
|
e941d8e89091a5bfcc5af77d5e0742725ce4b7fd
|
[
"MIT"
] | 1
|
2020-06-17T21:00:18.000Z
|
2020-06-17T21:00:18.000Z
|
score_system.py
|
charlieconneely/countdown
|
e941d8e89091a5bfcc5af77d5e0742725ce4b7fd
|
[
"MIT"
] | null | null | null |
score_system.py
|
charlieconneely/countdown
|
e941d8e89091a5bfcc5af77d5e0742725ce4b7fd
|
[
"MIT"
] | 2
|
2020-06-09T18:31:55.000Z
|
2020-06-09T18:33:29.000Z
|
# Charlie Conneely
# Score Keeper
from player import Player
ranks_file = "rankings.txt"
class ScoreKeeper:
def __init__(self):
self.ranks = []
"""
Check if player score ranks against scores in rankings.txt
"""
def check_ranking(self, p):
self.populate_ranks_array(ranks_file)
# check score against rankings
top5 = self.compare_score(p)
if top5:
print("Well Done! You ranked Top 5!")
print("\nNew Rankings:")
for i in self.ranks:
print(i.name + " - " + str(i.score))
self.append_file(ranks_file)
else:
print("Sorry, your score didn't rank top 5!")
print("\nCurrent Rankings:")
for i in self.ranks:
print(i.name + " - " + str(i.score))
# Clear ranks array
self.ranks = []
"""
Append ranks file with new score
"""
def append_file(self, rfile):
with open(rfile, 'w') as file:
for p in self.ranks:
file.write(str(p.name) + " " + str(p.score) + "\n")
"""
Check if score beats that of any currently ranked players
If true - Add player to rankings, resort array, pop last item from the end.
returns Boolean
"""
def compare_score(self, player):
does_rank = False
for p in self.ranks:
if (int(player.score) > int(p.score)):
does_rank = True
if does_rank:
self.ranks.append(player)
# sort ranks array by scores
self.ranks.sort(key=lambda p: int(p.score), reverse=True)
# remove the last item
self.ranks.pop()
return does_rank
"""
Populate local array with scores from txt file
"""
def populate_ranks_array(self, scores_file):
with open(scores_file) as f:
for line in f:
(n, s) = line.split()
self.ranks.append(Player(n,s))
| 28.428571
| 80
| 0.548241
|
from player import Player
ranks_file = "rankings.txt"
class ScoreKeeper:
def __init__(self):
self.ranks = []
def check_ranking(self, p):
self.populate_ranks_array(ranks_file)
top5 = self.compare_score(p)
if top5:
print("Well Done! You ranked Top 5!")
print("\nNew Rankings:")
for i in self.ranks:
print(i.name + " - " + str(i.score))
self.append_file(ranks_file)
else:
print("Sorry, your score didn't rank top 5!")
print("\nCurrent Rankings:")
for i in self.ranks:
print(i.name + " - " + str(i.score))
# Clear ranks array
self.ranks = []
def append_file(self, rfile):
with open(rfile, 'w') as file:
for p in self.ranks:
file.write(str(p.name) + " " + str(p.score) + "\n")
def compare_score(self, player):
does_rank = False
for p in self.ranks:
if (int(player.score) > int(p.score)):
does_rank = True
if does_rank:
self.ranks.append(player)
# sort ranks array by scores
self.ranks.sort(key=lambda p: int(p.score), reverse=True)
# remove the last item
self.ranks.pop()
return does_rank
def populate_ranks_array(self, scores_file):
with open(scores_file) as f:
for line in f:
(n, s) = line.split()
self.ranks.append(Player(n,s))
| true
| true
|
790332e6267a04177d813875e9e0670bc4500d5e
| 1,487
|
py
|
Python
|
python/etc/preprocessing/norway/norway_preprocessing.py
|
sma-h/openapc-de
|
0ec2d42d525219d801f71538f5b30ca6fecd9d3a
|
[
"Cube"
] | 89
|
2015-02-13T13:46:06.000Z
|
2022-03-13T16:42:44.000Z
|
python/etc/preprocessing/norway/norway_preprocessing.py
|
sma-h/openapc-de
|
0ec2d42d525219d801f71538f5b30ca6fecd9d3a
|
[
"Cube"
] | 91
|
2015-03-12T13:31:36.000Z
|
2022-01-14T07:37:37.000Z
|
python/etc/preprocessing/norway/norway_preprocessing.py
|
sma-h/openapc-de
|
0ec2d42d525219d801f71538f5b30ca6fecd9d3a
|
[
"Cube"
] | 138
|
2015-03-04T15:23:43.000Z
|
2022-03-09T15:11:52.000Z
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import argparse
from os import path
import sys
AVG_YEARLY_CONVERSION_RATES = {
"2015": 0.1119,
"2016": 0.1077
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("source_file")
args = parser.parse_args()
result = oat.analyze_csv_file(args.source_file, 500)
if result["success"]:
csv_analysis = result["data"]
print csv_analysis
else:
print result["error_msg"]
sys.exit()
dialect = csv_analysis.dialect
csv_file = open(args.source_file, "r")
reader = oat.UnicodeDictReader(csv_file, dialect=dialect)
fieldnames = reader.reader.fieldnames
modified_content = [fieldnames]
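    # Rewrite each NOK amount as EUR, using the average NOK->EUR conversion
    # rate of the publication year.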
for line in reader:
rate = AVG_YEARLY_CONVERSION_RATES[line["Year"]]
euro_value = float(line["APC in NOK"]) * rate
line["APC in NOK"] = str(round(euro_value, 2))
line_as_list = [line[field] for field in fieldnames]
modified_content.append(line_as_list)
csv_file.close()
with open('out.csv', 'w') as out:
quotemask = [False, True, True, True, True, True, False, True, False]
writer = oat.OpenAPCUnicodeWriter(out, quotemask, False, True)
writer.write_rows(modified_content)
if __name__ == '__main__' and __package__ is None:
sys.path.append(path.dirname(path.dirname(path.dirname(path.dirname(path.abspath(__file__))))))
import openapc_toolkit as oat
main()
| 29.156863
| 99
| 0.656355
|
import argparse
from os import path
import sys
AVG_YEARLY_CONVERSION_RATES = {
"2015": 0.1119,
"2016": 0.1077
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("source_file")
args = parser.parse_args()
result = oat.analyze_csv_file(args.source_file, 500)
if result["success"]:
csv_analysis = result["data"]
print csv_analysis
else:
print result["error_msg"]
sys.exit()
dialect = csv_analysis.dialect
csv_file = open(args.source_file, "r")
reader = oat.UnicodeDictReader(csv_file, dialect=dialect)
fieldnames = reader.reader.fieldnames
modified_content = [fieldnames]
for line in reader:
rate = AVG_YEARLY_CONVERSION_RATES[line["Year"]]
euro_value = float(line["APC in NOK"]) * rate
line["APC in NOK"] = str(round(euro_value, 2))
line_as_list = [line[field] for field in fieldnames]
modified_content.append(line_as_list)
csv_file.close()
with open('out.csv', 'w') as out:
quotemask = [False, True, True, True, True, True, False, True, False]
writer = oat.OpenAPCUnicodeWriter(out, quotemask, False, True)
writer.write_rows(modified_content)
if __name__ == '__main__' and __package__ is None:
sys.path.append(path.dirname(path.dirname(path.dirname(path.dirname(path.abspath(__file__))))))
import openapc_toolkit as oat
main()
| false
| true
|
7903335abe3e4f390662c49d8c6f2320d97652a9
| 393
|
py
|
Python
|
1. Algorithmic Toolbox/week5_dynamic_programming1/1_money_change_again.py
|
vishweshwartyagi/Data-Structures-and-Algorithms-UCSD
|
de942b3a0eb2bf56f949f47c297fad713aa81489
|
[
"MIT"
] | null | null | null |
1. Algorithmic Toolbox/week5_dynamic_programming1/1_money_change_again.py
|
vishweshwartyagi/Data-Structures-and-Algorithms-UCSD
|
de942b3a0eb2bf56f949f47c297fad713aa81489
|
[
"MIT"
] | null | null | null |
1. Algorithmic Toolbox/week5_dynamic_programming1/1_money_change_again.py
|
vishweshwartyagi/Data-Structures-and-Algorithms-UCSD
|
de942b3a0eb2bf56f949f47c297fad713aa81489
|
[
"MIT"
] | null | null | null |
# Uses python3
import sys
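# Bottom-up DP over amounts: t[j] = minimum number of coins summing to j,
# via the recurrence t[j] = min(t[j], 1 + t[j-c]) over all denominations c <= j.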
def get_change(money, coins):
t = [j+1 for j in range(money+1)]
# boundary condition
t[0] = 0
for j in range(1, money+1):
for c in coins:
if c <= j:
t[j] = min(t[j], 1+t[j-c])
return t[money]
if __name__ == '__main__':
coins = [1, 3, 4]
money = int(input())
print(get_change(money, coins))
| 18.714286
| 42
| 0.516539
|
import sys
def get_change(money, coins):
t = [j+1 for j in range(money+1)]
t[0] = 0
for j in range(1, money+1):
for c in coins:
if c <= j:
t[j] = min(t[j], 1+t[j-c])
return t[money]
if __name__ == '__main__':
coins = [1, 3, 4]
money = int(input())
print(get_change(money, coins))
| true
| true
|
79033470995dc52bd40a847629f57fcf0205e8fd
| 10,160
|
py
|
Python
|
torch/utils/data/sampler.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 5
|
2021-08-17T17:44:20.000Z
|
2021-08-21T05:03:42.000Z
|
torch/utils/data/sampler.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 1
|
2021-04-22T18:37:42.000Z
|
2021-04-28T00:53:25.000Z
|
torch/utils/data/sampler.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 1
|
2022-01-19T10:55:49.000Z
|
2022-01-19T10:55:49.000Z
|
import torch
from torch import Tensor
from typing import Iterator, Optional, Sequence, List, TypeVar, Generic, Sized
T_co = TypeVar('T_co', covariant=True)
class Sampler(Generic[T_co]):
r"""Base class for all Samplers.
Every Sampler subclass has to provide an :meth:`__iter__` method, providing a
way to iterate over indices of dataset elements, and a :meth:`__len__` method
that returns the length of the returned iterators.
.. note:: The :meth:`__len__` method isn't strictly required by
:class:`~torch.utils.data.DataLoader`, but is expected in any
calculation involving the length of a :class:`~torch.utils.data.DataLoader`.
"""
def __init__(self, data_source: Optional[Sized]) -> None:
pass
def __iter__(self) -> Iterator[T_co]:
raise NotImplementedError
# NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
#
# Many times we have an abstract class representing a collection/iterable of
# data, e.g., `torch.utils.data.Sampler`, with its subclasses optionally
# implementing a `__len__` method. In such cases, we must make sure to not
# provide a default implementation, because both straightforward default
# implementations have their issues:
#
# + `return NotImplemented`:
# Calling `len(subclass_instance)` raises:
# TypeError: 'NotImplementedType' object cannot be interpreted as an integer
#
# + `raise NotImplementedError()`:
# This prevents triggering some fallback behavior. E.g., the built-in
# `list(X)` tries to call `len(X)` first, and executes a different code
# path if the method is not found or `NotImplemented` is returned, while
    #   raising a `NotImplementedError` will propagate and make the call
    #   fail where it could have used `__iter__` to complete the call.
#
# Thus, the only two sensible things to do are
#
# + **not** provide a default `__len__`.
#
# + raise a `TypeError` instead, which is what Python uses when users call
# a method that is not defined on an object.
# (@ssnl verifies that this works on at least Python 3.7.)
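    #
    # A minimal illustrative subclass (a sketch, not part of this file's API)
    # showing the expected interface:
    #
    #     class RangeSampler(Sampler[int]):
    #         def __init__(self, n: int) -> None:
    #             self.n = n
    #         def __iter__(self) -> Iterator[int]:
    #             return iter(range(self.n))
    #         def __len__(self) -> int:
    #             return self.n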
class SequentialSampler(Sampler[int]):
r"""Samples elements sequentially, always in the same order.
Args:
data_source (Dataset): dataset to sample from
"""
data_source: Sized
def __init__(self, data_source: Sized) -> None:
self.data_source = data_source
def __iter__(self) -> Iterator[int]:
return iter(range(len(self.data_source)))
def __len__(self) -> int:
return len(self.data_source)
class RandomSampler(Sampler[int]):
r"""Samples elements randomly. If without replacement, then sample from a shuffled dataset.
If with replacement, then user can specify :attr:`num_samples` to draw.
Args:
data_source (Dataset): dataset to sample from
replacement (bool): samples are drawn on-demand with replacement if ``True``, default=``False``
num_samples (int): number of samples to draw, default=`len(dataset)`. This argument
is supposed to be specified only when `replacement` is ``True``.
generator (Generator): Generator used in sampling.
"""
data_source: Sized
replacement: bool
def __init__(self, data_source: Sized, replacement: bool = False,
num_samples: Optional[int] = None, generator=None) -> None:
self.data_source = data_source
self.replacement = replacement
self._num_samples = num_samples
self.generator = generator
if not isinstance(self.replacement, bool):
raise TypeError("replacement should be a boolean value, but got "
"replacement={}".format(self.replacement))
if self._num_samples is not None and not replacement:
raise ValueError("With replacement=False, num_samples should not be specified, "
"since a random permute will be performed.")
if not isinstance(self.num_samples, int) or self.num_samples <= 0:
raise ValueError("num_samples should be a positive integer "
"value, but got num_samples={}".format(self.num_samples))
@property
def num_samples(self) -> int:
# dataset size might change at runtime
if self._num_samples is None:
return len(self.data_source)
return self._num_samples
def __iter__(self) -> Iterator[int]:
n = len(self.data_source)
if self.generator is None:
self.generator = torch.Generator()
self.generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))
if self.replacement:
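            # Sample with replacement in chunks of 32 indices per randint call,
            # plus one final partial chunk, for num_samples draws in total.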
for _ in range(self.num_samples // 32):
yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=self.generator).tolist()
yield from torch.randint(high=n, size=(self.num_samples % 32,), dtype=torch.int64, generator=self.generator).tolist()
else:
yield from torch.randperm(n, generator=self.generator).tolist()
def __len__(self) -> int:
return self.num_samples
class SubsetRandomSampler(Sampler[int]):
r"""Samples elements randomly from a given list of indices, without replacement.
Args:
indices (sequence): a sequence of indices
generator (Generator): Generator used in sampling.
"""
indices: Sequence[int]
def __init__(self, indices: Sequence[int], generator=None) -> None:
self.indices = indices
self.generator = generator
def __iter__(self) -> Iterator[int]:
return (self.indices[i] for i in torch.randperm(len(self.indices), generator=self.generator))
def __len__(self) -> int:
return len(self.indices)
class WeightedRandomSampler(Sampler[int]):
r"""Samples elements from ``[0,..,len(weights)-1]`` with given probabilities (weights).
Args:
        weights (sequence) : a sequence of weights, not necessarily summing up to one
num_samples (int): number of samples to draw
replacement (bool): if ``True``, samples are drawn with replacement.
If not, they are drawn without replacement, which means that when a
sample index is drawn for a row, it cannot be drawn again for that row.
generator (Generator): Generator used in sampling.
Example:
>>> list(WeightedRandomSampler([0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True))
[4, 4, 1, 4, 5]
>>> list(WeightedRandomSampler([0.9, 0.4, 0.05, 0.2, 0.3, 0.1], 5, replacement=False))
[0, 1, 4, 3, 2]
"""
weights: Tensor
num_samples: int
replacement: bool
def __init__(self, weights: Sequence[float], num_samples: int,
replacement: bool = True, generator=None) -> None:
if not isinstance(num_samples, int) or isinstance(num_samples, bool) or \
num_samples <= 0:
raise ValueError("num_samples should be a positive integer "
"value, but got num_samples={}".format(num_samples))
if not isinstance(replacement, bool):
raise ValueError("replacement should be a boolean value, but got "
"replacement={}".format(replacement))
self.weights = torch.as_tensor(weights, dtype=torch.double)
self.num_samples = num_samples
self.replacement = replacement
self.generator = generator
def __iter__(self) -> Iterator[int]:
rand_tensor = torch.multinomial(self.weights, self.num_samples, self.replacement, generator=self.generator)
return iter(rand_tensor.tolist())
def __len__(self) -> int:
return self.num_samples
class BatchSampler(Sampler[List[int]]):
r"""Wraps another sampler to yield a mini-batch of indices.
Args:
sampler (Sampler or Iterable): Base sampler. Can be any iterable object
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``
Example:
>>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=True))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
def __init__(self, sampler: Sampler[int], batch_size: int, drop_last: bool) -> None:
# Since collections.abc.Iterable does not check for `__getitem__`, which
# is one way for an object to be an iterable, we don't do an `isinstance`
# check here.
if not isinstance(batch_size, int) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integer value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
def __iter__(self) -> Iterator[List[int]]:
batch = []
for idx in self.sampler:
batch.append(idx)
if len(batch) == self.batch_size:
yield batch
batch = []
if len(batch) > 0 and not self.drop_last:
yield batch
def __len__(self) -> int:
# Can only be called if self.sampler has __len__ implemented
# We cannot enforce this condition, so we turn off typechecking for the
# implementation below.
# Somewhat related: see NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
if self.drop_last:
return len(self.sampler) // self.batch_size # type: ignore[arg-type]
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size # type: ignore[arg-type]
| 41.983471
| 129
| 0.638189
|
import torch
from torch import Tensor
from typing import Iterator, Optional, Sequence, List, TypeVar, Generic, Sized
T_co = TypeVar('T_co', covariant=True)
class Sampler(Generic[T_co]):
def __init__(self, data_source: Optional[Sized]) -> None:
pass
def __iter__(self) -> Iterator[T_co]:
raise NotImplementedError
class SequentialSampler(Sampler[int]):
data_source: Sized
def __init__(self, data_source: Sized) -> None:
self.data_source = data_source
def __iter__(self) -> Iterator[int]:
return iter(range(len(self.data_source)))
def __len__(self) -> int:
return len(self.data_source)
class RandomSampler(Sampler[int]):
data_source: Sized
replacement: bool
def __init__(self, data_source: Sized, replacement: bool = False,
num_samples: Optional[int] = None, generator=None) -> None:
self.data_source = data_source
self.replacement = replacement
self._num_samples = num_samples
self.generator = generator
if not isinstance(self.replacement, bool):
raise TypeError("replacement should be a boolean value, but got "
"replacement={}".format(self.replacement))
if self._num_samples is not None and not replacement:
raise ValueError("With replacement=False, num_samples should not be specified, "
"since a random permute will be performed.")
if not isinstance(self.num_samples, int) or self.num_samples <= 0:
raise ValueError("num_samples should be a positive integer "
"value, but got num_samples={}".format(self.num_samples))
@property
def num_samples(self) -> int:
if self._num_samples is None:
return len(self.data_source)
return self._num_samples
def __iter__(self) -> Iterator[int]:
n = len(self.data_source)
if self.generator is None:
self.generator = torch.Generator()
self.generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))
if self.replacement:
for _ in range(self.num_samples // 32):
yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=self.generator).tolist()
yield from torch.randint(high=n, size=(self.num_samples % 32,), dtype=torch.int64, generator=self.generator).tolist()
else:
yield from torch.randperm(n, generator=self.generator).tolist()
def __len__(self) -> int:
return self.num_samples
class SubsetRandomSampler(Sampler[int]):
indices: Sequence[int]
def __init__(self, indices: Sequence[int], generator=None) -> None:
self.indices = indices
self.generator = generator
def __iter__(self) -> Iterator[int]:
return (self.indices[i] for i in torch.randperm(len(self.indices), generator=self.generator))
def __len__(self) -> int:
return len(self.indices)
class WeightedRandomSampler(Sampler[int]):
weights: Tensor
num_samples: int
replacement: bool
def __init__(self, weights: Sequence[float], num_samples: int,
replacement: bool = True, generator=None) -> None:
if not isinstance(num_samples, int) or isinstance(num_samples, bool) or \
num_samples <= 0:
raise ValueError("num_samples should be a positive integer "
"value, but got num_samples={}".format(num_samples))
if not isinstance(replacement, bool):
raise ValueError("replacement should be a boolean value, but got "
"replacement={}".format(replacement))
self.weights = torch.as_tensor(weights, dtype=torch.double)
self.num_samples = num_samples
self.replacement = replacement
self.generator = generator
def __iter__(self) -> Iterator[int]:
rand_tensor = torch.multinomial(self.weights, self.num_samples, self.replacement, generator=self.generator)
return iter(rand_tensor.tolist())
def __len__(self) -> int:
return self.num_samples
class BatchSampler(Sampler[List[int]]):
def __init__(self, sampler: Sampler[int], batch_size: int, drop_last: bool) -> None:
if not isinstance(batch_size, int) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integer value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
def __iter__(self) -> Iterator[List[int]]:
batch = []
for idx in self.sampler:
batch.append(idx)
if len(batch) == self.batch_size:
yield batch
batch = []
if len(batch) > 0 and not self.drop_last:
yield batch
def __len__(self) -> int:
# Can only be called if self.sampler has __len__ implemented
# We cannot enforce this condition, so we turn off typechecking for the
# implementation below.
# Somewhat related: see NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
if self.drop_last:
return len(self.sampler) // self.batch_size # type: ignore[arg-type]
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size # type: ignore[arg-type]
| true
| true
|
790334aaa6224426d91a6b965c9ea7f4e423a405
| 4,352
|
py
|
Python
|
_unittests/ut_talk_examples/test_pydata2016_animation.py
|
sdpython/jupytalk
|
34abdf128de24becb21a9f08f243c3a74dadbfd9
|
[
"MIT"
] | null | null | null |
_unittests/ut_talk_examples/test_pydata2016_animation.py
|
sdpython/jupytalk
|
34abdf128de24becb21a9f08f243c3a74dadbfd9
|
[
"MIT"
] | 16
|
2016-11-13T19:52:35.000Z
|
2021-12-29T10:59:41.000Z
|
_unittests/ut_talk_examples/test_pydata2016_animation.py
|
sdpython/jupytalk
|
34abdf128de24becb21a9f08f243c3a74dadbfd9
|
[
"MIT"
] | 4
|
2016-09-10T10:44:50.000Z
|
2021-09-22T16:28:56.000Z
|
"""
@brief test log(time=20s)
"""
import sys
import os
import unittest
from pyquickhelper.loghelper import fLOG, run_cmd
from pyquickhelper.pycode import get_temp_folder, fix_tkinter_issues_virtualenv, skipif_appveyor, skipif_travis
from pyquickhelper.pycode import add_missing_development_version
class TestPyData2016Animation(unittest.TestCase):
@skipif_appveyor("no ffmpeg installed")
@skipif_travis("issue with datashader.bokeh_ext, skipping")
@skipif_appveyor("issue with pyproj")
def test_matplotlib_example(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
progs = ["ffmpeg"]
if not sys.platform.startswith("win"):
progs.append("avconv")
errs = []
prog = None
for prog in progs:
out, err = run_cmd(prog, wait=True, fLOG=fLOG)
exps = "usage:"
            if err is None or len(err) == 0 or (exps not in out and exps not in err):
errs.append((prog, err))
else:
break
if len(errs) >= len(progs):
if sys.platform.startswith("win"):
fLOG("download ffmpeg")
add_missing_development_version(
["pyensae"], __file__, hide=True)
from pyensae.datasource import download_data
download_data("ffmpeg.zip", website="xd")
else:
raise FileNotFoundError(
"Unable to find '{1}'.\nPATH='{0}'\n--------\n[OUT]\n{2}\n[ERR]\n{3}".format(
os.environ["PATH"], prog, out,
"\n----\n".join("{0}:\n{1}".format(*_) for _ in errs)))
temp = get_temp_folder(__file__, "temp_example_example")
fix_tkinter_issues_virtualenv()
# update a distribution based on new data.
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as ss
from matplotlib.animation import FuncAnimation, writers
# To get the list of available writers
if not writers.is_available(prog):
writers.register(prog)
fLOG(writers.list())
class UpdateDist:
def __init__(self, ax, prob=0.5):
self.success = 0
self.prob = prob
self.line, = ax.plot([], [], 'k-')
self.x = np.linspace(0, 1, 200)
self.ax = ax
# Set up plot parameters
self.ax.set_xlim(0, 1)
self.ax.set_ylim(0, 15)
self.ax.grid(True)
# This vertical line represents the theoretical value, to
# which the plotted distribution should converge.
self.ax.axvline(prob, linestyle='--', color='black')
def init(self):
self.success = 0
self.line.set_data([], [])
return self.line,
def __call__(self, i):
# This way the plot can continuously run and we just keep
# watching new realizations of the process
if i == 0:
return self.init()
                # Decide success by comparing a uniform random draw against
                # the threshold probability
if np.random.rand(1,) < self.prob: # pylint: disable=W0143
self.success += 1
y = ss.beta.pdf(self.x, self.success + 1,
(i - self.success) + 1)
self.line.set_data(self.x, y)
return self.line,
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ud = UpdateDist(ax, prob=0.7)
anim = FuncAnimation(fig, ud, frames=np.arange(100), init_func=ud.init,
interval=100, blit=True)
try:
Writer = writers[prog]
except KeyError as e:
if prog == "avconv":
from matplotlib.animation import AVConvWriter
Writer = AVConvWriter
else:
raise e
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
anim.save(os.path.join(temp, 'lines2.mp4'), writer=writer)
plt.close('all')
fLOG("end")
if __name__ == "__main__":
unittest.main()
| 34.816
| 111
| 0.535156
|
import sys
import os
import unittest
from pyquickhelper.loghelper import fLOG, run_cmd
from pyquickhelper.pycode import get_temp_folder, fix_tkinter_issues_virtualenv, skipif_appveyor, skipif_travis
from pyquickhelper.pycode import add_missing_development_version
class TestPyData2016Animation(unittest.TestCase):
@skipif_appveyor("no ffmpeg installed")
@skipif_travis("issue with datashader.bokeh_ext, skipping")
@skipif_appveyor("issue with pyproj")
def test_matplotlib_example(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
progs = ["ffmpeg"]
if not sys.platform.startswith("win"):
progs.append("avconv")
errs = []
prog = None
for prog in progs:
out, err = run_cmd(prog, wait=True, fLOG=fLOG)
exps = "usage:"
            if err is None or len(err) == 0 or (exps not in out and exps not in err):
errs.append((prog, err))
else:
break
if len(errs) >= len(progs):
if sys.platform.startswith("win"):
fLOG("download ffmpeg")
add_missing_development_version(
["pyensae"], __file__, hide=True)
from pyensae.datasource import download_data
download_data("ffmpeg.zip", website="xd")
else:
raise FileNotFoundError(
"Unable to find '{1}'.\nPATH='{0}'\n--------\n[OUT]\n{2}\n[ERR]\n{3}".format(
os.environ["PATH"], prog, out,
"\n----\n".join("{0}:\n{1}".format(*_) for _ in errs)))
temp = get_temp_folder(__file__, "temp_example_example")
fix_tkinter_issues_virtualenv()
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as ss
from matplotlib.animation import FuncAnimation, writers
if not writers.is_available(prog):
writers.register(prog)
fLOG(writers.list())
class UpdateDist:
def __init__(self, ax, prob=0.5):
self.success = 0
self.prob = prob
self.line, = ax.plot([], [], 'k-')
self.x = np.linspace(0, 1, 200)
self.ax = ax
self.ax.set_xlim(0, 1)
self.ax.set_ylim(0, 15)
self.ax.grid(True)
self.ax.axvline(prob, linestyle='--', color='black')
def init(self):
self.success = 0
self.line.set_data([], [])
return self.line,
def __call__(self, i):
if i == 0:
return self.init()
if np.random.rand(1,) < self.prob:
self.success += 1
y = ss.beta.pdf(self.x, self.success + 1,
(i - self.success) + 1)
self.line.set_data(self.x, y)
return self.line,
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ud = UpdateDist(ax, prob=0.7)
anim = FuncAnimation(fig, ud, frames=np.arange(100), init_func=ud.init,
interval=100, blit=True)
try:
Writer = writers[prog]
except KeyError as e:
if prog == "avconv":
from matplotlib.animation import AVConvWriter
Writer = AVConvWriter
else:
raise e
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
anim.save(os.path.join(temp, 'lines2.mp4'), writer=writer)
plt.close('all')
fLOG("end")
if __name__ == "__main__":
unittest.main()
| true
| true
|
7903355e192c505c3666b728bed050fc189a8e2b
| 1,763
|
py
|
Python
|
pybgl/prune_incidence_automaton.py
|
nokia/PyBGL
|
e9868361e5a3870b5247872a8c8c91a1c065fe84
|
[
"BSD-3-Clause"
] | 11
|
2019-05-20T16:47:03.000Z
|
2021-12-17T10:24:22.000Z
|
pybgl/prune_incidence_automaton.py
|
nokia/PyBGL
|
e9868361e5a3870b5247872a8c8c91a1c065fe84
|
[
"BSD-3-Clause"
] | null | null | null |
pybgl/prune_incidence_automaton.py
|
nokia/PyBGL
|
e9868361e5a3870b5247872a8c8c91a1c065fe84
|
[
"BSD-3-Clause"
] | 3
|
2019-05-24T02:24:30.000Z
|
2020-03-17T09:55:40.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Marc-Olivier Buob, Maxime Raynal"
__maintainer__ = "Marc-Olivier Buob, Maxime Raynal"
__email__ = "{marc-olivier.buob,maxime.raynal}@nokia.com"
__copyright__ = "Copyright (C) 2020, Nokia"
__license__ = "BSD-3"
from collections import defaultdict
from pybgl.graph import Graph
from pybgl.incidence_automaton import (
IncidenceAutomaton, finals, initial, remove_vertex, vertices
)
from pybgl.depth_first_search import depth_first_search_graph
from pybgl.property_map import make_assoc_property_map
from pybgl.reverse import reverse_graph
def find_reachable_vertices(g: Graph, sources: set) -> set:
"""
Returns the set of vertices of a graph which are reachable
from a set of source vertices.
Args:
g: Graph, an instance of `Graph`
sources: set, a set of integers representing the source vertices
Returns:
The set of vertices that are reachable from the source vertices
"""
map_vcolor = defaultdict(int)
pmap_vcolor = make_assoc_property_map(map_vcolor)
depth_first_search_graph(g, sources, pmap_vcolor=pmap_vcolor)
return set(map_vcolor.keys())
def prune_incidence_automaton(g: IncidenceAutomaton):
"""
Prunes the vertices of an IncidenceAutomaton that cannot be reached
    from the initial state, or that cannot reach a final state.
Args:
g: IncidenceAutomaton, an instance of IncidenceAutomaton
"""
to_keep = find_reachable_vertices(g, {initial(g)})
reverse_graph(g)
to_keep &= find_reachable_vertices(g, finals(g))
reverse_graph(g)
to_remove = set(vertices(g)) - to_keep
for q in to_remove:
remove_vertex(q, g)
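# Illustrative usage sketch (the automaton construction itself is assumed and not
# part of this module): given an IncidenceAutomaton `g` built elsewhere,
#
#     prune_incidence_automaton(g)
#
# keeps only the states reachable from initial(g) that can also reach a state in
# finals(g) (computed on the reversed graph), and removes every other state.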
| 35.979592
| 72
| 0.708452
|
__author__ = "Marc-Olivier Buob, Maxime Raynal"
__maintainer__ = "Marc-Olivier Buob, Maxime Raynal"
__email__ = "{marc-olivier.buob,maxime.raynal}@nokia.com"
__copyright__ = "Copyright (C) 2020, Nokia"
__license__ = "BSD-3"
from collections import defaultdict
from pybgl.graph import Graph
from pybgl.incidence_automaton import (
IncidenceAutomaton, finals, initial, remove_vertex, vertices
)
from pybgl.depth_first_search import depth_first_search_graph
from pybgl.property_map import make_assoc_property_map
from pybgl.reverse import reverse_graph
def find_reachable_vertices(g: Graph, sources: set) -> set:
map_vcolor = defaultdict(int)
pmap_vcolor = make_assoc_property_map(map_vcolor)
depth_first_search_graph(g, sources, pmap_vcolor=pmap_vcolor)
return set(map_vcolor.keys())
def prune_incidence_automaton(g: IncidenceAutomaton):
to_keep = find_reachable_vertices(g, {initial(g)})
reverse_graph(g)
to_keep &= find_reachable_vertices(g, finals(g))
reverse_graph(g)
to_remove = set(vertices(g)) - to_keep
for q in to_remove:
remove_vertex(q, g)
| true
| true
|
7903356d94d14d81e2b9f370eafe7346ce241eca
| 9,677
|
py
|
Python
|
sdk/python/pulumi_aws/apigateway/usage_plan.py
|
JakeGinnivan/pulumi-aws
|
c91ef78932964ac74eda7f5da81f65b0f1798c93
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/apigateway/usage_plan.py
|
JakeGinnivan/pulumi-aws
|
c91ef78932964ac74eda7f5da81f65b0f1798c93
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/apigateway/usage_plan.py
|
JakeGinnivan/pulumi-aws
|
c91ef78932964ac74eda7f5da81f65b0f1798c93
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class UsagePlan(pulumi.CustomResource):
api_stages: pulumi.Output[list]
"""
The associated API stages of the usage plan.
* `api_id` (`str`) - API Id of the associated API stage in a usage plan.
* `stage` (`str`) - API stage name of the associated API stage in a usage plan.
"""
arn: pulumi.Output[str]
"""
Amazon Resource Name (ARN)
"""
description: pulumi.Output[str]
"""
The description of a usage plan.
"""
name: pulumi.Output[str]
"""
The name of the usage plan.
"""
product_code: pulumi.Output[str]
"""
    The AWS Marketplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.
"""
quota_settings: pulumi.Output[dict]
"""
The quota settings of the usage plan.
* `limit` (`float`) - The maximum number of requests that can be made in a given time period.
* `offset` (`float`) - The number of requests subtracted from the given limit in the initial time period.
* `period` (`str`) - The time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH".
"""
tags: pulumi.Output[dict]
"""
Key-value map of resource tags
"""
throttle_settings: pulumi.Output[dict]
"""
The throttling limits of the usage plan.
* `burstLimit` (`float`) - The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
* `rate_limit` (`float`) - The API request steady-state rate limit.
"""
def __init__(__self__, resource_name, opts=None, api_stages=None, description=None, name=None, product_code=None, quota_settings=None, tags=None, throttle_settings=None, __props__=None, __name__=None, __opts__=None):
"""
Provides an API Gateway Usage Plan.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
myapi = aws.apigateway.RestApi("myapi")
dev = aws.apigateway.Deployment("dev",
rest_api=myapi.id,
stage_name="dev")
prod = aws.apigateway.Deployment("prod",
rest_api=myapi.id,
stage_name="prod")
my_usage_plan = aws.apigateway.UsagePlan("myUsagePlan",
api_stages=[
{
"api_id": myapi.id,
"stage": dev.stage_name,
},
{
"api_id": myapi.id,
"stage": prod.stage_name,
},
],
description="my description",
product_code="MYCODE",
quota_settings={
"limit": 20,
"offset": 2,
"period": "WEEK",
},
throttle_settings={
"burstLimit": 5,
"rate_limit": 10,
})
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] api_stages: The associated API stages of the usage plan.
:param pulumi.Input[str] description: The description of a usage plan.
:param pulumi.Input[str] name: The name of the usage plan.
        :param pulumi.Input[str] product_code: The AWS Marketplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.
:param pulumi.Input[dict] quota_settings: The quota settings of the usage plan.
:param pulumi.Input[dict] tags: Key-value map of resource tags
:param pulumi.Input[dict] throttle_settings: The throttling limits of the usage plan.
The **api_stages** object supports the following:
* `api_id` (`pulumi.Input[str]`) - API Id of the associated API stage in a usage plan.
* `stage` (`pulumi.Input[str]`) - API stage name of the associated API stage in a usage plan.
The **quota_settings** object supports the following:
* `limit` (`pulumi.Input[float]`) - The maximum number of requests that can be made in a given time period.
* `offset` (`pulumi.Input[float]`) - The number of requests subtracted from the given limit in the initial time period.
* `period` (`pulumi.Input[str]`) - The time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH".
The **throttle_settings** object supports the following:
* `burstLimit` (`pulumi.Input[float]`) - The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
* `rate_limit` (`pulumi.Input[float]`) - The API request steady-state rate limit.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['api_stages'] = api_stages
__props__['description'] = description
__props__['name'] = name
__props__['product_code'] = product_code
__props__['quota_settings'] = quota_settings
__props__['tags'] = tags
__props__['throttle_settings'] = throttle_settings
__props__['arn'] = None
super(UsagePlan, __self__).__init__(
'aws:apigateway/usagePlan:UsagePlan',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, api_stages=None, arn=None, description=None, name=None, product_code=None, quota_settings=None, tags=None, throttle_settings=None):
"""
Get an existing UsagePlan resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] api_stages: The associated API stages of the usage plan.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN)
:param pulumi.Input[str] description: The description of a usage plan.
:param pulumi.Input[str] name: The name of the usage plan.
        :param pulumi.Input[str] product_code: The AWS Marketplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.
:param pulumi.Input[dict] quota_settings: The quota settings of the usage plan.
:param pulumi.Input[dict] tags: Key-value map of resource tags
:param pulumi.Input[dict] throttle_settings: The throttling limits of the usage plan.
The **api_stages** object supports the following:
* `api_id` (`pulumi.Input[str]`) - API Id of the associated API stage in a usage plan.
* `stage` (`pulumi.Input[str]`) - API stage name of the associated API stage in a usage plan.
The **quota_settings** object supports the following:
* `limit` (`pulumi.Input[float]`) - The maximum number of requests that can be made in a given time period.
* `offset` (`pulumi.Input[float]`) - The number of requests subtracted from the given limit in the initial time period.
* `period` (`pulumi.Input[str]`) - The time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH".
The **throttle_settings** object supports the following:
* `burstLimit` (`pulumi.Input[float]`) - The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
* `rate_limit` (`pulumi.Input[float]`) - The API request steady-state rate limit.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["api_stages"] = api_stages
__props__["arn"] = arn
__props__["description"] = description
__props__["name"] = name
__props__["product_code"] = product_code
__props__["quota_settings"] = quota_settings
__props__["tags"] = tags
__props__["throttle_settings"] = throttle_settings
return UsagePlan(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
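# Illustrative sketch (resource name and plan ID are hypothetical): an existing
# usage plan can be adopted into a Pulumi program with the static `get` method,
# e.g. `existing = UsagePlan.get("imported-plan", id="usage-plan-id")`.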
| 46.524038
| 225
| 0.644105
|
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class UsagePlan(pulumi.CustomResource):
api_stages: pulumi.Output[list]
arn: pulumi.Output[str]
description: pulumi.Output[str]
name: pulumi.Output[str]
product_code: pulumi.Output[str]
quota_settings: pulumi.Output[dict]
tags: pulumi.Output[dict]
throttle_settings: pulumi.Output[dict]
def __init__(__self__, resource_name, opts=None, api_stages=None, description=None, name=None, product_code=None, quota_settings=None, tags=None, throttle_settings=None, __props__=None, __name__=None, __opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['api_stages'] = api_stages
__props__['description'] = description
__props__['name'] = name
__props__['product_code'] = product_code
__props__['quota_settings'] = quota_settings
__props__['tags'] = tags
__props__['throttle_settings'] = throttle_settings
__props__['arn'] = None
super(UsagePlan, __self__).__init__(
'aws:apigateway/usagePlan:UsagePlan',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, api_stages=None, arn=None, description=None, name=None, product_code=None, quota_settings=None, tags=None, throttle_settings=None):
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["api_stages"] = api_stages
__props__["arn"] = arn
__props__["description"] = description
__props__["name"] = name
__props__["product_code"] = product_code
__props__["quota_settings"] = quota_settings
__props__["tags"] = tags
__props__["throttle_settings"] = throttle_settings
return UsagePlan(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true
| true
|
790335f67e5e6d53312a71ca4736eeb2dd481cc6
| 2,663
|
py
|
Python
|
examples/python/quickstart_sql.py
|
backwardn/delta
|
011c122f00f8e8772de57e06b7b3e8137e1f3701
|
[
"Apache-2.0"
] | 1
|
2021-01-26T21:37:11.000Z
|
2021-01-26T21:37:11.000Z
|
examples/python/quickstart_sql.py
|
jaceklaskowski/delta
|
87fecf37b68d44cf99a18cafc16a7092bb2a723a
|
[
"Apache-2.0"
] | null | null | null |
examples/python/quickstart_sql.py
|
jaceklaskowski/delta
|
87fecf37b68d44cf99a18cafc16a7092bb2a723a
|
[
"Apache-2.0"
] | null | null | null |
from pyspark.sql import Column, DataFrame, SparkSession, functions
from pyspark.sql.functions import *
from py4j.java_collections import MapConverter
from delta.tables import *
import shutil
import threading
tableName = "tbltestpython"
# Enable SQL/DML commands and Metastore tables for the current spark session.
# We need to set the following configs
spark = SparkSession.builder \
.appName("quickstart_sql") \
.master("local[*]") \
.config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") \
.config("spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog") \
.getOrCreate()
# Clear any previous runs
spark.sql("DROP TABLE IF EXISTS " + tableName)
spark.sql("DROP TABLE IF EXISTS newData")
try:
# Create a table
print("############# Creating a table ###############")
spark.sql("CREATE TABLE %s(id LONG) USING delta" % tableName)
spark.sql("INSERT INTO %s VALUES 0, 1, 2, 3, 4" % tableName)
# Read the table
print("############ Reading the table ###############")
spark.sql("SELECT * FROM %s" % tableName).show()
# Upsert (merge) new data
print("########### Upsert new data #############")
spark.sql("CREATE TABLE newData(id LONG) USING parquet")
spark.sql("INSERT INTO newData VALUES 3, 4, 5, 6")
spark.sql('''MERGE INTO {0} USING newData
ON {0}.id = newData.id
WHEN MATCHED THEN
UPDATE SET {0}.id = newData.id
WHEN NOT MATCHED THEN INSERT *
'''.format(tableName))
spark.sql("SELECT * FROM %s" % tableName).show()
    # Overwrite the table data
print("########## Overwrite the table ###########")
spark.sql("INSERT OVERWRITE %s select * FROM (VALUES 5, 6, 7, 8, 9) x (id)" % tableName)
spark.sql("SELECT * FROM %s" % tableName).show()
# Update every even value by adding 100 to it
print("########### Update to the table(add 100 to every even value) ##############")
spark.sql("UPDATE {0} SET id = (id + 100) WHERE (id % 2 == 0)".format(tableName))
spark.sql("SELECT * FROM %s" % tableName).show()
# Delete every even value
print("######### Delete every even value ##############")
spark.sql("DELETE FROM {0} WHERE (id % 2 == 0)".format(tableName))
spark.sql("SELECT * FROM %s" % tableName).show()
# Read old version of data using time travel
print("######## Read old data using time travel ############")
df = spark.read.format("delta").option("versionAsOf", 0).table(tableName)
df.show()
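    # Time travel can also target a timestamp instead of a version number
    # (the date below is illustrative):
    # spark.read.format("delta").option("timestampAsOf", "2021-01-01").table(tableName).show()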
finally:
# cleanup
spark.sql("DROP TABLE " + tableName)
spark.sql("DROP TABLE IF EXISTS newData")
spark.stop()
| 35.986486
| 99
| 0.615096
|
from pyspark.sql import Column, DataFrame, SparkSession, functions
from pyspark.sql.functions import *
from py4j.java_collections import MapConverter
from delta.tables import *
import shutil
import threading
tableName = "tbltestpython"
spark = SparkSession.builder \
.appName("quickstart_sql") \
.master("local[*]") \
.config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") \
.config("spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog") \
.getOrCreate()
spark.sql("DROP TABLE IF EXISTS " + tableName)
spark.sql("DROP TABLE IF EXISTS newData")
try:
print("############# Creating a table ###############")
spark.sql("CREATE TABLE %s(id LONG) USING delta" % tableName)
spark.sql("INSERT INTO %s VALUES 0, 1, 2, 3, 4" % tableName)
print("############ Reading the table ###############")
spark.sql("SELECT * FROM %s" % tableName).show()
print("########### Upsert new data #############")
spark.sql("CREATE TABLE newData(id LONG) USING parquet")
spark.sql("INSERT INTO newData VALUES 3, 4, 5, 6")
spark.sql('''MERGE INTO {0} USING newData
ON {0}.id = newData.id
WHEN MATCHED THEN
UPDATE SET {0}.id = newData.id
WHEN NOT MATCHED THEN INSERT *
'''.format(tableName))
spark.sql("SELECT * FROM %s" % tableName).show()
print("########## Overwrite the table ###########")
spark.sql("INSERT OVERWRITE %s select * FROM (VALUES 5, 6, 7, 8, 9) x (id)" % tableName)
spark.sql("SELECT * FROM %s" % tableName).show()
print("########### Update to the table(add 100 to every even value) ##############")
spark.sql("UPDATE {0} SET id = (id + 100) WHERE (id % 2 == 0)".format(tableName))
spark.sql("SELECT * FROM %s" % tableName).show()
print("######### Delete every even value ##############")
spark.sql("DELETE FROM {0} WHERE (id % 2 == 0)".format(tableName))
spark.sql("SELECT * FROM %s" % tableName).show()
print("######## Read old data using time travel ############")
df = spark.read.format("delta").option("versionAsOf", 0).table(tableName)
df.show()
finally:
spark.sql("DROP TABLE " + tableName)
spark.sql("DROP TABLE IF EXISTS newData")
spark.stop()
| true
| true
|
790336255dd1898a90d62aebd5cb50c087f6beb1
| 1,930
|
py
|
Python
|
app/modules/serverinfo/models.py
|
sappykun/scpsl-masterserver
|
1ce03f7b6f8e53dd44364121eca34cc7b1fdeddd
|
[
"MIT"
] | null | null | null |
app/modules/serverinfo/models.py
|
sappykun/scpsl-masterserver
|
1ce03f7b6f8e53dd44364121eca34cc7b1fdeddd
|
[
"MIT"
] | null | null | null |
app/modules/serverinfo/models.py
|
sappykun/scpsl-masterserver
|
1ce03f7b6f8e53dd44364121eca34cc7b1fdeddd
|
[
"MIT"
] | null | null | null |
import time, datetime
from app import db
class ServerInfo(db.Model):
__tablename__ = 'servers'
__table_args__ = (db.PrimaryKeyConstraint('ip', 'port', name='_ip_port_pk'),)
ip = db.Column(db.String(128), nullable=False)
port = db.Column(db.Integer, nullable=False)
info = db.Column(db.String(1024), nullable=True)
player_count = db.Column(db.Integer, nullable=False)
player_total = db.Column(db.Integer, nullable=False)
servermod_version = db.Column(db.String(32), nullable=True)
pastebin_url = db.Column(db.String(32), nullable=True)
game_version = db.Column(db.String(32), nullable=True)
date_updated = db.Column(db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
def __getitem__(self, item):
return getattr(self, item)
def __setitem__(self, key, value):
self.__dict__[key] = value
@property
def serialize(self):
# du_unix = time.mktime(self.date_updated.timetuple())
# now_unix = time.mktime(datetime.datetime.now().timetuple())
return {
"ip": self.ip,
"port": self.port,
"info": self.info,
"player_count": self.player_count,
"player_total": self.player_total,
"game_version": self.game_version,
"servermod_version": self.servermod_version,
"pastebin_url": self.pastebin_url,
"date_updated": time.mktime(self.date_updated.timetuple())
}
def prettify_seconds(self, seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
if d: return "{} days".format(d)
if h: return "{} hours".format(h)
if m: return "{} minutes".format(m)
return "{} seconds".format(s)
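# Illustrative behaviour of prettify_seconds for integer inputs:
#   prettify_seconds(45)    -> "45 seconds"
#   prettify_seconds(3700)  -> "1 hours"
#   prettify_seconds(90061) -> "1 days"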
| 35.090909
| 82
| 0.58601
|
import time, datetime
from app import db
class ServerInfo(db.Model):
__tablename__ = 'servers'
__table_args__ = (db.PrimaryKeyConstraint('ip', 'port', name='_ip_port_pk'),)
ip = db.Column(db.String(128), nullable=False)
port = db.Column(db.Integer, nullable=False)
info = db.Column(db.String(1024), nullable=True)
player_count = db.Column(db.Integer, nullable=False)
player_total = db.Column(db.Integer, nullable=False)
servermod_version = db.Column(db.String(32), nullable=True)
pastebin_url = db.Column(db.String(32), nullable=True)
game_version = db.Column(db.String(32), nullable=True)
date_updated = db.Column(db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
def __getitem__(self, item):
return getattr(self, item)
def __setitem__(self, key, value):
self.__dict__[key] = value
@property
def serialize(self):
return {
"ip": self.ip,
"port": self.port,
"info": self.info,
"player_count": self.player_count,
"player_total": self.player_total,
"game_version": self.game_version,
"servermod_version": self.servermod_version,
"pastebin_url": self.pastebin_url,
"date_updated": time.mktime(self.date_updated.timetuple())
}
def prettify_seconds(self, seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
if d: return "{} days".format(d)
if h: return "{} hours".format(h)
if m: return "{} minutes".format(m)
return "{} seconds".format(s)
| true
| true
|
7903367b9aa6bfbb5d9da2c38eb07d55a385c654
| 2,740
|
py
|
Python
|
vgg/test.py
|
mhd53/vgg-from-torch
|
fbcca53432648a492550fb14d2c42c10230d76f5
|
[
"MIT"
] | null | null | null |
vgg/test.py
|
mhd53/vgg-from-torch
|
fbcca53432648a492550fb14d2c42c10230d76f5
|
[
"MIT"
] | null | null | null |
vgg/test.py
|
mhd53/vgg-from-torch
|
fbcca53432648a492550fb14d2c42c10230d76f5
|
[
"MIT"
] | null | null | null |
import argparse
import torch
from tqdm import tqdm
import vgg.data_loader.data_loaders as module_data
import vgg.model.loss as module_loss
import vgg.model.metric as module_metric
import vgg.model.model as module_arch
from vgg.parse_config import ConfigParser
def main(config):
logger = config.get_logger('test')
# setup data_loader instances
data_loader = getattr(module_data, config['data_loader']['type'])(
config['data_loader']['args']['data_dir'],
batch_size=512,
shuffle=False,
validation_split=0.0,
training=False,
num_workers=2
)
# build model architecture
model = config.init_obj('arch', module_arch)
logger.info(model)
# get function handles of loss and metrics
loss_fn = getattr(module_loss, config['loss'])
metric_fns = [getattr(module_metric, met) for met in config['metrics']]
logger.info('Loading checkpoint: {} ...'.format(config.resume))
checkpoint = torch.load(config.resume)
state_dict = checkpoint['state_dict']
if config['n_gpu'] > 1:
model = torch.nn.DataParallel(model)
model.load_state_dict(state_dict)
# prepare model for testing
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
model.eval()
total_loss = 0.0
total_metrics = torch.zeros(len(metric_fns))
with torch.no_grad():
for i, (data, target) in enumerate(tqdm(data_loader)):
data, target = data.to(device), target.to(device)
output = model(data)
#
# save sample images, or do something with output here
#
# computing loss, metrics on test set
loss = loss_fn(output, target)
batch_size = data.shape[0]
total_loss += loss.item() * batch_size
for i, metric in enumerate(metric_fns):
total_metrics[i] += metric(output, target) * batch_size
n_samples = len(data_loader.sampler)
log = {'loss': total_loss / n_samples}
log.update({
met.__name__: total_metrics[i].item() / n_samples for i, met in enumerate(metric_fns)
})
logger.info(log)
if __name__ == '__main__':
args = argparse.ArgumentParser(description='PyTorch Template')
args.add_argument('-c', '--config', default=None, type=str,
help='config file path (default: None)')
args.add_argument('-r', '--resume', default=None, type=str,
help='path to latest checkpoint (default: None)')
args.add_argument('-d', '--device', default=None, type=str,
help='indices of GPUs to enable (default: all)')
config = ConfigParser.from_args(args)
main(config)
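# Example invocation (config and checkpoint paths are illustrative):
#   python -m vgg.test -c config.json -r saved/models/VGG/model_best.pth -d 0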
| 33.414634
| 93
| 0.641241
|
import argparse
import torch
from tqdm import tqdm
import vgg.data_loader.data_loaders as module_data
import vgg.model.loss as module_loss
import vgg.model.metric as module_metric
import vgg.model.model as module_arch
from vgg.parse_config import ConfigParser
def main(config):
logger = config.get_logger('test')
data_loader = getattr(module_data, config['data_loader']['type'])(
config['data_loader']['args']['data_dir'],
batch_size=512,
shuffle=False,
validation_split=0.0,
training=False,
num_workers=2
)
model = config.init_obj('arch', module_arch)
logger.info(model)
loss_fn = getattr(module_loss, config['loss'])
metric_fns = [getattr(module_metric, met) for met in config['metrics']]
logger.info('Loading checkpoint: {} ...'.format(config.resume))
checkpoint = torch.load(config.resume)
state_dict = checkpoint['state_dict']
if config['n_gpu'] > 1:
model = torch.nn.DataParallel(model)
model.load_state_dict(state_dict)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
model.eval()
total_loss = 0.0
total_metrics = torch.zeros(len(metric_fns))
with torch.no_grad():
for i, (data, target) in enumerate(tqdm(data_loader)):
data, target = data.to(device), target.to(device)
output = model(data)
loss = loss_fn(output, target)
batch_size = data.shape[0]
total_loss += loss.item() * batch_size
for i, metric in enumerate(metric_fns):
total_metrics[i] += metric(output, target) * batch_size
n_samples = len(data_loader.sampler)
log = {'loss': total_loss / n_samples}
log.update({
met.__name__: total_metrics[i].item() / n_samples for i, met in enumerate(metric_fns)
})
logger.info(log)
if __name__ == '__main__':
args = argparse.ArgumentParser(description='PyTorch Template')
args.add_argument('-c', '--config', default=None, type=str,
help='config file path (default: None)')
args.add_argument('-r', '--resume', default=None, type=str,
help='path to latest checkpoint (default: None)')
args.add_argument('-d', '--device', default=None, type=str,
help='indices of GPUs to enable (default: all)')
config = ConfigParser.from_args(args)
main(config)
| true
| true
|
790337381459139a145ec5c72b9aba4345e71b90
| 3,297
|
py
|
Python
|
tockloader/tab.py
|
torfmaster/tockloader
|
f833879dfb870d45c5ac0970f4cb4f8e8c515c48
|
[
"MIT"
] | null | null | null |
tockloader/tab.py
|
torfmaster/tockloader
|
f833879dfb870d45c5ac0970f4cb4f8e8c515c48
|
[
"MIT"
] | null | null | null |
tockloader/tab.py
|
torfmaster/tockloader
|
f833879dfb870d45c5ac0970f4cb4f8e8c515c48
|
[
"MIT"
] | null | null | null |
import tarfile
import textwrap
import pytoml
from .app import App
from .exceptions import TockLoaderException
from .tbfh import TBFHeader
class TAB:
'''
Tock Application Bundle object. This class handles the TAB format.
'''
def __init__ (self, tab_path):
self.tab = tarfile.open(tab_path)
def extract_app (self, arch):
'''
Return an `App` object from this TAB. You must specify the desired
MCU architecture so the correct binary can be retrieved.
'''
binary_tarinfo = self.tab.getmember('{}.bin'.format(arch))
binary = self.tab.extractfile(binary_tarinfo).read()
# First get the TBF header from the correct binary in the TAB
tbfh = TBFHeader(binary)
if tbfh.is_valid():
name_or_params = tbfh.get_app_name()
if isinstance(name_or_params, str):
name = name_or_params
else:
start = name_or_params[0]
end = start+name_or_params[1]
name = binary[start:end].decode('utf-8')
# Check that total size actually matches the binary that we got.
if tbfh.get_app_size() < len(binary):
# It's fine if the binary is smaller, but the binary cannot be
# longer than the amount of reserved space (`total_size` in the
# TBF header) for the app.
raise TockLoaderException('Invalid TAB, the app binary is longer than its defined total_size')
return App(tbfh, None, name, binary)
else:
raise TockLoaderException('Invalid TBF found in app in TAB')
def is_compatible_with_board (self, board):
'''
Check if the Tock app is compatible with a particular Tock board.
'''
metadata = self.parse_metadata()
if metadata['tab-version'] == 1:
return 'only-for-boards' not in metadata or \
board in metadata['only-for-boards'] or \
metadata['only-for-boards'] == ''
else:
raise TockLoaderException('Unable to understand version {} of metadata'.format(metadata['tab-version']))
def parse_metadata (self):
'''
Open and parse the included metadata file in the TAB.
'''
metadata_tarinfo = self.tab.getmember('metadata.toml')
metadata_str = self.tab.extractfile(metadata_tarinfo).read().decode('utf-8')
return pytoml.loads(metadata_str)
def get_supported_architectures (self):
'''
Return a list of architectures that this TAB has compiled binaries for.
'''
contained_files = self.tab.getnames()
return [i[:-4] for i in contained_files if i[-4:] == '.bin']
def get_tbf_header (self):
'''
Return a TBFHeader object with the TBF header from the app in the TAB.
TBF headers are not architecture specific, so we pull from a random
binary if there are multiple architectures supported.
'''
# Find a .bin file
for f in self.tab.getnames():
if f[-4:] == '.bin':
binary_tarinfo = self.tab.getmember(f)
binary = self.tab.extractfile(binary_tarinfo).read()
# Get the TBF header from a binary in the TAB
return TBFHeader(binary)
return None
def __str__ (self):
out = ''
metadata = self.parse_metadata()
out += 'TAB: {}\n'.format(metadata['name'])
for k,v in sorted(metadata.items()):
if k == 'name':
continue
out += ' {}: {}\n'.format(k,v)
out += ' supported architectures: {}\n'.format(', '.join(self.get_supported_architectures()))
out += ' TBF Header\n'
out += textwrap.indent(str(self.get_tbf_header()), ' ')
return out
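# Illustrative usage sketch (the .tab file name and architectures are hypothetical):
#
#     tab = TAB('blink.tab')
#     tab.get_supported_architectures()   # e.g. ['cortex-m0', 'cortex-m4']
#     app = tab.extract_app('cortex-m4')  # App object for that architecture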
| 32.009709
| 107
| 0.693358
|
import tarfile
import textwrap
import pytoml
from .app import App
from .exceptions import TockLoaderException
from .tbfh import TBFHeader
class TAB:
def __init__ (self, tab_path):
self.tab = tarfile.open(tab_path)
def extract_app (self, arch):
binary_tarinfo = self.tab.getmember('{}.bin'.format(arch))
binary = self.tab.extractfile(binary_tarinfo).read()
tbfh = TBFHeader(binary)
if tbfh.is_valid():
name_or_params = tbfh.get_app_name()
if isinstance(name_or_params, str):
name = name_or_params
else:
start = name_or_params[0]
end = start+name_or_params[1]
name = binary[start:end].decode('utf-8')
if tbfh.get_app_size() < len(binary):
# longer than the amount of reserved space (`total_size` in the
# TBF header) for the app.
raise TockLoaderException('Invalid TAB, the app binary is longer than its defined total_size')
return App(tbfh, None, name, binary)
else:
raise TockLoaderException('Invalid TBF found in app in TAB')
def is_compatible_with_board (self, board):
metadata = self.parse_metadata()
if metadata['tab-version'] == 1:
return 'only-for-boards' not in metadata or \
board in metadata['only-for-boards'] or \
metadata['only-for-boards'] == ''
else:
raise TockLoaderException('Unable to understand version {} of metadata'.format(metadata['tab-version']))
def parse_metadata (self):
metadata_tarinfo = self.tab.getmember('metadata.toml')
metadata_str = self.tab.extractfile(metadata_tarinfo).read().decode('utf-8')
return pytoml.loads(metadata_str)
def get_supported_architectures (self):
contained_files = self.tab.getnames()
return [i[:-4] for i in contained_files if i[-4:] == '.bin']
def get_tbf_header (self):
# Find a .bin file
for f in self.tab.getnames():
if f[-4:] == '.bin':
binary_tarinfo = self.tab.getmember(f)
binary = self.tab.extractfile(binary_tarinfo).read()
# Get the TBF header from a binary in the TAB
return TBFHeader(binary)
return None
def __str__ (self):
out = ''
metadata = self.parse_metadata()
out += 'TAB: {}\n'.format(metadata['name'])
for k,v in sorted(metadata.items()):
if k == 'name':
continue
out += ' {}: {}\n'.format(k,v)
out += ' supported architectures: {}\n'.format(', '.join(self.get_supported_architectures()))
out += ' TBF Header\n'
out += textwrap.indent(str(self.get_tbf_header()), ' ')
return out
| true
| true
|
79033767bda915e916a7a2507007bdb76a27ba58
| 32,085
|
py
|
Python
|
omegaconf/basecontainer.py
|
gwenzek/omegaconf
|
0ff8a401739d00b01d88408c262a0f061ff3be68
|
[
"BSD-3-Clause"
] | null | null | null |
omegaconf/basecontainer.py
|
gwenzek/omegaconf
|
0ff8a401739d00b01d88408c262a0f061ff3be68
|
[
"BSD-3-Clause"
] | null | null | null |
omegaconf/basecontainer.py
|
gwenzek/omegaconf
|
0ff8a401739d00b01d88408c262a0f061ff3be68
|
[
"BSD-3-Clause"
] | null | null | null |
import copy
import sys
from abc import ABC, abstractmethod
from enum import Enum
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Tuple, Union
import yaml
from ._utils import (
_DEFAULT_MARKER_,
ValueKind,
_ensure_container,
_get_value,
_is_interpolation,
_is_missing_literal,
_is_missing_value,
_is_none,
_is_special,
_is_union,
_resolve_optional,
get_ref_type,
get_structured_config_data,
get_value_kind,
get_yaml_loader,
is_container_annotation,
is_dict_annotation,
is_list_annotation,
is_primitive_dict,
is_primitive_type,
is_structured_config,
is_tuple_annotation,
)
from .base import Container, ContainerMetadata, DictKeyType, Node, SCMode
from .errors import (
ConfigCycleDetectedException,
ConfigTypeError,
InterpolationResolutionError,
KeyValidationError,
MissingMandatoryValue,
OmegaConfBaseException,
ReadonlyConfigError,
ValidationError,
)
if TYPE_CHECKING:
from .dictconfig import DictConfig # pragma: no cover
class BaseContainer(Container, ABC):
_resolvers: ClassVar[Dict[str, Any]] = {}
def __init__(self, parent: Optional["Container"], metadata: ContainerMetadata):
if not (parent is None or isinstance(parent, Container)):
raise ConfigTypeError("Parent type is not omegaconf.Container")
super().__init__(parent=parent, metadata=metadata)
self.__dict__["_content"] = None
def _resolve_with_default(
self,
key: Union[DictKeyType, int],
value: Node,
default_value: Any = _DEFAULT_MARKER_,
) -> Any:
"""returns the value with the specified key, like obj.key and obj['key']"""
if _is_missing_value(value):
if default_value is not _DEFAULT_MARKER_:
return default_value
raise MissingMandatoryValue("Missing mandatory value: $FULL_KEY")
resolved_node = self._maybe_resolve_interpolation(
parent=self,
key=key,
value=value,
throw_on_resolution_failure=True,
)
return _get_value(resolved_node)
def __str__(self) -> str:
return self.__repr__()
def __repr__(self) -> str:
if self.__dict__["_content"] is None:
return "None"
elif self._is_interpolation() or self._is_missing():
v = self.__dict__["_content"]
return f"'{v}'"
else:
return self.__dict__["_content"].__repr__() # type: ignore
# Support pickle
def __getstate__(self) -> Dict[str, Any]:
dict_copy = copy.copy(self.__dict__)
# no need to serialize the flags cache, it can be re-constructed later
dict_copy.pop("_flags_cache", None)
dict_copy["_metadata"] = copy.copy(dict_copy["_metadata"])
ref_type = self._metadata.ref_type
if is_container_annotation(ref_type):
if is_dict_annotation(ref_type):
dict_copy["_metadata"].ref_type = Dict
elif is_list_annotation(ref_type):
dict_copy["_metadata"].ref_type = List
else:
assert False
if sys.version_info < (3, 7): # pragma: no cover
element_type = self._metadata.element_type
if _is_union(element_type):
raise OmegaConfBaseException(
"Serializing structured configs with `Union` element type requires python >= 3.7"
)
return dict_copy
# Support pickle
def __setstate__(self, d: Dict[str, Any]) -> None:
from omegaconf import DictConfig
from omegaconf._utils import is_generic_dict, is_generic_list
if isinstance(self, DictConfig):
key_type = d["_metadata"].key_type
# backward compatibility to load OmegaConf 2.0 configs
if key_type is None:
key_type = Any
d["_metadata"].key_type = key_type
element_type = d["_metadata"].element_type
# backward compatibility to load OmegaConf 2.0 configs
if element_type is None:
element_type = Any
d["_metadata"].element_type = element_type
ref_type = d["_metadata"].ref_type
if is_container_annotation(ref_type):
if is_generic_dict(ref_type):
d["_metadata"].ref_type = Dict[key_type, element_type] # type: ignore
elif is_generic_list(ref_type):
d["_metadata"].ref_type = List[element_type] # type: ignore
else:
assert False
d["_flags_cache"] = None
self.__dict__.update(d)
@abstractmethod
def __delitem__(self, key: Any) -> None:
...
def __len__(self) -> int:
if self._is_none() or self._is_missing() or self._is_interpolation():
return 0
content = self.__dict__["_content"]
return len(content)
def merge_with_cli(self) -> None:
args_list = sys.argv[1:]
self.merge_with_dotlist(args_list)
def merge_with_dotlist(self, dotlist: List[str]) -> None:
from omegaconf import OmegaConf
def fail() -> None:
raise ValueError("Input list must be a list or a tuple of strings")
if not isinstance(dotlist, (list, tuple)):
fail()
for arg in dotlist:
if not isinstance(arg, str):
fail()
idx = arg.find("=")
if idx == -1:
key = arg
value = None
else:
key = arg[0:idx]
value = arg[idx + 1 :]
value = yaml.load(value, Loader=get_yaml_loader())
OmegaConf.update(self, key, value)
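    # Illustrative dotlist (the keys are hypothetical):
    #   cfg.merge_with_dotlist(["optimizer.lr=0.1", "model.depth=50", "debug=true"])
    # Each entry is split on the first '=', the value is parsed with the YAML
    # loader, and the result is applied via OmegaConf.update.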
def is_empty(self) -> bool:
"""return true if config is empty"""
return len(self.__dict__["_content"]) == 0
@staticmethod
def _to_content(
conf: Container,
resolve: bool,
throw_on_missing: bool,
enum_to_str: bool = False,
structured_config_mode: SCMode = SCMode.DICT,
) -> Union[None, Any, str, Dict[DictKeyType, Any], List[Any]]:
from omegaconf import MISSING, DictConfig, ListConfig
def convert(val: Node) -> Any:
value = val._value()
if enum_to_str and isinstance(value, Enum):
value = f"{value.name}"
return value
def get_node_value(key: Union[DictKeyType, int]) -> Any:
try:
node = conf._get_node(key, throw_on_missing_value=throw_on_missing)
except MissingMandatoryValue as e:
conf._format_and_raise(key=key, value=None, cause=e)
assert isinstance(node, Node)
if resolve:
try:
node = node._dereference_node()
except InterpolationResolutionError as e:
conf._format_and_raise(key=key, value=None, cause=e)
if isinstance(node, Container):
value = BaseContainer._to_content(
node,
resolve=resolve,
throw_on_missing=throw_on_missing,
enum_to_str=enum_to_str,
structured_config_mode=structured_config_mode,
)
else:
value = convert(node)
return value
if conf._is_none():
return None
elif conf._is_missing():
if throw_on_missing:
conf._format_and_raise(
key=None,
value=None,
cause=MissingMandatoryValue("Missing mandatory value"),
)
else:
return MISSING
elif not resolve and conf._is_interpolation():
inter = conf._value()
assert isinstance(inter, str)
return inter
if resolve:
_conf = conf._dereference_node()
assert isinstance(_conf, Container)
conf = _conf
if isinstance(conf, DictConfig):
if (
conf._metadata.object_type not in (dict, None)
and structured_config_mode == SCMode.DICT_CONFIG
):
return conf
if structured_config_mode == SCMode.INSTANTIATE and is_structured_config(
conf._metadata.object_type
):
return conf._to_object()
retdict: Dict[DictKeyType, Any] = {}
for key in conf.keys():
value = get_node_value(key)
if enum_to_str and isinstance(key, Enum):
key = f"{key.name}"
retdict[key] = value
return retdict
elif isinstance(conf, ListConfig):
retlist: List[Any] = []
for index in range(len(conf)):
item = get_node_value(index)
retlist.append(item)
return retlist
assert False
@staticmethod
    def _map_merge(dest: "BaseContainer", src: "BaseContainer") -> None:
        """merge src into dest in place; src is not modified"""
from omegaconf import AnyNode, DictConfig, ValueNode
assert isinstance(dest, DictConfig)
assert isinstance(src, DictConfig)
src_type = src._metadata.object_type
src_ref_type = get_ref_type(src)
assert src_ref_type is not None
# If source DictConfig is:
# - None => set the destination DictConfig to None
# - an interpolation => set the destination DictConfig to be the same interpolation
if src._is_none() or src._is_interpolation():
dest._set_value(src._value())
_update_types(node=dest, ref_type=src_ref_type, object_type=src_type)
return
dest._validate_merge(value=src)
def expand(node: Container) -> None:
rt = node._metadata.ref_type
val: Any
if rt is not Any:
if is_dict_annotation(rt):
val = {}
elif is_list_annotation(rt) or is_tuple_annotation(rt):
val = []
else:
val = rt
elif isinstance(node, DictConfig):
val = {}
else:
assert False
node._set_value(val)
if (
src._is_missing()
and not dest._is_missing()
and is_structured_config(src_ref_type)
):
# Replace `src` with a prototype of its corresponding structured config
# whose fields are all missing (to avoid overwriting fields in `dest`).
src = _create_structured_with_missing_fields(
ref_type=src_ref_type, object_type=src_type
)
if (dest._is_interpolation() or dest._is_missing()) and not src._is_missing():
expand(dest)
src_items = src.items_ex(resolve=False) if not src._is_missing() else []
for key, src_value in src_items:
src_node = src._get_node(key, validate_access=False)
dest_node = dest._get_node(key, validate_access=False)
assert src_node is None or isinstance(src_node, Node)
assert dest_node is None or isinstance(dest_node, Node)
if isinstance(dest_node, DictConfig):
dest_node._validate_merge(value=src_node)
missing_src_value = _is_missing_value(src_value)
if (
isinstance(dest_node, Container)
and dest_node._is_none()
and not missing_src_value
and not _is_none(src_value, resolve=True)
):
expand(dest_node)
if dest_node is not None and dest_node._is_interpolation():
target_node = dest_node._maybe_dereference_node()
if isinstance(target_node, Container):
dest[key] = target_node
dest_node = dest._get_node(key)
is_optional, et = _resolve_optional(dest._metadata.element_type)
if dest_node is None and is_structured_config(et) and not missing_src_value:
# merging into a new node. Use element_type as a base
dest[key] = DictConfig(
et, parent=dest, ref_type=et, is_optional=is_optional
)
dest_node = dest._get_node(key)
if dest_node is not None:
if isinstance(dest_node, BaseContainer):
if isinstance(src_value, BaseContainer):
dest_node._merge_with(src_value)
elif not missing_src_value:
dest.__setitem__(key, src_value)
else:
if isinstance(src_value, BaseContainer):
dest.__setitem__(key, src_value)
else:
assert isinstance(dest_node, ValueNode)
assert isinstance(src_node, ValueNode)
# Compare to literal missing, ignoring interpolation
src_node_missing = _is_missing_literal(src_value)
try:
if isinstance(dest_node, AnyNode):
if src_node_missing:
node = copy.copy(src_node)
# if src node is missing, use the value from the dest_node,
                                # but validate it against the type of the src node before assignment
node._set_value(dest_node._value())
else:
node = src_node
dest.__setitem__(key, node)
else:
if not src_node_missing:
dest_node._set_value(src_value)
except (ValidationError, ReadonlyConfigError) as e:
dest._format_and_raise(key=key, value=src_value, cause=e)
else:
from omegaconf import open_dict
if is_structured_config(src_type):
# verified to be compatible above in _validate_merge
with open_dict(dest):
dest[key] = src._get_node(key)
else:
dest[key] = src._get_node(key)
_update_types(node=dest, ref_type=src_ref_type, object_type=src_type)
# explicit flags on the source config are replacing the flag values in the destination
flags = src._metadata.flags
assert flags is not None
for flag, value in flags.items():
if value is not None:
dest._set_flag(flag, value)
@staticmethod
def _list_merge(dest: Any, src: Any) -> None:
from omegaconf import DictConfig, ListConfig, OmegaConf
assert isinstance(dest, ListConfig)
assert isinstance(src, ListConfig)
if src._is_none():
dest._set_value(None)
elif src._is_missing():
# do not change dest if src is MISSING.
if dest._metadata.element_type is Any:
dest._metadata.element_type = src._metadata.element_type
elif src._is_interpolation():
dest._set_value(src._value())
else:
temp_target = ListConfig(content=[], parent=dest._get_parent())
temp_target.__dict__["_metadata"] = copy.deepcopy(
dest.__dict__["_metadata"]
)
is_optional, et = _resolve_optional(dest._metadata.element_type)
if is_structured_config(et):
prototype = DictConfig(et, ref_type=et, is_optional=is_optional)
for item in src._iter_ex(resolve=False):
if isinstance(item, DictConfig):
item = OmegaConf.merge(prototype, item)
temp_target.append(item)
else:
for item in src._iter_ex(resolve=False):
temp_target.append(item)
dest.__dict__["_content"] = temp_target.__dict__["_content"]
# explicit flags on the source config are replacing the flag values in the destination
flags = src._metadata.flags
assert flags is not None
for flag, value in flags.items():
if value is not None:
dest._set_flag(flag, value)
def merge_with(
self,
*others: Union[
"BaseContainer", Dict[str, Any], List[Any], Tuple[Any, ...], Any
],
) -> None:
try:
self._merge_with(*others)
except Exception as e:
self._format_and_raise(key=None, value=None, cause=e)
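    # Typical calls (illustrative): cfg.merge_with(other_cfg) or
    # cfg.merge_with({"db": {"port": 5432}}); plain dicts/lists/dataclasses are
    # first wrapped into configs by _ensure_container before merging.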
def _merge_with(
self,
*others: Union[
"BaseContainer", Dict[str, Any], List[Any], Tuple[Any, ...], Any
],
) -> None:
        """merge a list of other Config objects into this one, overriding as needed"""
        from .dictconfig import DictConfig
        from .listconfig import ListConfig
for other in others:
if other is None:
raise ValueError("Cannot merge with a None config")
my_flags = {}
if self._get_flag("allow_objects") is True:
my_flags = {"allow_objects": True}
other = _ensure_container(other, flags=my_flags)
if isinstance(self, DictConfig) and isinstance(other, DictConfig):
BaseContainer._map_merge(self, other)
elif isinstance(self, ListConfig) and isinstance(other, ListConfig):
BaseContainer._list_merge(self, other)
else:
raise TypeError("Cannot merge DictConfig with ListConfig")
# recursively correct the parent hierarchy after the merge
self._re_parent()
# noinspection PyProtectedMember
def _set_item_impl(self, key: Any, value: Any) -> None:
"""
Changes the value of the node key with the desired value. If the node key doesn't
exist it creates a new one.
"""
from .nodes import AnyNode, ValueNode
if isinstance(value, Node):
do_deepcopy = not self._get_flag("no_deepcopy_set_nodes")
if not do_deepcopy and isinstance(value, Container):
# if value is from the same config, perform a deepcopy no matter what.
if self._get_root() is value._get_root():
do_deepcopy = True
if do_deepcopy:
value = copy.deepcopy(value)
value._set_parent(None)
try:
old = value._key()
value._set_key(key)
self._validate_set(key, value)
finally:
value._set_key(old)
else:
self._validate_set(key, value)
if self._get_flag("readonly"):
raise ReadonlyConfigError("Cannot change read-only config container")
input_is_node = isinstance(value, Node)
target_node_ref = self._get_node(key)
input_is_typed_vnode = isinstance(value, ValueNode) and not isinstance(
value, AnyNode
)
target_is_vnode = isinstance(target_node_ref, ValueNode)
def get_target_type_hint(val: Any) -> Any:
if not is_structured_config(val):
type_hint = self._metadata.element_type
else:
target = self._get_node(key)
if target is None:
type_hint = self._metadata.element_type
else:
assert isinstance(target, Node)
type_hint = target._metadata.type_hint
return type_hint
def assign(value_key: Any, val: Node) -> None:
assert val._get_parent() is None
v = val
v._set_parent(self)
v._set_key(value_key)
_deep_update_type_hint(node=v, type_hint=self._metadata.element_type)
self.__dict__["_content"][value_key] = v
if input_is_typed_vnode:
assign(key, value)
else:
# input is not a ValueNode, can be primitive or container
special_value = _is_special(value)
type_hint = get_target_type_hint(value)
# We use the `Node._set_value` method if the target node exists
# 1. the value is special (i.e. MISSING or None or interpolation), or
# 2. the target is a Container and has an explicit ref_type, or
# 3. the target is a typed ValueNode, or
# 4. the target is an AnyNode and the input is a primitive type.
should_set_value = target_node_ref is not None and (
special_value
or (
isinstance(target_node_ref, Container)
and target_node_ref._has_ref_type()
)
or (target_is_vnode and not isinstance(target_node_ref, AnyNode))
or (isinstance(target_node_ref, AnyNode) and is_primitive_type(value))
)
if should_set_value:
if special_value and isinstance(value, Node):
value = value._value()
self.__dict__["_content"][key]._set_value(value)
elif input_is_node:
_, ref_type = _resolve_optional(type_hint)
if special_value and (
is_container_annotation(ref_type) or is_structured_config(ref_type)
):
self._wrap_value_and_set(key, value._value(), type_hint)
else:
assign(key, value)
else:
self._wrap_value_and_set(key, value, type_hint)
def _wrap_value_and_set(self, key: Any, val: Any, type_hint: Any) -> None:
from omegaconf.omegaconf import _maybe_wrap
is_optional, ref_type = _resolve_optional(type_hint)
wrapped = _maybe_wrap(
ref_type=ref_type,
key=key,
value=val,
is_optional=is_optional,
parent=self,
)
self.__dict__["_content"][key] = wrapped
@staticmethod
def _item_eq(
c1: Container,
k1: Union[DictKeyType, int],
c2: Container,
k2: Union[DictKeyType, int],
) -> bool:
v1 = c1._get_node(k1)
v2 = c2._get_node(k2)
assert v1 is not None and v2 is not None
assert isinstance(v1, Node)
assert isinstance(v2, Node)
if v1._is_none() and v2._is_none():
return True
if v1._is_missing() and v2._is_missing():
return True
v1_inter = v1._is_interpolation()
v2_inter = v2._is_interpolation()
dv1: Optional[Node] = v1
dv2: Optional[Node] = v2
if v1_inter:
dv1 = v1._maybe_dereference_node()
if v2_inter:
dv2 = v2._maybe_dereference_node()
if v1_inter and v2_inter:
if dv1 is None or dv2 is None:
return v1 == v2
else:
# both are not none, if both are containers compare as container
if isinstance(dv1, Container) and isinstance(dv2, Container):
if dv1 != dv2:
return False
dv1 = _get_value(dv1)
dv2 = _get_value(dv2)
return dv1 == dv2
elif not v1_inter and not v2_inter:
v1 = _get_value(v1)
v2 = _get_value(v2)
ret = v1 == v2
assert isinstance(ret, bool)
return ret
else:
dv1 = _get_value(dv1)
dv2 = _get_value(dv2)
ret = dv1 == dv2
assert isinstance(ret, bool)
return ret
def _is_optional(self) -> bool:
return self.__dict__["_metadata"].optional is True
def _is_interpolation(self) -> bool:
return _is_interpolation(self.__dict__["_content"])
@abstractmethod
def _validate_get(self, key: Any, value: Any = None) -> None:
...
@abstractmethod
def _validate_set(self, key: Any, value: Any) -> None:
...
def _value(self) -> Any:
return self.__dict__["_content"]
def _get_full_key(self, key: Union[DictKeyType, int, slice, None]) -> str:
from .listconfig import ListConfig
from .omegaconf import _select_one
if not isinstance(key, (int, str, Enum, float, bool, slice, bytes, type(None))):
return ""
def _slice_to_str(x: slice) -> str:
if x.step is not None:
return f"{x.start}:{x.stop}:{x.step}"
else:
return f"{x.start}:{x.stop}"
def prepand(full_key: str, parent_type: Any, cur_type: Any, key: Any) -> str:
if isinstance(key, slice):
key = _slice_to_str(key)
elif isinstance(key, Enum):
key = key.name
elif isinstance(key, (int, float, bool)):
key = str(key)
if issubclass(parent_type, ListConfig):
if full_key != "":
if issubclass(cur_type, ListConfig):
full_key = f"[{key}]{full_key}"
else:
full_key = f"[{key}].{full_key}"
else:
full_key = f"[{key}]"
else:
if full_key == "":
full_key = key
else:
if issubclass(cur_type, ListConfig):
full_key = f"{key}{full_key}"
else:
full_key = f"{key}.{full_key}"
return full_key
if key is not None and key != "":
assert isinstance(self, Container)
cur, _ = _select_one(
c=self, key=str(key), throw_on_missing=False, throw_on_type_error=False
)
if cur is None:
cur = self
full_key = prepand("", type(cur), None, key)
if cur._key() is not None:
full_key = prepand(
full_key, type(cur._get_parent()), type(cur), cur._key()
)
else:
full_key = prepand("", type(cur._get_parent()), type(cur), cur._key())
else:
cur = self
if cur._key() is None:
return ""
full_key = self._key()
assert cur is not None
memo = {id(cur)} # remember already visited nodes so as to detect cycles
while cur._get_parent() is not None:
cur = cur._get_parent()
if id(cur) in memo:
raise ConfigCycleDetectedException(
f"Cycle when iterating over parents of key `{key!s}`"
)
memo.add(id(cur))
assert cur is not None
if cur._key() is not None:
full_key = prepand(
full_key, type(cur._get_parent()), type(cur), cur._key()
)
return full_key
def _create_structured_with_missing_fields(
ref_type: type, object_type: Optional[type] = None
) -> "DictConfig":
from . import MISSING, DictConfig
cfg_data = get_structured_config_data(ref_type)
for v in cfg_data.values():
v._set_value(MISSING)
cfg = DictConfig(cfg_data)
cfg._metadata.optional, cfg._metadata.ref_type = _resolve_optional(ref_type)
cfg._metadata.object_type = object_type
return cfg
def _update_types(node: Node, ref_type: Any, object_type: Optional[type]) -> None:
if object_type is not None and not is_primitive_dict(object_type):
node._metadata.object_type = object_type
if node._metadata.ref_type is Any:
_deep_update_type_hint(node, ref_type)
def _deep_update_type_hint(node: Node, type_hint: Any) -> None:
"""Ensure node is compatible with type_hint, mutating if necessary."""
from omegaconf import DictConfig, ListConfig
from ._utils import get_dict_key_value_types, get_list_element_type
if type_hint is Any:
return
_shallow_validate_type_hint(node, type_hint)
new_is_optional, new_ref_type = _resolve_optional(type_hint)
node._metadata.ref_type = new_ref_type
node._metadata.optional = new_is_optional
if is_list_annotation(new_ref_type) and isinstance(node, ListConfig):
new_element_type = get_list_element_type(new_ref_type)
node._metadata.element_type = new_element_type
if not _is_special(node):
for i in range(len(node)):
_deep_update_subnode(node, i, new_element_type)
if is_dict_annotation(new_ref_type) and isinstance(node, DictConfig):
new_key_type, new_element_type = get_dict_key_value_types(new_ref_type)
node._metadata.key_type = new_key_type
node._metadata.element_type = new_element_type
if not _is_special(node):
for key in node:
if new_key_type is not Any and not isinstance(key, new_key_type):
raise KeyValidationError(
f"Key {key!r} ({type(key).__name__}) is incompatible"
+ f" with key type hint '{new_key_type.__name__}'"
)
_deep_update_subnode(node, key, new_element_type)
def _deep_update_subnode(node: BaseContainer, key: Any, value_type_hint: Any) -> None:
"""Get node[key] and ensure it is compatible with value_type_hint, mutating if necessary."""
subnode = node._get_node(key)
assert isinstance(subnode, Node)
if _is_special(subnode):
# Ensure special values are wrapped in a Node subclass that
# is compatible with the type hint.
node._wrap_value_and_set(key, subnode._value(), value_type_hint)
subnode = node._get_node(key)
assert isinstance(subnode, Node)
_deep_update_type_hint(subnode, value_type_hint)
def _shallow_validate_type_hint(node: Node, type_hint: Any) -> None:
"""Error if node's type, content and metadata are not compatible with type_hint."""
from omegaconf import DictConfig, ListConfig, ValueNode
is_optional, ref_type = _resolve_optional(type_hint)
vk = get_value_kind(node)
if node._is_none():
if not is_optional:
value = _get_value(node)
raise ValidationError(
f"Value {value!r} ({type(value).__name__})"
+ f" is incompatible with type hint '{ref_type.__name__}'"
)
return
elif vk in (ValueKind.MANDATORY_MISSING, ValueKind.INTERPOLATION):
return
elif vk == ValueKind.VALUE:
if is_primitive_type(ref_type) and isinstance(node, ValueNode):
value = node._value()
if not isinstance(value, ref_type):
raise ValidationError(
f"Value {value!r} ({type(value).__name__})"
+ f" is incompatible with type hint '{ref_type.__name__}'"
)
elif is_structured_config(ref_type) and isinstance(node, DictConfig):
return
elif is_dict_annotation(ref_type) and isinstance(node, DictConfig):
return
elif is_list_annotation(ref_type) and isinstance(node, ListConfig):
return
else:
if isinstance(node, ValueNode):
value = node._value()
raise ValidationError(
f"Value {value!r} ({type(value).__name__})"
+ f" is incompatible with type hint '{ref_type}'"
)
else:
raise ValidationError(
f"'{type(node).__name__}' is incompatible"
+ f" with type hint '{ref_type}'"
)
else:
assert False
| 36.87931
| 103
| 0.569331
|
import copy
import sys
from abc import ABC, abstractmethod
from enum import Enum
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Tuple, Union
import yaml
from ._utils import (
_DEFAULT_MARKER_,
ValueKind,
_ensure_container,
_get_value,
_is_interpolation,
_is_missing_literal,
_is_missing_value,
_is_none,
_is_special,
_is_union,
_resolve_optional,
get_ref_type,
get_structured_config_data,
get_value_kind,
get_yaml_loader,
is_container_annotation,
is_dict_annotation,
is_list_annotation,
is_primitive_dict,
is_primitive_type,
is_structured_config,
is_tuple_annotation,
)
from .base import Container, ContainerMetadata, DictKeyType, Node, SCMode
from .errors import (
ConfigCycleDetectedException,
ConfigTypeError,
InterpolationResolutionError,
KeyValidationError,
MissingMandatoryValue,
OmegaConfBaseException,
ReadonlyConfigError,
ValidationError,
)
if TYPE_CHECKING:
from .dictconfig import DictConfig
class BaseContainer(Container, ABC):
_resolvers: ClassVar[Dict[str, Any]] = {}
def __init__(self, parent: Optional["Container"], metadata: ContainerMetadata):
if not (parent is None or isinstance(parent, Container)):
raise ConfigTypeError("Parent type is not omegaconf.Container")
super().__init__(parent=parent, metadata=metadata)
self.__dict__["_content"] = None
def _resolve_with_default(
self,
key: Union[DictKeyType, int],
value: Node,
default_value: Any = _DEFAULT_MARKER_,
) -> Any:
if _is_missing_value(value):
if default_value is not _DEFAULT_MARKER_:
return default_value
raise MissingMandatoryValue("Missing mandatory value: $FULL_KEY")
resolved_node = self._maybe_resolve_interpolation(
parent=self,
key=key,
value=value,
throw_on_resolution_failure=True,
)
return _get_value(resolved_node)
def __str__(self) -> str:
return self.__repr__()
def __repr__(self) -> str:
if self.__dict__["_content"] is None:
return "None"
elif self._is_interpolation() or self._is_missing():
v = self.__dict__["_content"]
return f"'{v}'"
else:
return self.__dict__["_content"].__repr__()
def __getstate__(self) -> Dict[str, Any]:
dict_copy = copy.copy(self.__dict__)
dict_copy.pop("_flags_cache", None)
dict_copy["_metadata"] = copy.copy(dict_copy["_metadata"])
ref_type = self._metadata.ref_type
if is_container_annotation(ref_type):
if is_dict_annotation(ref_type):
dict_copy["_metadata"].ref_type = Dict
elif is_list_annotation(ref_type):
dict_copy["_metadata"].ref_type = List
else:
assert False
if sys.version_info < (3, 7):
element_type = self._metadata.element_type
if _is_union(element_type):
raise OmegaConfBaseException(
"Serializing structured configs with `Union` element type requires python >= 3.7"
)
return dict_copy
def __setstate__(self, d: Dict[str, Any]) -> None:
from omegaconf import DictConfig
from omegaconf._utils import is_generic_dict, is_generic_list
if isinstance(self, DictConfig):
key_type = d["_metadata"].key_type
if key_type is None:
key_type = Any
d["_metadata"].key_type = key_type
element_type = d["_metadata"].element_type
if element_type is None:
element_type = Any
d["_metadata"].element_type = element_type
ref_type = d["_metadata"].ref_type
if is_container_annotation(ref_type):
if is_generic_dict(ref_type):
d["_metadata"].ref_type = Dict[key_type, element_type]
elif is_generic_list(ref_type):
d["_metadata"].ref_type = List[element_type]
else:
assert False
d["_flags_cache"] = None
self.__dict__.update(d)
@abstractmethod
def __delitem__(self, key: Any) -> None:
...
def __len__(self) -> int:
if self._is_none() or self._is_missing() or self._is_interpolation():
return 0
content = self.__dict__["_content"]
return len(content)
def merge_with_cli(self) -> None:
args_list = sys.argv[1:]
self.merge_with_dotlist(args_list)
def merge_with_dotlist(self, dotlist: List[str]) -> None:
from omegaconf import OmegaConf
def fail() -> None:
raise ValueError("Input list must be a list or a tuple of strings")
if not isinstance(dotlist, (list, tuple)):
fail()
for arg in dotlist:
if not isinstance(arg, str):
fail()
idx = arg.find("=")
if idx == -1:
key = arg
value = None
else:
key = arg[0:idx]
value = arg[idx + 1 :]
value = yaml.load(value, Loader=get_yaml_loader())
OmegaConf.update(self, key, value)
def is_empty(self) -> bool:
return len(self.__dict__["_content"]) == 0
@staticmethod
def _to_content(
conf: Container,
resolve: bool,
throw_on_missing: bool,
enum_to_str: bool = False,
structured_config_mode: SCMode = SCMode.DICT,
) -> Union[None, Any, str, Dict[DictKeyType, Any], List[Any]]:
from omegaconf import MISSING, DictConfig, ListConfig
def convert(val: Node) -> Any:
value = val._value()
if enum_to_str and isinstance(value, Enum):
value = f"{value.name}"
return value
def get_node_value(key: Union[DictKeyType, int]) -> Any:
try:
node = conf._get_node(key, throw_on_missing_value=throw_on_missing)
except MissingMandatoryValue as e:
conf._format_and_raise(key=key, value=None, cause=e)
assert isinstance(node, Node)
if resolve:
try:
node = node._dereference_node()
except InterpolationResolutionError as e:
conf._format_and_raise(key=key, value=None, cause=e)
if isinstance(node, Container):
value = BaseContainer._to_content(
node,
resolve=resolve,
throw_on_missing=throw_on_missing,
enum_to_str=enum_to_str,
structured_config_mode=structured_config_mode,
)
else:
value = convert(node)
return value
if conf._is_none():
return None
elif conf._is_missing():
if throw_on_missing:
conf._format_and_raise(
key=None,
value=None,
cause=MissingMandatoryValue("Missing mandatory value"),
)
else:
return MISSING
elif not resolve and conf._is_interpolation():
inter = conf._value()
assert isinstance(inter, str)
return inter
if resolve:
_conf = conf._dereference_node()
assert isinstance(_conf, Container)
conf = _conf
if isinstance(conf, DictConfig):
if (
conf._metadata.object_type not in (dict, None)
and structured_config_mode == SCMode.DICT_CONFIG
):
return conf
if structured_config_mode == SCMode.INSTANTIATE and is_structured_config(
conf._metadata.object_type
):
return conf._to_object()
retdict: Dict[DictKeyType, Any] = {}
for key in conf.keys():
value = get_node_value(key)
if enum_to_str and isinstance(key, Enum):
key = f"{key.name}"
retdict[key] = value
return retdict
elif isinstance(conf, ListConfig):
retlist: List[Any] = []
for index in range(len(conf)):
item = get_node_value(index)
retlist.append(item)
return retlist
assert False
@staticmethod
def _map_merge(dest: "BaseContainer", src: "BaseContainer") -> None:
from omegaconf import AnyNode, DictConfig, ValueNode
assert isinstance(dest, DictConfig)
assert isinstance(src, DictConfig)
src_type = src._metadata.object_type
src_ref_type = get_ref_type(src)
assert src_ref_type is not None
if src._is_none() or src._is_interpolation():
dest._set_value(src._value())
_update_types(node=dest, ref_type=src_ref_type, object_type=src_type)
return
dest._validate_merge(value=src)
def expand(node: Container) -> None:
rt = node._metadata.ref_type
val: Any
if rt is not Any:
if is_dict_annotation(rt):
val = {}
elif is_list_annotation(rt) or is_tuple_annotation(rt):
val = []
else:
val = rt
elif isinstance(node, DictConfig):
val = {}
else:
assert False
node._set_value(val)
if (
src._is_missing()
and not dest._is_missing()
and is_structured_config(src_ref_type)
):
src = _create_structured_with_missing_fields(
ref_type=src_ref_type, object_type=src_type
)
if (dest._is_interpolation() or dest._is_missing()) and not src._is_missing():
expand(dest)
src_items = src.items_ex(resolve=False) if not src._is_missing() else []
for key, src_value in src_items:
src_node = src._get_node(key, validate_access=False)
dest_node = dest._get_node(key, validate_access=False)
assert src_node is None or isinstance(src_node, Node)
assert dest_node is None or isinstance(dest_node, Node)
if isinstance(dest_node, DictConfig):
dest_node._validate_merge(value=src_node)
missing_src_value = _is_missing_value(src_value)
if (
isinstance(dest_node, Container)
and dest_node._is_none()
and not missing_src_value
and not _is_none(src_value, resolve=True)
):
expand(dest_node)
if dest_node is not None and dest_node._is_interpolation():
target_node = dest_node._maybe_dereference_node()
if isinstance(target_node, Container):
dest[key] = target_node
dest_node = dest._get_node(key)
is_optional, et = _resolve_optional(dest._metadata.element_type)
if dest_node is None and is_structured_config(et) and not missing_src_value:
dest[key] = DictConfig(
et, parent=dest, ref_type=et, is_optional=is_optional
)
dest_node = dest._get_node(key)
if dest_node is not None:
if isinstance(dest_node, BaseContainer):
if isinstance(src_value, BaseContainer):
dest_node._merge_with(src_value)
elif not missing_src_value:
dest.__setitem__(key, src_value)
else:
if isinstance(src_value, BaseContainer):
dest.__setitem__(key, src_value)
else:
assert isinstance(dest_node, ValueNode)
assert isinstance(src_node, ValueNode)
src_node_missing = _is_missing_literal(src_value)
try:
if isinstance(dest_node, AnyNode):
if src_node_missing:
node = copy.copy(src_node)
node._set_value(dest_node._value())
else:
node = src_node
dest.__setitem__(key, node)
else:
if not src_node_missing:
dest_node._set_value(src_value)
except (ValidationError, ReadonlyConfigError) as e:
dest._format_and_raise(key=key, value=src_value, cause=e)
else:
from omegaconf import open_dict
if is_structured_config(src_type):
with open_dict(dest):
dest[key] = src._get_node(key)
else:
dest[key] = src._get_node(key)
_update_types(node=dest, ref_type=src_ref_type, object_type=src_type)
flags = src._metadata.flags
assert flags is not None
for flag, value in flags.items():
if value is not None:
dest._set_flag(flag, value)
@staticmethod
def _list_merge(dest: Any, src: Any) -> None:
from omegaconf import DictConfig, ListConfig, OmegaConf
assert isinstance(dest, ListConfig)
assert isinstance(src, ListConfig)
if src._is_none():
dest._set_value(None)
elif src._is_missing():
if dest._metadata.element_type is Any:
dest._metadata.element_type = src._metadata.element_type
elif src._is_interpolation():
dest._set_value(src._value())
else:
temp_target = ListConfig(content=[], parent=dest._get_parent())
temp_target.__dict__["_metadata"] = copy.deepcopy(
dest.__dict__["_metadata"]
)
is_optional, et = _resolve_optional(dest._metadata.element_type)
if is_structured_config(et):
prototype = DictConfig(et, ref_type=et, is_optional=is_optional)
for item in src._iter_ex(resolve=False):
if isinstance(item, DictConfig):
item = OmegaConf.merge(prototype, item)
temp_target.append(item)
else:
for item in src._iter_ex(resolve=False):
temp_target.append(item)
dest.__dict__["_content"] = temp_target.__dict__["_content"]
flags = src._metadata.flags
assert flags is not None
for flag, value in flags.items():
if value is not None:
dest._set_flag(flag, value)
def merge_with(
self,
*others: Union[
"BaseContainer", Dict[str, Any], List[Any], Tuple[Any, ...], Any
],
) -> None:
try:
self._merge_with(*others)
except Exception as e:
self._format_and_raise(key=None, value=None, cause=e)
def _merge_with(
self,
*others: Union[
"BaseContainer", Dict[str, Any], List[Any], Tuple[Any, ...], Any
],
) -> None:
from .dictconfig import DictConfig
from .listconfig import ListConfig
for other in others:
if other is None:
raise ValueError("Cannot merge with a None config")
my_flags = {}
if self._get_flag("allow_objects") is True:
my_flags = {"allow_objects": True}
other = _ensure_container(other, flags=my_flags)
if isinstance(self, DictConfig) and isinstance(other, DictConfig):
BaseContainer._map_merge(self, other)
elif isinstance(self, ListConfig) and isinstance(other, ListConfig):
BaseContainer._list_merge(self, other)
else:
raise TypeError("Cannot merge DictConfig with ListConfig")
self._re_parent()
def _set_item_impl(self, key: Any, value: Any) -> None:
from .nodes import AnyNode, ValueNode
if isinstance(value, Node):
do_deepcopy = not self._get_flag("no_deepcopy_set_nodes")
if not do_deepcopy and isinstance(value, Container):
if self._get_root() is value._get_root():
do_deepcopy = True
if do_deepcopy:
value = copy.deepcopy(value)
value._set_parent(None)
try:
old = value._key()
value._set_key(key)
self._validate_set(key, value)
finally:
value._set_key(old)
else:
self._validate_set(key, value)
if self._get_flag("readonly"):
raise ReadonlyConfigError("Cannot change read-only config container")
input_is_node = isinstance(value, Node)
target_node_ref = self._get_node(key)
input_is_typed_vnode = isinstance(value, ValueNode) and not isinstance(
value, AnyNode
)
target_is_vnode = isinstance(target_node_ref, ValueNode)
def get_target_type_hint(val: Any) -> Any:
if not is_structured_config(val):
type_hint = self._metadata.element_type
else:
target = self._get_node(key)
if target is None:
type_hint = self._metadata.element_type
else:
assert isinstance(target, Node)
type_hint = target._metadata.type_hint
return type_hint
def assign(value_key: Any, val: Node) -> None:
assert val._get_parent() is None
v = val
v._set_parent(self)
v._set_key(value_key)
_deep_update_type_hint(node=v, type_hint=self._metadata.element_type)
self.__dict__["_content"][value_key] = v
if input_is_typed_vnode:
assign(key, value)
else:
special_value = _is_special(value)
type_hint = get_target_type_hint(value)
should_set_value = target_node_ref is not None and (
special_value
or (
isinstance(target_node_ref, Container)
and target_node_ref._has_ref_type()
)
or (target_is_vnode and not isinstance(target_node_ref, AnyNode))
or (isinstance(target_node_ref, AnyNode) and is_primitive_type(value))
)
if should_set_value:
if special_value and isinstance(value, Node):
value = value._value()
self.__dict__["_content"][key]._set_value(value)
elif input_is_node:
_, ref_type = _resolve_optional(type_hint)
if special_value and (
is_container_annotation(ref_type) or is_structured_config(ref_type)
):
self._wrap_value_and_set(key, value._value(), type_hint)
else:
assign(key, value)
else:
self._wrap_value_and_set(key, value, type_hint)
def _wrap_value_and_set(self, key: Any, val: Any, type_hint: Any) -> None:
from omegaconf.omegaconf import _maybe_wrap
is_optional, ref_type = _resolve_optional(type_hint)
wrapped = _maybe_wrap(
ref_type=ref_type,
key=key,
value=val,
is_optional=is_optional,
parent=self,
)
self.__dict__["_content"][key] = wrapped
@staticmethod
def _item_eq(
c1: Container,
k1: Union[DictKeyType, int],
c2: Container,
k2: Union[DictKeyType, int],
) -> bool:
v1 = c1._get_node(k1)
v2 = c2._get_node(k2)
assert v1 is not None and v2 is not None
assert isinstance(v1, Node)
assert isinstance(v2, Node)
if v1._is_none() and v2._is_none():
return True
if v1._is_missing() and v2._is_missing():
return True
v1_inter = v1._is_interpolation()
v2_inter = v2._is_interpolation()
dv1: Optional[Node] = v1
dv2: Optional[Node] = v2
if v1_inter:
dv1 = v1._maybe_dereference_node()
if v2_inter:
dv2 = v2._maybe_dereference_node()
if v1_inter and v2_inter:
if dv1 is None or dv2 is None:
return v1 == v2
else:
if isinstance(dv1, Container) and isinstance(dv2, Container):
if dv1 != dv2:
return False
dv1 = _get_value(dv1)
dv2 = _get_value(dv2)
return dv1 == dv2
elif not v1_inter and not v2_inter:
v1 = _get_value(v1)
v2 = _get_value(v2)
ret = v1 == v2
assert isinstance(ret, bool)
return ret
else:
dv1 = _get_value(dv1)
dv2 = _get_value(dv2)
ret = dv1 == dv2
assert isinstance(ret, bool)
return ret
def _is_optional(self) -> bool:
return self.__dict__["_metadata"].optional is True
def _is_interpolation(self) -> bool:
return _is_interpolation(self.__dict__["_content"])
@abstractmethod
def _validate_get(self, key: Any, value: Any = None) -> None:
...
@abstractmethod
def _validate_set(self, key: Any, value: Any) -> None:
...
def _value(self) -> Any:
return self.__dict__["_content"]
def _get_full_key(self, key: Union[DictKeyType, int, slice, None]) -> str:
from .listconfig import ListConfig
from .omegaconf import _select_one
if not isinstance(key, (int, str, Enum, float, bool, slice, bytes, type(None))):
return ""
def _slice_to_str(x: slice) -> str:
if x.step is not None:
return f"{x.start}:{x.stop}:{x.step}"
else:
return f"{x.start}:{x.stop}"
def prepand(full_key: str, parent_type: Any, cur_type: Any, key: Any) -> str:
if isinstance(key, slice):
key = _slice_to_str(key)
elif isinstance(key, Enum):
key = key.name
elif isinstance(key, (int, float, bool)):
key = str(key)
if issubclass(parent_type, ListConfig):
if full_key != "":
if issubclass(cur_type, ListConfig):
full_key = f"[{key}]{full_key}"
else:
full_key = f"[{key}].{full_key}"
else:
full_key = f"[{key}]"
else:
if full_key == "":
full_key = key
else:
if issubclass(cur_type, ListConfig):
full_key = f"{key}{full_key}"
else:
full_key = f"{key}.{full_key}"
return full_key
if key is not None and key != "":
assert isinstance(self, Container)
cur, _ = _select_one(
c=self, key=str(key), throw_on_missing=False, throw_on_type_error=False
)
if cur is None:
cur = self
full_key = prepand("", type(cur), None, key)
if cur._key() is not None:
full_key = prepand(
full_key, type(cur._get_parent()), type(cur), cur._key()
)
else:
full_key = prepand("", type(cur._get_parent()), type(cur), cur._key())
else:
cur = self
if cur._key() is None:
return ""
full_key = self._key()
assert cur is not None
memo = {id(cur)}
while cur._get_parent() is not None:
cur = cur._get_parent()
if id(cur) in memo:
raise ConfigCycleDetectedException(
f"Cycle when iterating over parents of key `{key!s}`"
)
memo.add(id(cur))
assert cur is not None
if cur._key() is not None:
full_key = prepand(
full_key, type(cur._get_parent()), type(cur), cur._key()
)
return full_key
def _create_structured_with_missing_fields(
ref_type: type, object_type: Optional[type] = None
) -> "DictConfig":
from . import MISSING, DictConfig
cfg_data = get_structured_config_data(ref_type)
for v in cfg_data.values():
v._set_value(MISSING)
cfg = DictConfig(cfg_data)
cfg._metadata.optional, cfg._metadata.ref_type = _resolve_optional(ref_type)
cfg._metadata.object_type = object_type
return cfg
def _update_types(node: Node, ref_type: Any, object_type: Optional[type]) -> None:
if object_type is not None and not is_primitive_dict(object_type):
node._metadata.object_type = object_type
if node._metadata.ref_type is Any:
_deep_update_type_hint(node, ref_type)
def _deep_update_type_hint(node: Node, type_hint: Any) -> None:
from omegaconf import DictConfig, ListConfig
from ._utils import get_dict_key_value_types, get_list_element_type
if type_hint is Any:
return
_shallow_validate_type_hint(node, type_hint)
new_is_optional, new_ref_type = _resolve_optional(type_hint)
node._metadata.ref_type = new_ref_type
node._metadata.optional = new_is_optional
if is_list_annotation(new_ref_type) and isinstance(node, ListConfig):
new_element_type = get_list_element_type(new_ref_type)
node._metadata.element_type = new_element_type
if not _is_special(node):
for i in range(len(node)):
_deep_update_subnode(node, i, new_element_type)
if is_dict_annotation(new_ref_type) and isinstance(node, DictConfig):
new_key_type, new_element_type = get_dict_key_value_types(new_ref_type)
node._metadata.key_type = new_key_type
node._metadata.element_type = new_element_type
if not _is_special(node):
for key in node:
if new_key_type is not Any and not isinstance(key, new_key_type):
raise KeyValidationError(
f"Key {key!r} ({type(key).__name__}) is incompatible"
+ f" with key type hint '{new_key_type.__name__}'"
)
_deep_update_subnode(node, key, new_element_type)
def _deep_update_subnode(node: BaseContainer, key: Any, value_type_hint: Any) -> None:
subnode = node._get_node(key)
assert isinstance(subnode, Node)
if _is_special(subnode):
node._wrap_value_and_set(key, subnode._value(), value_type_hint)
subnode = node._get_node(key)
assert isinstance(subnode, Node)
_deep_update_type_hint(subnode, value_type_hint)
def _shallow_validate_type_hint(node: Node, type_hint: Any) -> None:
from omegaconf import DictConfig, ListConfig, ValueNode
is_optional, ref_type = _resolve_optional(type_hint)
vk = get_value_kind(node)
if node._is_none():
if not is_optional:
value = _get_value(node)
raise ValidationError(
f"Value {value!r} ({type(value).__name__})"
+ f" is incompatible with type hint '{ref_type.__name__}'"
)
return
elif vk in (ValueKind.MANDATORY_MISSING, ValueKind.INTERPOLATION):
return
elif vk == ValueKind.VALUE:
if is_primitive_type(ref_type) and isinstance(node, ValueNode):
value = node._value()
if not isinstance(value, ref_type):
raise ValidationError(
f"Value {value!r} ({type(value).__name__})"
+ f" is incompatible with type hint '{ref_type.__name__}'"
)
elif is_structured_config(ref_type) and isinstance(node, DictConfig):
return
elif is_dict_annotation(ref_type) and isinstance(node, DictConfig):
return
elif is_list_annotation(ref_type) and isinstance(node, ListConfig):
return
else:
if isinstance(node, ValueNode):
value = node._value()
raise ValidationError(
f"Value {value!r} ({type(value).__name__})"
+ f" is incompatible with type hint '{ref_type}'"
)
else:
raise ValidationError(
f"'{type(node).__name__}' is incompatible"
+ f" with type hint '{ref_type}'"
)
else:
assert False
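# A quick usage sketch of the merge machinery above, seen through the public
# OmegaConf API (assumes the `omegaconf` package is installed; values are illustrative):
from omegaconf import OmegaConf

base = OmegaConf.create({"db": {"host": "localhost", "port": 5432}})
override = OmegaConf.create({"db": {"port": 6432}})

# OmegaConf.merge ends up in BaseContainer._map_merge for dict-like configs.
merged = OmegaConf.merge(base, override)
assert merged.db.port == 6432 and merged.db.host == "localhost"

# merge_with_dotlist (defined above) parses "key=value" strings with YAML
# and applies them via OmegaConf.update.
merged.merge_with_dotlist(["db.port=7000"])
assert merged.db.port == 7000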
| true
| true
|
790337b5ebb41712126a25e1814cf7d7972e199d
| 4,665
|
py
|
Python
|
tools/run_clang_format.py
|
markcutler/autopilot
|
bc55a52651f711843e8c234114e7b9f065c01bc9
|
[
"MIT"
] | null | null | null |
tools/run_clang_format.py
|
markcutler/autopilot
|
bc55a52651f711843e8c234114e7b9f065c01bc9
|
[
"MIT"
] | null | null | null |
tools/run_clang_format.py
|
markcutler/autopilot
|
bc55a52651f711843e8c234114e7b9f065c01bc9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import click
import os
import tempfile
import filecmp
import shutil
import difflib
import sys
import git
import shell_utils
SOURCE_EXTENSIONS = [".cpp", ".c", ".cxx", ".cc", ".h", ".hxx", ".hpp"]
class Colors:
HEADER = '\033[95m'
BLUE = '\033[94m'
CYAN = '\033[96m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
END = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Symbols:
PASS = u'\u2714'
FAIL = u'\u2718'
# Find all the source files we want to check
def find_files_to_check(modified_files, repo_dir):
if modified_files:
# Check which files have been added or modified by git
changed_files = shell_utils.run_shell_command('git diff-index --diff-filter=ACMR --name-only HEAD')
changed_files = "{}".format(changed_files.decode('utf-8')).split()
sources_to_check = [os.path.join(repo_dir, f) for f in changed_files if
f.lower().endswith(tuple(SOURCE_EXTENSIONS))]
else:
# Recursively walk through the repo and find all the files that meet the extensions criteria
sources_to_check = [os.path.join(d, f)
for d, dirs, files in os.walk(repo_dir)
for f in files if f.lower().endswith(tuple(SOURCE_EXTENSIONS))]
return sources_to_check
# Given a list of files, run clang-format on them. Optionally fix the files in place if desired
def check_files(files, fix_in_place, verbose):
num_failed_files = 0
for file in files:
# format the file with clang-format and save the output to a temporary file
output = shell_utils.run_shell_command("clang-format -style=file -fallback-style=none " + file)
formatted_file = tempfile.NamedTemporaryFile()
formatted_file.write(output)
formatted_file.seek(0)
# check if the formatted file is different from the original
file_changed = not filecmp.cmp(formatted_file.name, file)
# Only need to handle those files that were changed by clang-format. Files that weren't changed are good to go.
if file_changed:
num_failed_files += 1
print(Colors.RED + Symbols.FAIL + Colors.END + " " + str(file))
if verbose:
# get and display the diff between the original and formatted files
original_file = open(file, 'r')
new_file = open(formatted_file.name, 'r')
diff = difflib.unified_diff(original_file.readlines(), new_file.readlines())
print(Colors.CYAN)
for line in diff:
sys.stdout.write(line)
print(Colors.END)
if fix_in_place:
# if we are fixing in place, just replace the original file with the changed contents
print(Colors.YELLOW + "WARNING: Fixing in place. Original file will be changed." + Colors.END)
shutil.move(formatted_file.name, file)
else:
print(Colors.GREEN + Symbols.PASS + Colors.END + " " + str(file))
# clean up
try:
formatted_file.close()
except FileNotFoundError as _:
# Do nothing. We must have moved the file above
pass
return num_failed_files
@click.command()
@click.option('-f', '--fix-in-place', default=False, is_flag=True, help='Fix the issues found.')
@click.option('-m', '--modified-files', default=False, is_flag=True, help='Check modified files (according to git) '
'only.')
@click.option('-v', '--verbose', default=False, is_flag=True, help="Print all the errors found.")
def main(fix_in_place, modified_files, verbose):
# change directory to the root of the git project
repo = git.Repo('.', search_parent_directories=True)
os.chdir(repo.working_tree_dir)
    # Find the source files we want to check
sources_to_check = find_files_to_check(modified_files, repo.working_tree_dir)
# Run clang-format and compare the output to the original files
num_failed_files = check_files(sources_to_check, fix_in_place, verbose)
# Return success or failure
if num_failed_files:
print(
Colors.RED + 3 * Symbols.FAIL + " " + str(num_failed_files) + " files have formatting errors." + Colors.END)
if fix_in_place:
print("The formatting errors have been automatically fixed.")
sys.exit(1)
print(Colors.GREEN + 3 * Symbols.PASS + Colors.END + " All files are properly formatted!")
sys.exit(0)
if __name__ == '__main__':
main()
| 37.02381
| 120
| 0.629582
|
import click
import os
import tempfile
import filecmp
import shutil
import difflib
import sys
import git
import shell_utils
SOURCE_EXTENSIONS = [".cpp", ".c", ".cxx", ".cc", ".h", ".hxx", ".hpp"]
class Colors:
HEADER = '\033[95m'
BLUE = '\033[94m'
CYAN = '\033[96m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
END = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Symbols:
PASS = u'\u2714'
FAIL = u'\u2718'
def find_files_to_check(modified_files, repo_dir):
if modified_files:
changed_files = shell_utils.run_shell_command('git diff-index --diff-filter=ACMR --name-only HEAD')
changed_files = "{}".format(changed_files.decode('utf-8')).split()
sources_to_check = [os.path.join(repo_dir, f) for f in changed_files if
f.lower().endswith(tuple(SOURCE_EXTENSIONS))]
else:
sources_to_check = [os.path.join(d, f)
for d, dirs, files in os.walk(repo_dir)
for f in files if f.lower().endswith(tuple(SOURCE_EXTENSIONS))]
return sources_to_check
def check_files(files, fix_in_place, verbose):
num_failed_files = 0
for file in files:
output = shell_utils.run_shell_command("clang-format -style=file -fallback-style=none " + file)
formatted_file = tempfile.NamedTemporaryFile()
formatted_file.write(output)
formatted_file.seek(0)
file_changed = not filecmp.cmp(formatted_file.name, file)
if file_changed:
num_failed_files += 1
print(Colors.RED + Symbols.FAIL + Colors.END + " " + str(file))
if verbose:
# get and display the diff between the original and formatted files
original_file = open(file, 'r')
new_file = open(formatted_file.name, 'r')
diff = difflib.unified_diff(original_file.readlines(), new_file.readlines())
print(Colors.CYAN)
for line in diff:
sys.stdout.write(line)
print(Colors.END)
if fix_in_place:
# if we are fixing in place, just replace the original file with the changed contents
print(Colors.YELLOW + "WARNING: Fixing in place. Original file will be changed." + Colors.END)
shutil.move(formatted_file.name, file)
else:
print(Colors.GREEN + Symbols.PASS + Colors.END + " " + str(file))
# clean up
try:
formatted_file.close()
except FileNotFoundError as _:
# Do nothing. We must have moved the file above
pass
return num_failed_files
@click.command()
@click.option('-f', '--fix-in-place', default=False, is_flag=True, help='Fix the issues found.')
@click.option('-m', '--modified-files', default=False, is_flag=True, help='Check modified files (according to git) '
'only.')
@click.option('-v', '--verbose', default=False, is_flag=True, help="Print all the errors found.")
def main(fix_in_place, modified_files, verbose):
# change directory to the root of the git project
repo = git.Repo('.', search_parent_directories=True)
os.chdir(repo.working_tree_dir)
    # Find the source files we want to check
sources_to_check = find_files_to_check(modified_files, repo.working_tree_dir)
# Run clang-format and compare the output to the original files
num_failed_files = check_files(sources_to_check, fix_in_place, verbose)
# Return success or failure
if num_failed_files:
print(
Colors.RED + 3 * Symbols.FAIL + " " + str(num_failed_files) + " files have formatting errors." + Colors.END)
if fix_in_place:
print("The formatting errors have been automatically fixed.")
sys.exit(1)
print(Colors.GREEN + 3 * Symbols.PASS + Colors.END + " All files are properly formatted!")
sys.exit(0)
if __name__ == '__main__':
main()
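# The script relies on a local `shell_utils.run_shell_command` helper that is not
# shown in this file. A minimal stand-in, under the assumption that it returns the
# command's raw stdout as bytes (an assumption, not the repository's actual code):
import subprocess

def run_shell_command(command: str) -> bytes:
    # shell=True because the callers above compose a single command string.
    return subprocess.check_output(command, shell=True)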
| true
| true
|
79033800a202c366932dad5c58be20ae82d974e5
| 1,452
|
py
|
Python
|
tests/core/test_traverse.py
|
next-franciscoalgaba/python-benedict
|
81ff459304868327238c322a0a8a203d9d5d4314
|
[
"MIT"
] | 365
|
2019-05-21T05:50:30.000Z
|
2022-03-29T11:35:35.000Z
|
tests/core/test_traverse.py
|
next-franciscoalgaba/python-benedict
|
81ff459304868327238c322a0a8a203d9d5d4314
|
[
"MIT"
] | 78
|
2019-11-16T12:22:54.000Z
|
2022-03-14T12:21:30.000Z
|
tests/core/test_traverse.py
|
next-franciscoalgaba/python-benedict
|
81ff459304868327238c322a0a8a203d9d5d4314
|
[
"MIT"
] | 26
|
2019-12-16T06:34:12.000Z
|
2022-02-28T07:16:41.000Z
|
# -*- coding: utf-8 -*-
from benedict.core import clone as _clone
from benedict.core import traverse as _traverse
import unittest
class traverse_test_case(unittest.TestCase):
def test_traverse(self):
i = {
'a': {
'x': 2,
'y': 3,
'z': {
'ok': 5,
}
},
'b': {
'x': 7,
'y': 11,
'z': {
'ok': 13,
}
},
'c': {
'x': 17,
'y': 19,
'z': {
'ok': 23,
}
},
}
o = _clone(i)
with self.assertRaises(ValueError):
_traverse(o, True)
def f(parent, key, value):
if not isinstance(value, dict):
parent[key] = (value + 1)
_traverse(o, f)
r = {
'a': {
'x': 3,
'y': 4,
'z': {
'ok': 6,
}
},
'b': {
'x': 8,
'y': 12,
'z': {
'ok': 14,
}
},
'c': {
'x': 18,
'y': 20,
'z': {
'ok': 24,
}
},
}
self.assertEqual(o, r)
| 22
| 47
| 0.249311
|
from benedict.core import clone as _clone
from benedict.core import traverse as _traverse
import unittest
class traverse_test_case(unittest.TestCase):
def test_traverse(self):
i = {
'a': {
'x': 2,
'y': 3,
'z': {
'ok': 5,
}
},
'b': {
'x': 7,
'y': 11,
'z': {
'ok': 13,
}
},
'c': {
'x': 17,
'y': 19,
'z': {
'ok': 23,
}
},
}
o = _clone(i)
with self.assertRaises(ValueError):
_traverse(o, True)
def f(parent, key, value):
if not isinstance(value, dict):
parent[key] = (value + 1)
_traverse(o, f)
r = {
'a': {
'x': 3,
'y': 4,
'z': {
'ok': 6,
}
},
'b': {
'x': 8,
'y': 12,
'z': {
'ok': 14,
}
},
'c': {
'x': 18,
'y': 20,
'z': {
'ok': 24,
}
},
}
self.assertEqual(o, r)
| true
| true
|
7903380d28b911cf809d3bab7b3f2462ff4f1120
| 8,099
|
py
|
Python
|
yandex_market_language/models/shop.py
|
stefanitsky/yandex_market_language
|
e17595b556fc55e183cf366227b2739c5c6178dc
|
[
"MIT"
] | 7
|
2020-03-28T22:35:52.000Z
|
2021-09-16T10:50:10.000Z
|
yandex_market_language/models/shop.py
|
stefanitsky/yandex_market_language
|
e17595b556fc55e183cf366227b2739c5c6178dc
|
[
"MIT"
] | 192
|
2020-03-29T12:38:53.000Z
|
2021-09-01T14:12:07.000Z
|
yandex_market_language/models/shop.py
|
stefanitsky/yandex_market_language
|
e17595b556fc55e183cf366227b2739c5c6178dc
|
[
"MIT"
] | 6
|
2020-06-05T09:07:02.000Z
|
2021-11-28T14:37:58.000Z
|
from typing import List
from yandex_market_language import models, exceptions
from yandex_market_language.models import fields
from yandex_market_language.models.abstract import XMLElement, XMLSubElement
from yandex_market_language.exceptions import ValidationError
class Shop(
fields.EnableAutoDiscountField,
fields.DeliveryOptionsField,
fields.PickupOptionsField,
models.AbstractModel
):
"""
Shop model.
Docs:
https://yandex.ru/support/partnermarket/elements/shop.html
"""
__slots__ = [
'_url',
'name',
'company',
'currencies',
'categories',
'offers',
'platform',
'version',
'agency',
'email',
'_delivery_options',
'_pickup_options',
'_enable_auto_discounts',
'gifts',
'promos'
]
def __init__(
self,
name: str,
company: str,
url: str,
currencies: List["models.Currency"],
categories: List["models.Category"],
offers: List["models.offers.AbstractOffer"],
platform: str = None,
version: str = None,
agency: str = None,
email: str = None,
delivery_options: List["models.Option"] = None,
pickup_options: List["models.Option"] = None,
enable_auto_discounts=None,
gifts: List["models.Gift"] = None,
promos: List["models.Promo"] = None,
):
self.name = name
self.company = company
self.url = url
self.platform = platform
self.version = version
self.agency = agency
self.email = email
self.currencies = currencies
self.categories = categories
self.delivery_options = delivery_options
self.pickup_options = pickup_options
self.enable_auto_discounts = enable_auto_discounts
self.offers = offers
self.gifts = gifts
self.promos = promos
@property
def url(self):
return self._url
@url.setter
def url(self, value: str):
if len(value) > 512:
raise ValidationError("The maximum url length is 512 characters.")
self._url = value
def create_dict(self, **kwargs) -> dict:
return dict(
name=self.name,
company=self.company,
url=self.url,
platform=self.platform,
version=self.version,
agency=self.agency,
email=self.email,
currencies=[c.to_dict() for c in self.currencies],
categories=[c.to_dict() for c in self.categories],
delivery_options=[o.to_dict() for o in self.delivery_options],
pickup_options=[o.to_dict() for o in self.pickup_options],
enable_auto_discounts=self.enable_auto_discounts,
offers=[o.to_dict() for o in self.offers],
gifts=[g.to_dict() for g in self.gifts] if self.gifts else [],
promos=[p.to_dict() for p in self.promos] if self.promos else [],
)
def create_xml(self, **kwargs) -> XMLElement:
shop_el = XMLElement("shop")
# Add simple elements
for tag in (
"name",
"company",
"url",
"platform",
"version",
"agency",
"email",
):
value = getattr(self, tag)
if value:
el = XMLSubElement(shop_el, tag)
el.text = value
# Add currencies
currencies_el = XMLSubElement(shop_el, "currencies")
for c in self.currencies:
c.to_xml(currencies_el)
# Add categories
categories_el = XMLSubElement(shop_el, "categories")
for c in self.categories:
c.to_xml(categories_el)
# Add delivery options
if self.delivery_options:
delivery_options_el = XMLSubElement(shop_el, "delivery-options")
for o in self.delivery_options:
o.to_xml(delivery_options_el)
# Add pickup options
if self.pickup_options:
pickup_options_el = XMLSubElement(shop_el, "pickup-options")
for o in self.pickup_options:
o.to_xml(pickup_options_el)
# Add enable_auto_discounts
if self._enable_auto_discounts:
enable_auto_discounts_el = XMLSubElement(
shop_el, "enable_auto_discounts"
)
enable_auto_discounts_el.text = self._enable_auto_discounts
# Add offers
offers_el = XMLSubElement(shop_el, "offers")
for o in self.offers:
o.to_xml(offers_el)
# Add gifts
if self.gifts:
gifts_el = XMLSubElement(shop_el, "gifts")
for g in self.gifts:
g.to_xml(gifts_el)
# Add promos
if self.promos:
promos_el = XMLSubElement(shop_el, "promos")
for p in self.promos:
p.to_xml(promos_el)
return shop_el
@staticmethod
def from_xml(shop_el: XMLElement) -> "Shop":
kwargs = {}
for el in shop_el:
if el.tag == "currencies":
currencies = []
for currency_el in el:
currencies.append(models.Currency.from_xml(currency_el))
kwargs["currencies"] = currencies
elif el.tag == "categories":
categories = []
for category_el in el:
categories.append(models.Category.from_xml(category_el))
kwargs["categories"] = categories
elif el.tag == "delivery-options":
delivery_options = []
for option_el in el:
delivery_options.append(models.Option.from_xml(option_el))
kwargs["delivery_options"] = delivery_options
elif el.tag == "pickup-options":
pickup_options = []
for option_el in el:
pickup_options.append(models.Option.from_xml(option_el))
kwargs["pickup_options"] = pickup_options
elif el.tag == "offers":
offers = []
for offer_el in el:
offer_type = offer_el.attrib.get("type")
if offer_type is None:
offer = models.SimplifiedOffer.from_xml(offer_el)
elif offer_type == "vendor.model":
offer = models.ArbitraryOffer.from_xml(offer_el)
elif offer_type == "book":
offer = models.BookOffer.from_xml(offer_el)
elif offer_type == "audiobook":
offer = models.AudioBookOffer.from_xml(offer_el)
elif offer_type == "artist.title":
offer = models.MusicVideoOffer.from_xml(offer_el)
elif offer_type == "medicine":
offer = models.MedicineOffer.from_xml(offer_el)
elif offer_type == "event-ticket":
offer = models.EventTicketOffer.from_xml(offer_el)
elif offer_type == "alco":
offer = models.AlcoholOffer.from_xml(offer_el)
else:
raise exceptions.ParseError(
"Got unexpected offer type: {0}".format(offer_type)
)
offers.append(offer)
kwargs["offers"] = offers
elif el.tag == "gifts":
gifts = []
for gift_el in el:
gifts.append(models.Gift.from_xml(gift_el))
if gifts:
kwargs["gifts"] = gifts
elif el.tag == "promos":
promos = []
for promo_el in el:
promos.append(models.Promo.from_xml(promo_el))
if promos:
kwargs["promos"] = promos
else:
kwargs[el.tag] = el.text
return Shop(**kwargs)
| 34.172996
| 79
| 0.540684
|
from typing import List
from yandex_market_language import models, exceptions
from yandex_market_language.models import fields
from yandex_market_language.models.abstract import XMLElement, XMLSubElement
from yandex_market_language.exceptions import ValidationError
class Shop(
fields.EnableAutoDiscountField,
fields.DeliveryOptionsField,
fields.PickupOptionsField,
models.AbstractModel
):
__slots__ = [
'_url',
'name',
'company',
'currencies',
'categories',
'offers',
'platform',
'version',
'agency',
'email',
'_delivery_options',
'_pickup_options',
'_enable_auto_discounts',
'gifts',
'promos'
]
def __init__(
self,
name: str,
company: str,
url: str,
currencies: List["models.Currency"],
categories: List["models.Category"],
offers: List["models.offers.AbstractOffer"],
platform: str = None,
version: str = None,
agency: str = None,
email: str = None,
delivery_options: List["models.Option"] = None,
pickup_options: List["models.Option"] = None,
enable_auto_discounts=None,
gifts: List["models.Gift"] = None,
promos: List["models.Promo"] = None,
):
self.name = name
self.company = company
self.url = url
self.platform = platform
self.version = version
self.agency = agency
self.email = email
self.currencies = currencies
self.categories = categories
self.delivery_options = delivery_options
self.pickup_options = pickup_options
self.enable_auto_discounts = enable_auto_discounts
self.offers = offers
self.gifts = gifts
self.promos = promos
@property
def url(self):
return self._url
@url.setter
def url(self, value: str):
if len(value) > 512:
raise ValidationError("The maximum url length is 512 characters.")
self._url = value
def create_dict(self, **kwargs) -> dict:
return dict(
name=self.name,
company=self.company,
url=self.url,
platform=self.platform,
version=self.version,
agency=self.agency,
email=self.email,
currencies=[c.to_dict() for c in self.currencies],
categories=[c.to_dict() for c in self.categories],
delivery_options=[o.to_dict() for o in self.delivery_options],
pickup_options=[o.to_dict() for o in self.pickup_options],
enable_auto_discounts=self.enable_auto_discounts,
offers=[o.to_dict() for o in self.offers],
gifts=[g.to_dict() for g in self.gifts] if self.gifts else [],
promos=[p.to_dict() for p in self.promos] if self.promos else [],
)
def create_xml(self, **kwargs) -> XMLElement:
shop_el = XMLElement("shop")
for tag in (
"name",
"company",
"url",
"platform",
"version",
"agency",
"email",
):
value = getattr(self, tag)
if value:
el = XMLSubElement(shop_el, tag)
el.text = value
currencies_el = XMLSubElement(shop_el, "currencies")
for c in self.currencies:
c.to_xml(currencies_el)
categories_el = XMLSubElement(shop_el, "categories")
for c in self.categories:
c.to_xml(categories_el)
if self.delivery_options:
delivery_options_el = XMLSubElement(shop_el, "delivery-options")
for o in self.delivery_options:
o.to_xml(delivery_options_el)
if self.pickup_options:
pickup_options_el = XMLSubElement(shop_el, "pickup-options")
for o in self.pickup_options:
o.to_xml(pickup_options_el)
if self._enable_auto_discounts:
enable_auto_discounts_el = XMLSubElement(
shop_el, "enable_auto_discounts"
)
enable_auto_discounts_el.text = self._enable_auto_discounts
offers_el = XMLSubElement(shop_el, "offers")
for o in self.offers:
o.to_xml(offers_el)
if self.gifts:
gifts_el = XMLSubElement(shop_el, "gifts")
for g in self.gifts:
g.to_xml(gifts_el)
if self.promos:
promos_el = XMLSubElement(shop_el, "promos")
for p in self.promos:
p.to_xml(promos_el)
return shop_el
@staticmethod
def from_xml(shop_el: XMLElement) -> "Shop":
kwargs = {}
for el in shop_el:
if el.tag == "currencies":
currencies = []
for currency_el in el:
currencies.append(models.Currency.from_xml(currency_el))
kwargs["currencies"] = currencies
elif el.tag == "categories":
categories = []
for category_el in el:
categories.append(models.Category.from_xml(category_el))
kwargs["categories"] = categories
elif el.tag == "delivery-options":
delivery_options = []
for option_el in el:
delivery_options.append(models.Option.from_xml(option_el))
kwargs["delivery_options"] = delivery_options
elif el.tag == "pickup-options":
pickup_options = []
for option_el in el:
pickup_options.append(models.Option.from_xml(option_el))
kwargs["pickup_options"] = pickup_options
elif el.tag == "offers":
offers = []
for offer_el in el:
offer_type = offer_el.attrib.get("type")
if offer_type is None:
offer = models.SimplifiedOffer.from_xml(offer_el)
elif offer_type == "vendor.model":
offer = models.ArbitraryOffer.from_xml(offer_el)
elif offer_type == "book":
offer = models.BookOffer.from_xml(offer_el)
elif offer_type == "audiobook":
offer = models.AudioBookOffer.from_xml(offer_el)
elif offer_type == "artist.title":
offer = models.MusicVideoOffer.from_xml(offer_el)
elif offer_type == "medicine":
offer = models.MedicineOffer.from_xml(offer_el)
elif offer_type == "event-ticket":
offer = models.EventTicketOffer.from_xml(offer_el)
elif offer_type == "alco":
offer = models.AlcoholOffer.from_xml(offer_el)
else:
raise exceptions.ParseError(
"Got unexpected offer type: {0}".format(offer_type)
)
offers.append(offer)
kwargs["offers"] = offers
elif el.tag == "gifts":
gifts = []
for gift_el in el:
gifts.append(models.Gift.from_xml(gift_el))
if gifts:
kwargs["gifts"] = gifts
elif el.tag == "promos":
promos = []
for promo_el in el:
promos.append(models.Promo.from_xml(promo_el))
if promos:
kwargs["promos"] = promos
else:
kwargs[el.tag] = el.text
return Shop(**kwargs)
| true
| true
|
7903386c03359f170d66886b95a3ab0227613175
| 1,199
|
py
|
Python
|
tradefed_cluster/device_blocker.py
|
maksonlee/tradefed_cluster
|
d1153743ce8ddcad752443b23851015630862aea
|
[
"Apache-2.0"
] | null | null | null |
tradefed_cluster/device_blocker.py
|
maksonlee/tradefed_cluster
|
d1153743ce8ddcad752443b23851015630862aea
|
[
"Apache-2.0"
] | null | null | null |
tradefed_cluster/device_blocker.py
|
maksonlee/tradefed_cluster
|
d1153743ce8ddcad752443b23851015630862aea
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to blocker devices based on device blocklists."""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
from tradefed_cluster import datastore_entities
def IsLabBlocked(lab_name):
"""Check if the lab is blocked.
Args:
lab_name: lab name
Returns:
true if the lab is blocked, otherwise false.
"""
device_blocklists = (
datastore_entities.DeviceBlocklist.query()
.filter(datastore_entities.DeviceBlocklist.lab_name == lab_name)
.fetch(1))
return bool(device_blocklists)
| 32.405405
| 74
| 0.764804
|
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
from tradefed_cluster import datastore_entities
def IsLabBlocked(lab_name):
device_blocklists = (
datastore_entities.DeviceBlocklist.query()
.filter(datastore_entities.DeviceBlocklist.lab_name == lab_name)
.fetch(1))
return bool(device_blocklists)
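# Usage sketch (the lab name is illustrative; a datastore/NDB context must already
# be active, which tradefed_cluster sets up elsewhere):
# from tradefed_cluster import device_blocker
# if device_blocker.IsLabBlocked("lab-1"):
#     ...  # skip scheduling devices from this lab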
| true
| true
|
79033900592f01fd75e20a66b3237b2e60d03fb3
| 1,953
|
py
|
Python
|
backend/accounts/views.py
|
eliefrancois/project2-diabetesapplication-api
|
e0fd904b1f50eb7ed68fe1ceb74c2a1784e8dc40
|
[
"MIT"
] | null | null | null |
backend/accounts/views.py
|
eliefrancois/project2-diabetesapplication-api
|
e0fd904b1f50eb7ed68fe1ceb74c2a1784e8dc40
|
[
"MIT"
] | null | null | null |
backend/accounts/views.py
|
eliefrancois/project2-diabetesapplication-api
|
e0fd904b1f50eb7ed68fe1ceb74c2a1784e8dc40
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from accounts import models
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from accounts import serializers  # Used to tell the API what data to expect when making a POST, PUT or PATCH request
from accounts import models
from accounts import permissions
class Injection_DetailsViewSet(viewsets.ModelViewSet):
"""Handles creating, reading and updating patient info readings"""
authentication_classes = (TokenAuthentication,)
    serializer_class = serializers.Injection_DetailsSerializer  # The serializer used to validate and render Injection_Details data
queryset = models.Injection_Details.objects.all()
permission_classes = (permissions.UpdateOwnReading, IsAuthenticated,) # Validates that a user is authenticated to read or modify objects
def get_queryset(self):
user = self.request.user
return models.Injection_Details.objects.get_queryset().filter(user_profile=user)
    def perform_create(self, serializer):  # Overridden so that objects created through the API are attributed to the current user
"""Sets the patient profile to the logged in user"""
serializer.save(user_profile=self.request.user) # This sets the user profile to the current user from the serializer passed in
#def create(self, serializer): # overriding this function so that when a user
#patient_info = models.PatientInfo.objects.filter(user_profile=self.request.user)
#serializer.save = self.get_serializer(patient_info, many = True) # This sets the user profile to the current user from the serializer passed in
#serializer.is_valid(raise_exceptions=True)
#self.perform_create(serializer)
#return Response(serializer.data)
| 54.25
| 153
| 0.777266
|
from django.shortcuts import render
from accounts import models
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from accounts import serializers
from accounts import models
from accounts import permissions
class Injection_DetailsViewSet(viewsets.ModelViewSet):
authentication_classes = (TokenAuthentication,)
serializer_class = serializers.Injection_DetailsSerializer
queryset = models.Injection_Details.objects.all()
permission_classes = (permissions.UpdateOwnReading, IsAuthenticated,)
def get_queryset(self):
user = self.request.user
return models.Injection_Details.objects.get_queryset().filter(user_profile=user)
def perform_create(self, serializer):
serializer.save(user_profile=self.request.user)
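# Typical registration for this viewset (sketch only; the project's router/urls
# module is not shown here and the 'injections' prefix is made up):
# from rest_framework.routers import DefaultRouter
# router = DefaultRouter()
# router.register('injections', views.Injection_DetailsViewSet)
# urlpatterns = router.urls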
| true
| true
|
7903399be9ce2f08abf032e744e8f8058081f1b6
| 2,273
|
py
|
Python
|
deepinsight_iqa/nima/predict.py
|
sandyz1000/deepinsight-iqa
|
1be15ba4bdb005d05d01eddd247de1dafbf3d256
|
[
"Apache-2.0"
] | 2
|
2021-11-22T15:57:47.000Z
|
2021-11-23T12:02:56.000Z
|
deepinsight_iqa/nima/predict.py
|
sandyz1000/deepinsight-iqa
|
1be15ba4bdb005d05d01eddd247de1dafbf3d256
|
[
"Apache-2.0"
] | null | null | null |
deepinsight_iqa/nima/predict.py
|
sandyz1000/deepinsight-iqa
|
1be15ba4bdb005d05d01eddd247de1dafbf3d256
|
[
"Apache-2.0"
] | 1
|
2022-02-05T03:19:31.000Z
|
2022-02-05T03:19:31.000Z
|
import os
import glob
import sys
from typing import Optional, List, Union
from .utils.utils import calc_mean_score, save_json, image_dir_to_json, image_file_to_json
from .handlers.model_builder import Nima
from deepinsight_iqa.common.utility import thread_safe_singleton, set_gpu_limit
from deepinsight_iqa.data_pipeline.nima_gen.nima_datagen import NimaDataGenerator as TestDataGenerator
import tensorflow as tf
import six
import logging
logger = logging.getLogger(__name__)
@six.add_metaclass(thread_safe_singleton)
class Prediction:
def __init__(self, weights_file: str, base_model_name: str):
""" Invoke a predict method of this class to predict image quality using nima model
"""
try:
# set_gpu_limit()
self.nima = Nima(base_model_name, weights=None)
self.nima.build()
self.nima.nima_model.load_weights(weights_file)
except Exception as e:
print("Unable to load NIMA weights", str(e))
sys.exit(1)
def predict(
self,
image_source: str,
predictions_file: Optional[str] = None,
img_format: str = 'jpg'
) -> List:
# load samples
if os.path.isfile(image_source):
image_dir, samples = image_file_to_json(image_source)
else:
image_dir = image_source
samples = image_dir_to_json(image_source, img_type='jpg')
# initialize data generator
n_classes = 10
batch_size = 64
data_generator = TestDataGenerator(
samples, image_dir, batch_size, n_classes,
self.nima.preprocessing_function(), img_format=img_format
)
# get predictions
predictions = self.nima.nima_model.predict_generator(
data_generator, workers=1, use_multiprocessing=False, verbose=1)
# calc mean scores and add to samples
for i, sample in enumerate(samples):
sample['mean_score_prediction'] = calc_mean_score(predictions[i])
# print(json.dumps(samples, indent=2))
if predictions_file is not None:
save_json(samples, predictions_file)
return samples
| 32.942029
| 102
| 0.66564
|
import os
import glob
import sys
from typing import Optional, List, Union
from .utils.utils import calc_mean_score, save_json, image_dir_to_json, image_file_to_json
from .handlers.model_builder import Nima
from deepinsight_iqa.common.utility import thread_safe_singleton, set_gpu_limit
from deepinsight_iqa.data_pipeline.nima_gen.nima_datagen import NimaDataGenerator as TestDataGenerator
import tensorflow as tf
import six
import logging
logger = logging.getLogger(__name__)
@six.add_metaclass(thread_safe_singleton)
class Prediction:
def __init__(self, weights_file: str, base_model_name: str):
try:
self.nima = Nima(base_model_name, weights=None)
self.nima.build()
self.nima.nima_model.load_weights(weights_file)
except Exception as e:
print("Unable to load NIMA weights", str(e))
sys.exit(1)
def predict(
self,
image_source: str,
predictions_file: Optional[str] = None,
img_format: str = 'jpg'
) -> List:
if os.path.isfile(image_source):
image_dir, samples = image_file_to_json(image_source)
else:
image_dir = image_source
samples = image_dir_to_json(image_source, img_type='jpg')
n_classes = 10
batch_size = 64
data_generator = TestDataGenerator(
samples, image_dir, batch_size, n_classes,
self.nima.preprocessing_function(), img_format=img_format
)
predictions = self.nima.nima_model.predict_generator(
data_generator, workers=1, use_multiprocessing=False, verbose=1)
for i, sample in enumerate(samples):
sample['mean_score_prediction'] = calc_mean_score(predictions[i])
if predictions_file is not None:
save_json(samples, predictions_file)
return samples
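# Usage sketch (paths and the base model name are illustrative; the weights file
# must match the chosen base model):
# predictor = Prediction(weights_file="weights_mobilenet.hdf5",
#                        base_model_name="MobileNet")
# scores = predictor.predict("/path/to/images", predictions_file="scores.json")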
| true
| true
|
79033b5c42283a7d3287e201bd372d9dca5ef6a8
| 1,074
|
py
|
Python
|
services/web/server/src/simcore_service_webserver/director/config.py
|
KZzizzle/osparc-simcore
|
981bc8d193f3f5d507e3225f857e0308c339e163
|
[
"MIT"
] | null | null | null |
services/web/server/src/simcore_service_webserver/director/config.py
|
KZzizzle/osparc-simcore
|
981bc8d193f3f5d507e3225f857e0308c339e163
|
[
"MIT"
] | null | null | null |
services/web/server/src/simcore_service_webserver/director/config.py
|
KZzizzle/osparc-simcore
|
981bc8d193f3f5d507e3225f857e0308c339e163
|
[
"MIT"
] | null | null | null |
""" director subsystem's configuration
- config-file schema
- settings
"""
from typing import Dict
import trafaret as T
from aiohttp import ClientSession, web
from yarl import URL
from servicelib.application_keys import APP_CLIENT_SESSION_KEY, APP_CONFIG_KEY
APP_DIRECTOR_API_KEY = __name__ + ".director_api"
CONFIG_SECTION_NAME = "director"
schema = T.Dict(
{
T.Key("enabled", default=True, optional=True): T.Bool(),
T.Key("host", default="director",): T.String(),
T.Key("port", default=8001): T.ToInt(),
T.Key("version", default="v0"): T.Regexp(
regexp=r"^v\d+"
        ), # director API version basepath
}
)
def build_api_url(config: Dict) -> URL:
api_baseurl = URL.build(
scheme="http", host=config["host"], port=config["port"]
).with_path(config["version"])
return api_baseurl
def get_config(app: web.Application) -> Dict:
return app[APP_CONFIG_KEY][CONFIG_SECTION_NAME]
def get_client_session(app: web.Application) -> ClientSession:
return app[APP_CLIENT_SESSION_KEY]
| 24.976744
| 78
| 0.679702
|
from typing import Dict
import trafaret as T
from aiohttp import ClientSession, web
from yarl import URL
from servicelib.application_keys import APP_CLIENT_SESSION_KEY, APP_CONFIG_KEY
APP_DIRECTOR_API_KEY = __name__ + ".director_api"
CONFIG_SECTION_NAME = "director"
schema = T.Dict(
{
T.Key("enabled", default=True, optional=True): T.Bool(),
T.Key("host", default="director",): T.String(),
T.Key("port", default=8001): T.ToInt(),
T.Key("version", default="v0"): T.Regexp(
regexp=r"^v\d+"
),
}
)
def build_api_url(config: Dict) -> URL:
api_baseurl = URL.build(
scheme="http", host=config["host"], port=config["port"]
).with_path(config["version"])
return api_baseurl
def get_config(app: web.Application) -> Dict:
return app[APP_CONFIG_KEY][CONFIG_SECTION_NAME]
def get_client_session(app: web.Application) -> ClientSession:
return app[APP_CLIENT_SESSION_KEY]
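# Quick illustration of build_api_url with the trafaret defaults defined above
# (the resulting base URL is the director service's API root):
_example_cfg = {"host": "director", "port": 8001, "version": "v0"}
assert str(build_api_url(_example_cfg)) == "http://director:8001/v0"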
| true
| true
|
79033c3abb5425c24997413b7192536ca58adde2
| 1,181
|
py
|
Python
|
ctrp3_py3/reports/urls.py
|
CT-Data-Collaborative/ctrp3_v2
|
5224e4ad5e3a4497379030d7974a11c5c4832d19
|
[
"MIT"
] | null | null | null |
ctrp3_py3/reports/urls.py
|
CT-Data-Collaborative/ctrp3_v2
|
5224e4ad5e3a4497379030d7974a11c5c4832d19
|
[
"MIT"
] | 1
|
2017-09-15T21:01:40.000Z
|
2017-09-15T21:01:40.000Z
|
ctrp3_py3/reports/urls.py
|
CT-Data-Collaborative/ctrp3_v2
|
5224e4ad5e3a4497379030d7974a11c5c4832d19
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'tables/$', views.report_tables, name='tables'),
url(r'^api/stop_enforcement/', views.stop_enforcement_json_view, name='stop_enforcement'),
url(r'^api/residency/', views.resident_json_view, name='residency'),
url(r'^api/nature_of_stops/', views.nature_of_stops_json_view, name='nature_of_stop'),
url(r'^api/disposition/', views.disposition_json_view, name='disposition'),
url(r'^api/statutory_authority/', views.statutory_authority_json_view, name='stop_authority'),
url(r'^api/stops_by_month/', views.monthly_stops_json_view, name='stops_by_month'),
url(r'^api/stops_by_hour/', views.stops_by_hour_json_view, name='stops_by_hour'),
url(r'^api/stops_by_age/', views.stops_by_age_json_view, name='stops_by_age'),
url(r'^api/search_information/', views.search_information_json_view, name='search_information'),
url(r'^api/search_authority/', views.search_authority_json_view, name='search_authority'),
url(r'^api/traffic_stops/', views.traffic_stops_json_view, name='stops'),
url(r'^api/departments/', views.department_json_view, name='departments')
]
| 62.157895
| 100
| 0.751058
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'tables/$', views.report_tables, name='tables'),
url(r'^api/stop_enforcement/', views.stop_enforcement_json_view, name='stop_enforcement'),
url(r'^api/residency/', views.resident_json_view, name='residency'),
url(r'^api/nature_of_stops/', views.nature_of_stops_json_view, name='nature_of_stop'),
url(r'^api/disposition/', views.disposition_json_view, name='disposition'),
url(r'^api/statutory_authority/', views.statutory_authority_json_view, name='stop_authority'),
url(r'^api/stops_by_month/', views.monthly_stops_json_view, name='stops_by_month'),
url(r'^api/stops_by_hour/', views.stops_by_hour_json_view, name='stops_by_hour'),
url(r'^api/stops_by_age/', views.stops_by_age_json_view, name='stops_by_age'),
url(r'^api/search_information/', views.search_information_json_view, name='search_information'),
url(r'^api/search_authority/', views.search_authority_json_view, name='search_authority'),
url(r'^api/traffic_stops/', views.traffic_stops_json_view, name='stops'),
url(r'^api/departments/', views.department_json_view, name='departments')
]
| true
| true
|
79033c774b2b31136b9910c82aadafd4f39f3d90
| 1,134
|
py
|
Python
|
solutions/quick_sort.py
|
Surbeivol/daily-coding-problems
|
4cfd47af47d2d41d348e542154120749e711b1c8
|
[
"MIT"
] | 1
|
2019-08-12T21:40:49.000Z
|
2019-08-12T21:40:49.000Z
|
solutions/quick_sort.py
|
Surbeivol/daily-coding-problems
|
4cfd47af47d2d41d348e542154120749e711b1c8
|
[
"MIT"
] | null | null | null |
solutions/quick_sort.py
|
Surbeivol/daily-coding-problems
|
4cfd47af47d2d41d348e542154120749e711b1c8
|
[
"MIT"
] | 1
|
2020-02-19T20:59:23.000Z
|
2020-02-19T20:59:23.000Z
|
"""
Write a function that takes in an array of integers and returns a sorted version of that array. Use the QuickSort algorithm to sort the array.
"""
def quick_sort(array):
if len(array) <= 1:
return array
_rec_helper(array, 0, len(array) - 1)
return array
def _rec_helper(array, start, end):
# base case
if start >= end:
return
pivot = start
left = pivot + 1
right = end
while left <= right:
if array[left] > array[pivot] and array[right] < array[pivot]:
_swap(array, left, right)
if array[pivot] >= array[left]:
left += 1
if array[pivot] <= array[right]:
right -= 1
_swap(array, pivot, right)
if right - start > end - right:
_rec_helper(array, start, right - 1)
_rec_helper(array, right + 1, end)
else:
_rec_helper(array, right + 1, end)
_rec_helper(array, start, right - 1)
def _swap(array, left, right):
array[left], array[right] = array[right], array[left]
#test
array = [3, 4, 7, 1, 1, 2, 5, 1, 3, 8, 4]
assert quick_sort(array) == sorted(array)
print('OK')
| 25.772727
| 142
| 0.589947
|
def quick_sort(array):
if len(array) <= 1:
return array
_rec_helper(array, 0, len(array) - 1)
return array
def _rec_helper(array, start, end):
if start >= end:
return
pivot = start
left = pivot + 1
right = end
while left <= right:
if array[left] > array[pivot] and array[right] < array[pivot]:
_swap(array, left, right)
if array[pivot] >= array[left]:
left += 1
if array[pivot] <= array[right]:
right -= 1
_swap(array, pivot, right)
if right - start > end - right:
_rec_helper(array, start, right - 1)
_rec_helper(array, right + 1, end)
else:
_rec_helper(array, right + 1, end)
_rec_helper(array, start, right - 1)
def _swap(array, left, right):
array[left], array[right] = array[right], array[left]
array = [3, 4, 7, 1, 1, 2, 5, 1, 3, 8, 4]
assert quick_sort(array) == sorted(array)
print('OK')
| true
| true
|
79033d08d95c45a42f88d1e3a2fafc24e9f25b1e
| 2,370
|
py
|
Python
|
src/ggrc/utils/html_cleaner.py
|
sfarbotka/ggrc-core
|
ef7aae6bc09ad2f53a2414f643572e07d689784a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/utils/html_cleaner.py
|
sfarbotka/ggrc-core
|
ef7aae6bc09ad2f53a2414f643572e07d689784a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/utils/html_cleaner.py
|
sfarbotka/ggrc-core
|
ef7aae6bc09ad2f53a2414f643572e07d689784a
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-02-13T12:32:45.000Z
|
2020-02-13T12:32:45.000Z
|
# Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Provides an HTML cleaner function with sqalchemy compatible API"""
import re
import HTMLParser
import bleach
# Set up custom tags/attributes for bleach
BLEACH_TAGS = [
'caption', 'strong', 'em', 'b', 'i', 'p', 'code', 'pre', 'tt', 'samp',
'kbd', 'var', 'sub', 'sup', 'dfn', 'cite', 'big', 'small', 'address',
'hr', 'br', 'div', 'span', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'ul',
'ol', 'li', 'dl', 'dt', 'dd', 'abbr', 'acronym', 'a', 'img',
'blockquote', 'del', 'ins', 'table', 'tbody', 'tr', 'td', 'th',
] + bleach.ALLOWED_TAGS
BLEACH_ATTRS = {}
ATTRS = [
'href', 'src', 'width', 'height', 'alt', 'cite', 'datetime',
'title', 'class', 'name', 'xml:lang', 'abbr'
]
BUGGY_STRINGS_PATTERN = "&.{2,3};"
for tag in BLEACH_TAGS:
BLEACH_ATTRS[tag] = ATTRS
CLEANER = bleach.sanitizer.Cleaner(
tags=BLEACH_TAGS, attributes=BLEACH_ATTRS, strip=True
)
PARSER = HTMLParser.HTMLParser()
def cleaner(dummy, value, *_):
"""Cleans out unsafe HTML tags.
Uses bleach and unescape until it reaches a fix point.
Args:
    dummy: unused, sqlalchemy will pass in the model class
value: html (string) to be cleaned
Returns:
Html (string) without unsafe tags.
"""
if value is None:
# No point in sanitizing None values
return value
if not isinstance(value, basestring):
# No point in sanitizing non-strings
return value
value = unicode(value)
buggy_strings = re.finditer(BUGGY_STRINGS_PATTERN, PARSER.unescape(value))
while True:
lastvalue = value
value = PARSER.unescape(CLEANER.clean(value))
if value == lastvalue:
break
# for some reason clean() function converts strings like "&*!;" to "&*;;".
# if we have such string we are replacing new incorrect values to old ones
if buggy_strings:
backup_value = value
updated_buggy_strings = re.finditer(BUGGY_STRINGS_PATTERN, value)
for match in updated_buggy_strings:
try:
old_value = buggy_strings.next().group()
start, finish = match.span()
value = value[:start] + old_value + value[finish:]
except StopIteration:
# If we have different number of string after clean function
# we should skip replacing
return backup_value
return value
| 27.55814
| 78
| 0.646835
|
import re
import HTMLParser
import bleach
BLEACH_TAGS = [
'caption', 'strong', 'em', 'b', 'i', 'p', 'code', 'pre', 'tt', 'samp',
'kbd', 'var', 'sub', 'sup', 'dfn', 'cite', 'big', 'small', 'address',
'hr', 'br', 'div', 'span', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'ul',
'ol', 'li', 'dl', 'dt', 'dd', 'abbr', 'acronym', 'a', 'img',
'blockquote', 'del', 'ins', 'table', 'tbody', 'tr', 'td', 'th',
] + bleach.ALLOWED_TAGS
BLEACH_ATTRS = {}
ATTRS = [
'href', 'src', 'width', 'height', 'alt', 'cite', 'datetime',
'title', 'class', 'name', 'xml:lang', 'abbr'
]
BUGGY_STRINGS_PATTERN = "&.{2,3};"
for tag in BLEACH_TAGS:
BLEACH_ATTRS[tag] = ATTRS
CLEANER = bleach.sanitizer.Cleaner(
tags=BLEACH_TAGS, attributes=BLEACH_ATTRS, strip=True
)
PARSER = HTMLParser.HTMLParser()
def cleaner(dummy, value, *_):
if value is None:
return value
if not isinstance(value, basestring):
return value
value = unicode(value)
buggy_strings = re.finditer(BUGGY_STRINGS_PATTERN, PARSER.unescape(value))
while True:
lastvalue = value
value = PARSER.unescape(CLEANER.clean(value))
if value == lastvalue:
break
if buggy_strings:
backup_value = value
updated_buggy_strings = re.finditer(BUGGY_STRINGS_PATTERN, value)
for match in updated_buggy_strings:
try:
old_value = buggy_strings.next().group()
start, finish = match.span()
value = value[:start] + old_value + value[finish:]
except StopIteration:
return backup_value
return value
| true
| true
|
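An illustrative call of the cleaner() function above, under the same Python 2 and bleach assumptions as the module itself; the input string is made up:
# hypothetical input: a disallowed tag plus double-escaped markup
dirty = u"<script>alert('x')</script><b>bold &amp;lt;i&amp;gt; text</b>"
safe = cleaner(None, dirty)   # the first argument is the unused `dummy` model class
print safe                    # tags outside BLEACH_TAGS are stripped; entities are
                              # unescaped repeatedly until the value stops changing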
79033d48e47032f01280a94dfd26c731e5df4113
| 1,827
|
py
|
Python
|
samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_create_specialist_pool_async.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | 1
|
2022-03-30T05:23:29.000Z
|
2022-03-30T05:23:29.000Z
|
samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_create_specialist_pool_async.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | null | null | null |
samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_create_specialist_pool_async.py
|
sakagarwal/python-aiplatform
|
62b4a1ea589235910c6e87f027899a29bf1bacb1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateSpecialistPool
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1_generated_SpecialistPoolService_CreateSpecialistPool_async]
from google.cloud import aiplatform_v1
async def sample_create_specialist_pool():
# Create a client
client = aiplatform_v1.SpecialistPoolServiceAsyncClient()
# Initialize request argument(s)
specialist_pool = aiplatform_v1.SpecialistPool()
specialist_pool.name = "name_value"
specialist_pool.display_name = "display_name_value"
request = aiplatform_v1.CreateSpecialistPoolRequest(
parent="parent_value",
specialist_pool=specialist_pool,
)
# Make the request
operation = client.create_specialist_pool(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END aiplatform_v1_generated_SpecialistPoolService_CreateSpecialistPool_async]
| 33.218182
| 85
| 0.767378
|
from google.cloud import aiplatform_v1
async def sample_create_specialist_pool():
client = aiplatform_v1.SpecialistPoolServiceAsyncClient()
specialist_pool = aiplatform_v1.SpecialistPool()
specialist_pool.name = "name_value"
specialist_pool.display_name = "display_name_value"
request = aiplatform_v1.CreateSpecialistPoolRequest(
parent="parent_value",
specialist_pool=specialist_pool,
)
operation = client.create_specialist_pool(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
print(response)
| true
| true
|
79033e329aeeed6995605fb7fa079108c03ba683
| 8,699
|
py
|
Python
|
awx/main/dispatch/worker/callback.py
|
Mayses/awx
|
35441694a9707d0d2f57c701970db22110091163
|
[
"Apache-2.0"
] | 1
|
2021-08-02T10:37:09.000Z
|
2021-08-02T10:37:09.000Z
|
awx/main/dispatch/worker/callback.py
|
Mayses/awx
|
35441694a9707d0d2f57c701970db22110091163
|
[
"Apache-2.0"
] | 2
|
2019-03-01T19:08:10.000Z
|
2020-03-12T09:14:27.000Z
|
awx/main/dispatch/worker/callback.py
|
hostinger/awx
|
dac01b14e2c04c201a162ea03ef8386d822e3923
|
[
"Apache-2.0"
] | 24
|
2020-11-27T08:37:35.000Z
|
2021-03-08T13:27:15.000Z
|
import cProfile
import json
import logging
import os
import pstats
import signal
import tempfile
import time
import traceback
from django.conf import settings
from django.utils.timezone import now as tz_now
from django.db import DatabaseError, OperationalError, connection as django_connection
from django.db.utils import InterfaceError, InternalError
import psutil
import redis
from awx.main.consumers import emit_channel_notification
from awx.main.models import (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
InventoryUpdateEvent, SystemJobEvent, UnifiedJob,
Job)
from awx.main.tasks import handle_success_and_failure_notifications
from awx.main.models.events import emit_event_detail
from .base import BaseWorker
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
class CallbackBrokerWorker(BaseWorker):
'''
A worker implementation that deserializes callback event data and persists
it into the database.
The code that *generates* these types of messages is found in the
ansible-runner display callback plugin.
'''
MAX_RETRIES = 2
last_stats = time.time()
total = 0
last_event = ''
prof = None
def __init__(self):
self.buff = {}
self.pid = os.getpid()
self.redis = redis.Redis.from_url(settings.BROKER_URL)
for key in self.redis.keys('awx_callback_receiver_statistics_*'):
self.redis.delete(key)
def read(self, queue):
try:
res = self.redis.blpop(settings.CALLBACK_QUEUE, timeout=settings.JOB_EVENT_BUFFER_SECONDS)
if res is None:
return {'event': 'FLUSH'}
self.total += 1
return json.loads(res[1])
except redis.exceptions.RedisError:
logger.exception("encountered an error communicating with redis")
time.sleep(1)
except (json.JSONDecodeError, KeyError):
logger.exception("failed to decode JSON message from redis")
finally:
self.record_statistics()
return {'event': 'FLUSH'}
def record_statistics(self):
# buffer stat recording to once per (by default) 5s
if time.time() - self.last_stats > settings.JOB_EVENT_STATISTICS_INTERVAL:
try:
self.redis.set(f'awx_callback_receiver_statistics_{self.pid}', self.debug())
self.last_stats = time.time()
except Exception:
logger.exception("encountered an error communicating with redis")
self.last_stats = time.time()
def debug(self):
return f'. worker[pid:{self.pid}] sent={self.total} rss={self.mb}MB {self.last_event}'
@property
def mb(self):
return '{:0.3f}'.format(
psutil.Process(self.pid).memory_info().rss / 1024.0 / 1024.0
)
def toggle_profiling(self, *args):
if self.prof:
self.prof.disable()
filename = f'callback-{self.pid}.pstats'
filepath = os.path.join(tempfile.gettempdir(), filename)
with open(filepath, 'w') as f:
pstats.Stats(self.prof, stream=f).sort_stats('cumulative').print_stats()
pstats.Stats(self.prof).dump_stats(filepath + '.raw')
self.prof = False
logger.error(f'profiling is disabled, wrote {filepath}')
else:
self.prof = cProfile.Profile()
self.prof.enable()
logger.error('profiling is enabled')
def work_loop(self, *args, **kw):
if settings.AWX_CALLBACK_PROFILE:
signal.signal(signal.SIGUSR1, self.toggle_profiling)
return super(CallbackBrokerWorker, self).work_loop(*args, **kw)
def flush(self, force=False):
now = tz_now()
if (
force or
any([len(events) >= 1000 for events in self.buff.values()])
):
for cls, events in self.buff.items():
logger.debug(f'{cls.__name__}.objects.bulk_create({len(events)})')
for e in events:
if not e.created:
e.created = now
e.modified = now
try:
cls.objects.bulk_create(events)
except Exception:
# if an exception occurs, we should re-attempt to save the
# events one-by-one, because something in the list is
# broken/stale
for e in events:
try:
e.save()
except Exception:
logger.exception('Database Error Saving Job Event')
for e in events:
emit_event_detail(e)
self.buff = {}
def perform_work(self, body):
try:
flush = body.get('event') == 'FLUSH'
if flush:
self.last_event = ''
if not flush:
event_map = {
'job_id': JobEvent,
'ad_hoc_command_id': AdHocCommandEvent,
'project_update_id': ProjectUpdateEvent,
'inventory_update_id': InventoryUpdateEvent,
'system_job_id': SystemJobEvent,
}
job_identifier = 'unknown job'
for key, cls in event_map.items():
if key in body:
job_identifier = body[key]
break
self.last_event = f'\n\t- {cls.__name__} for #{job_identifier} ({body.get("event", "")} {body.get("uuid", "")})' # noqa
if body.get('event') == 'EOF':
try:
final_counter = body.get('final_counter', 0)
logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
# EOF events are sent when stdout for the running task is
# closed. don't actually persist them to the database; we
# just use them to report `summary` websocket events as an
# approximation for when a job is "done"
emit_channel_notification(
'jobs-summary',
dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
)
# Additionally, when we've processed all events, we should
# have all the data we need to send out success/failure
# notification templates
uj = UnifiedJob.objects.get(pk=job_identifier)
if isinstance(uj, Job):
# *actual playbooks* send their success/failure
# notifications in response to the playbook_on_stats
# event handling code in main.models.events
pass
elif hasattr(uj, 'send_notification_templates'):
handle_success_and_failure_notifications.apply_async([uj.id])
except Exception:
logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
return
event = cls.create_from_data(**body)
self.buff.setdefault(cls, []).append(event)
retries = 0
while retries <= self.MAX_RETRIES:
try:
self.flush(force=flush)
break
except (OperationalError, InterfaceError, InternalError):
if retries >= self.MAX_RETRIES:
logger.exception('Worker could not re-establish database connectivity, giving up on one or more events.')
return
delay = 60 * retries
logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
i=retries + 1,
delay=delay
))
django_connection.close()
time.sleep(delay)
retries += 1
except DatabaseError:
logger.exception('Database Error Saving Job Event')
break
except Exception as exc:
tb = traceback.format_exc()
logger.error('Callback Task Processor Raised Exception: %r', exc)
logger.error('Detail: {}'.format(tb))
| 40.840376
| 136
| 0.550178
|
import cProfile
import json
import logging
import os
import pstats
import signal
import tempfile
import time
import traceback
from django.conf import settings
from django.utils.timezone import now as tz_now
from django.db import DatabaseError, OperationalError, connection as django_connection
from django.db.utils import InterfaceError, InternalError
import psutil
import redis
from awx.main.consumers import emit_channel_notification
from awx.main.models import (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
InventoryUpdateEvent, SystemJobEvent, UnifiedJob,
Job)
from awx.main.tasks import handle_success_and_failure_notifications
from awx.main.models.events import emit_event_detail
from .base import BaseWorker
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
class CallbackBrokerWorker(BaseWorker):
MAX_RETRIES = 2
last_stats = time.time()
total = 0
last_event = ''
prof = None
def __init__(self):
self.buff = {}
self.pid = os.getpid()
self.redis = redis.Redis.from_url(settings.BROKER_URL)
for key in self.redis.keys('awx_callback_receiver_statistics_*'):
self.redis.delete(key)
def read(self, queue):
try:
res = self.redis.blpop(settings.CALLBACK_QUEUE, timeout=settings.JOB_EVENT_BUFFER_SECONDS)
if res is None:
return {'event': 'FLUSH'}
self.total += 1
return json.loads(res[1])
except redis.exceptions.RedisError:
logger.exception("encountered an error communicating with redis")
time.sleep(1)
except (json.JSONDecodeError, KeyError):
logger.exception("failed to decode JSON message from redis")
finally:
self.record_statistics()
return {'event': 'FLUSH'}
def record_statistics(self):
if time.time() - self.last_stats > settings.JOB_EVENT_STATISTICS_INTERVAL:
try:
self.redis.set(f'awx_callback_receiver_statistics_{self.pid}', self.debug())
self.last_stats = time.time()
except Exception:
logger.exception("encountered an error communicating with redis")
self.last_stats = time.time()
def debug(self):
return f'. worker[pid:{self.pid}] sent={self.total} rss={self.mb}MB {self.last_event}'
@property
def mb(self):
return '{:0.3f}'.format(
psutil.Process(self.pid).memory_info().rss / 1024.0 / 1024.0
)
def toggle_profiling(self, *args):
if self.prof:
self.prof.disable()
filename = f'callback-{self.pid}.pstats'
filepath = os.path.join(tempfile.gettempdir(), filename)
with open(filepath, 'w') as f:
pstats.Stats(self.prof, stream=f).sort_stats('cumulative').print_stats()
pstats.Stats(self.prof).dump_stats(filepath + '.raw')
self.prof = False
logger.error(f'profiling is disabled, wrote {filepath}')
else:
self.prof = cProfile.Profile()
self.prof.enable()
logger.error('profiling is enabled')
def work_loop(self, *args, **kw):
if settings.AWX_CALLBACK_PROFILE:
signal.signal(signal.SIGUSR1, self.toggle_profiling)
return super(CallbackBrokerWorker, self).work_loop(*args, **kw)
def flush(self, force=False):
now = tz_now()
if (
force or
any([len(events) >= 1000 for events in self.buff.values()])
):
for cls, events in self.buff.items():
logger.debug(f'{cls.__name__}.objects.bulk_create({len(events)})')
for e in events:
if not e.created:
e.created = now
e.modified = now
try:
cls.objects.bulk_create(events)
except Exception:
for e in events:
try:
e.save()
except Exception:
logger.exception('Database Error Saving Job Event')
for e in events:
emit_event_detail(e)
self.buff = {}
def perform_work(self, body):
try:
flush = body.get('event') == 'FLUSH'
if flush:
self.last_event = ''
if not flush:
event_map = {
'job_id': JobEvent,
'ad_hoc_command_id': AdHocCommandEvent,
'project_update_id': ProjectUpdateEvent,
'inventory_update_id': InventoryUpdateEvent,
'system_job_id': SystemJobEvent,
}
job_identifier = 'unknown job'
for key, cls in event_map.items():
if key in body:
job_identifier = body[key]
break
self.last_event = f'\n\t- {cls.__name__} for #{job_identifier} ({body.get("event", "")} {body.get("uuid", "")})'
if body.get('event') == 'EOF':
try:
final_counter = body.get('final_counter', 0)
logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
# just use them to report `summary` websocket events as an
# approximation for when a job is "done"
emit_channel_notification(
'jobs-summary',
dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
)
# Additionally, when we've processed all events, we should
uj = UnifiedJob.objects.get(pk=job_identifier)
if isinstance(uj, Job):
pass
elif hasattr(uj, 'send_notification_templates'):
handle_success_and_failure_notifications.apply_async([uj.id])
except Exception:
logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
return
event = cls.create_from_data(**body)
self.buff.setdefault(cls, []).append(event)
retries = 0
while retries <= self.MAX_RETRIES:
try:
self.flush(force=flush)
break
except (OperationalError, InterfaceError, InternalError):
if retries >= self.MAX_RETRIES:
logger.exception('Worker could not re-establish database connectivity, giving up on one or more events.')
return
delay = 60 * retries
logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
i=retries + 1,
delay=delay
))
django_connection.close()
time.sleep(delay)
retries += 1
except DatabaseError:
logger.exception('Database Error Saving Job Event')
break
except Exception as exc:
tb = traceback.format_exc()
logger.error('Callback Task Processor Raised Exception: %r', exc)
logger.error('Detail: {}'.format(tb))
| true
| true
|
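An illustrative way to exercise the SIGUSR1 profiling toggle above from another process, assuming the dispatcher was started with AWX_CALLBACK_PROFILE enabled; the PID is a placeholder:
import os
import signal

worker_pid = 12345                    # hypothetical callback worker PID
os.kill(worker_pid, signal.SIGUSR1)   # first signal: toggle_profiling() enables cProfile
os.kill(worker_pid, signal.SIGUSR1)   # second signal: disables it and writes callback-<pid>.pstats to the temp dir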
790341431528bf4d0db5b5dba15949090cd333a0
| 1,442
|
py
|
Python
|
seafileapi/utils.py
|
nguacon01/python-seafile
|
943592b89b78f79540771be37db639bd1879995e
|
[
"Apache-2.0"
] | 5
|
2020-12-17T02:13:18.000Z
|
2021-07-30T11:42:39.000Z
|
seafileapi/utils.py
|
nguacon01/python-seafile
|
943592b89b78f79540771be37db639bd1879995e
|
[
"Apache-2.0"
] | 2
|
2020-12-17T14:34:42.000Z
|
2021-01-11T16:22:52.000Z
|
seafileapi/utils.py
|
nguacon01/python-seafile
|
943592b89b78f79540771be37db639bd1879995e
|
[
"Apache-2.0"
] | null | null | null |
import string
import random
from functools import wraps
from urllib.parse import urlencode
from seafileapi.exceptions import ClientHttpError, DoesNotExist
def randstring(length=0):
if length == 0:
length = random.randint(1, 30)
    return ''.join(random.choice(string.ascii_lowercase) for i in range(length))
def urljoin(base, *args):
url = base
if url[-1] != '/':
url += '/'
for arg in args:
arg = arg.strip('/')
url += arg + '/'
if '?' in url:
url = url[:-1]
return url
def raise_does_not_exist(msg):
"""Decorator to turn a function that get a http 404 response to a
:exc:`DoesNotExist` exception."""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except ClientHttpError as e:
if e.code == 404:
raise DoesNotExist(msg)
else:
raise
return wrapped
return decorator
def to_utf8(obj):
if isinstance(obj, str):
return obj.encode('utf-8')
return obj
def querystr(**kwargs):
return '?' + urlencode(kwargs)
def utf8lize(obj):
if isinstance(obj, dict):
return {k: to_utf8(v) for k, v in obj.items()}
if isinstance(obj, list):
        return [to_utf8(x) for x in obj]
    if isinstance(obj, str):
return obj.encode('utf-8')
return obj
| 24.862069
| 74
| 0.575589
|
import string
import random
from functools import wraps
from urllib.parse import urlencode
from seafileapi.exceptions import ClientHttpError, DoesNotExist
def randstring(length=0):
if length == 0:
length = random.randint(1, 30)
    return ''.join(random.choice(string.ascii_lowercase) for i in range(length))
def urljoin(base, *args):
url = base
if url[-1] != '/':
url += '/'
for arg in args:
arg = arg.strip('/')
url += arg + '/'
if '?' in url:
url = url[:-1]
return url
def raise_does_not_exist(msg):
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except ClientHttpError as e:
if e.code == 404:
raise DoesNotExist(msg)
else:
raise
return wrapped
return decorator
def to_utf8(obj):
if isinstance(obj, str):
return obj.encode('utf-8')
return obj
def querystr(**kwargs):
return '?' + urlencode(kwargs)
def utf8lize(obj):
if isinstance(obj, dict):
return {k: to_utf8(v) for k, v in obj.items()}
if isinstance(obj, list):
        return [to_utf8(x) for x in obj]
    if isinstance(obj, str):
return obj.encode('utf-8')
return obj
| true
| true
|
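An illustrative sketch of how the raise_does_not_exist decorator above is meant to wrap a client call; the Repo class is hypothetical, and the ClientHttpError constructor arguments are assumed (the decorator only relies on its code attribute):
class Repo(object):
    @raise_does_not_exist('The requested file does not exist')
    def get_file(self, path):
        # a real client would issue an HTTP request here; a 404 from the
        # server would surface as a ClientHttpError with code == 404
        raise ClientHttpError(404, 'not found')   # constructor arguments assumed

# Repo().get_file('/missing.txt') raises DoesNotExist instead of ClientHttpError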
790341dae8dd98e984f133b238e81d9bcf4bcb94
| 1,559
|
py
|
Python
|
share/scripts/augen_octahedron2camera.py
|
eliemichel/GrainViewer
|
91d4922b3185ada90508f0944f2691ba8eba45e3
|
[
"MIT"
] | 8
|
2020-12-14T13:14:22.000Z
|
2021-12-11T20:04:54.000Z
|
share/scripts/augen_octahedron2camera.py
|
eliemichel/GrainViewer
|
91d4922b3185ada90508f0944f2691ba8eba45e3
|
[
"MIT"
] | null | null | null |
share/scripts/augen_octahedron2camera.py
|
eliemichel/GrainViewer
|
91d4922b3185ada90508f0944f2691ba8eba45e3
|
[
"MIT"
] | 2
|
2020-12-16T10:02:15.000Z
|
2021-03-16T16:06:19.000Z
|
import sys
import struct
from math import sqrt
def cross(a, b):
return [
a[1] * b[2] - a[2] * b[1],
a[2] * b[0] - a[0] * b[2],
a[0] * b[1] - a[1] * b[0]
]
def dot(a, b):
return a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
def normalized(a):
s = 1 / sqrt(dot(a, a))
return [ a[0] * s, a[1] * s, a[2] * s ]
def mul(m, a):
return [
dot(m[0], a),
dot(m[1], a),
dot(m[2], a)
]
def opp(a):
return [-a[0], -a[1], -a[2]]
def lookFrom(p):
z = p
x = normalized(cross([0,0,1], z))
y = normalized(cross(z, x))
invp = opp(mul([x, y, z], p))
return [
[x[0], x[1], x[2], invp[0]],
[y[0], y[1], y[2], invp[1]],
[z[0], z[1], z[2], invp[2]],
[0, 0, 0, 1],
]
def write_view_matrix(inputFilename, outputFilepath):
with open(outputFilepath, 'wb') as outFile:
for i, line in enumerate(open(inputFilename, 'r')):
coords = [float(x) for x in line.split()]
if len(coords) != 3:
print("Unable to parse line: %s " % line)
exit(1)
mat = lookFrom(coords)
print(mat)
column_major_data = tuple(mat[i][j] for j in range(4) for i in range(4))
outFile.write(struct.pack("f"*16, *column_major_data))
if __name__ == "__main__":
inputFilename = sys.argv[1] if len(sys.argv) > 1 else "octahedron.xyz"
outputFilepath = sys.argv[2] if len(sys.argv) > 2 else "octahedron_camera.bin"
write_view_matrix(inputFilename, outputFilepath)
| 26.87931
| 84
| 0.502245
|
import sys
import struct
from math import sqrt
def cross(a, b):
return [
a[1] * b[2] - a[2] * b[1],
a[2] * b[0] - a[0] * b[2],
a[0] * b[1] - a[1] * b[0]
]
def dot(a, b):
return a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
def normalized(a):
s = 1 / sqrt(dot(a, a))
return [ a[0] * s, a[1] * s, a[2] * s ]
def mul(m, a):
return [
dot(m[0], a),
dot(m[1], a),
dot(m[2], a)
]
def opp(a):
return [-a[0], -a[1], -a[2]]
def lookFrom(p):
z = p
x = normalized(cross([0,0,1], z))
y = normalized(cross(z, x))
invp = opp(mul([x, y, z], p))
return [
[x[0], x[1], x[2], invp[0]],
[y[0], y[1], y[2], invp[1]],
[z[0], z[1], z[2], invp[2]],
[0, 0, 0, 1],
]
def write_view_matrix(inputFilename, outputFilepath):
with open(outputFilepath, 'wb') as outFile:
for i, line in enumerate(open(inputFilename, 'r')):
coords = [float(x) for x in line.split()]
if len(coords) != 3:
print("Unable to parse line: %s " % line)
exit(1)
mat = lookFrom(coords)
print(mat)
column_major_data = tuple(mat[i][j] for j in range(4) for i in range(4))
outFile.write(struct.pack("f"*16, *column_major_data))
if __name__ == "__main__":
inputFilename = sys.argv[1] if len(sys.argv) > 1 else "octahedron.xyz"
outputFilepath = sys.argv[2] if len(sys.argv) > 2 else "octahedron_camera.bin"
write_view_matrix(inputFilename, outputFilepath)
| true
| true
|
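A small, illustrative sanity check for lookFrom() above: because the translation column is -R·p, the view matrix should map the camera position itself to the origin; the sample point is arbitrary:
p = normalized([1.0, 2.0, 3.0])        # arbitrary camera position on the unit sphere
m = lookFrom(p)
hom = p + [1.0]                        # homogeneous coordinates of the position
eye_space = [sum(m[i][j] * hom[j] for j in range(4)) for i in range(3)]
print(eye_space)                       # expected: ~[0.0, 0.0, 0.0] up to float error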
790342aed0669dd446268dba8cc861cbb980a333
| 3,335
|
py
|
Python
|
app/__init__.py
|
natalia-rios/flask-mega-tutorial
|
496d44b1123c174e1b2afcd227855d0c6c047572
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
natalia-rios/flask-mega-tutorial
|
496d44b1123c174e1b2afcd227855d0c6c047572
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
natalia-rios/flask-mega-tutorial
|
496d44b1123c174e1b2afcd227855d0c6c047572
|
[
"MIT"
] | null | null | null |
import logging
from logging.handlers import SMTPHandler, RotatingFileHandler
import os
from flask import Flask, request, current_app
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_mail import Mail
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_babel import Babel, lazy_gettext as _l
from elasticsearch import Elasticsearch
from redis import Redis
import rq
from config import Config
db = SQLAlchemy()
migrate = Migrate()
login = LoginManager()
login.login_view = 'auth.login'
login.login_message = _l('Please log in to access this page.')
mail = Mail()
bootstrap = Bootstrap()
moment = Moment()
babel = Babel()
def create_app(config_class=Config):
app = Flask(__name__)
app.config.from_object(config_class)
db.init_app(app)
migrate.init_app(app, db)
login.init_app(app)
mail.init_app(app)
bootstrap.init_app(app)
moment.init_app(app)
babel.init_app(app)
app.elasticsearch = Elasticsearch([app.config['ELASTICSEARCH_URL']]) \
if app.config['ELASTICSEARCH_URL'] else None
app.redis = Redis.from_url(app.config['REDIS_URL'])
app.task_queue = rq.Queue('microblog-tasks', connection=app.redis)
from app.errors import bp as errors_bp
app.register_blueprint(errors_bp)
from app.auth import bp as auth_bp
app.register_blueprint(auth_bp, url_prefix='/auth')
from app.main import bp as main_bp
app.register_blueprint(main_bp)
from app.api import bp as api_bp
app.register_blueprint(api_bp, url_prefix='/api')
if not app.debug and not app.testing:
if app.config['MAIL_SERVER']:
auth = None
if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
auth = (app.config['MAIL_USERNAME'],
app.config['MAIL_PASSWORD'])
secure = None
if app.config['MAIL_USE_TLS']:
secure = ()
mail_handler = SMTPHandler(
mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
fromaddr='no-reply@' + app.config['MAIL_SERVER'],
toaddrs=app.config['ADMINS'], subject='Microblog Failure',
credentials=auth, secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
if app.config['LOG_TO_STDOUT']:
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
app.logger.addHandler(stream_handler)
else:
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler('logs/microblog.log',
maxBytes=10240, backupCount=10)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('Microblog startup')
return app
@babel.localeselector
def get_locale():
return request.accept_languages.best_match(current_app.config['LANGUAGES'])
from app import models
| 33.686869
| 79
| 0.664168
|
import logging
from logging.handlers import SMTPHandler, RotatingFileHandler
import os
from flask import Flask, request, current_app
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_mail import Mail
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_babel import Babel, lazy_gettext as _l
from elasticsearch import Elasticsearch
from redis import Redis
import rq
from config import Config
db = SQLAlchemy()
migrate = Migrate()
login = LoginManager()
login.login_view = 'auth.login'
login.login_message = _l('Please log in to access this page.')
mail = Mail()
bootstrap = Bootstrap()
moment = Moment()
babel = Babel()
def create_app(config_class=Config):
app = Flask(__name__)
app.config.from_object(config_class)
db.init_app(app)
migrate.init_app(app, db)
login.init_app(app)
mail.init_app(app)
bootstrap.init_app(app)
moment.init_app(app)
babel.init_app(app)
app.elasticsearch = Elasticsearch([app.config['ELASTICSEARCH_URL']]) \
if app.config['ELASTICSEARCH_URL'] else None
app.redis = Redis.from_url(app.config['REDIS_URL'])
app.task_queue = rq.Queue('microblog-tasks', connection=app.redis)
from app.errors import bp as errors_bp
app.register_blueprint(errors_bp)
from app.auth import bp as auth_bp
app.register_blueprint(auth_bp, url_prefix='/auth')
from app.main import bp as main_bp
app.register_blueprint(main_bp)
from app.api import bp as api_bp
app.register_blueprint(api_bp, url_prefix='/api')
if not app.debug and not app.testing:
if app.config['MAIL_SERVER']:
auth = None
if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
auth = (app.config['MAIL_USERNAME'],
app.config['MAIL_PASSWORD'])
secure = None
if app.config['MAIL_USE_TLS']:
secure = ()
mail_handler = SMTPHandler(
mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
fromaddr='no-reply@' + app.config['MAIL_SERVER'],
toaddrs=app.config['ADMINS'], subject='Microblog Failure',
credentials=auth, secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
if app.config['LOG_TO_STDOUT']:
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
app.logger.addHandler(stream_handler)
else:
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler('logs/microblog.log',
maxBytes=10240, backupCount=10)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('Microblog startup')
return app
@babel.localeselector
def get_locale():
return request.accept_languages.best_match(current_app.config['LANGUAGES'])
from app import models
| true
| true
|
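An illustrative, test-style invocation of the create_app factory above; TestConfig is hypothetical and assumes the base Config also defines ELASTICSEARCH_URL and REDIS_URL, as the factory expects:
class TestConfig(Config):
    TESTING = True
    ELASTICSEARCH_URL = None      # skip creating the Elasticsearch client
    REDIS_URL = 'redis://'        # Redis.from_url() does not connect eagerly

app = create_app(TestConfig)
assert app.elasticsearch is None
with app.app_context():
    pass                          # exercise extensions and models inside the app context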
790342b593f0f9fff35264ceb740ba6906b02f8a
| 76
|
py
|
Python
|
ABC026/ABC026f.py
|
VolgaKurvar/AtCoder
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
[
"Unlicense"
] | null | null | null |
ABC026/ABC026f.py
|
VolgaKurvar/AtCoder
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
[
"Unlicense"
] | null | null | null |
ABC026/ABC026f.py
|
VolgaKurvar/AtCoder
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
[
"Unlicense"
] | null | null | null |
#ABC026f
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
| 15.2
| 28
| 0.789474
|
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
| true
| true
|
790343765cc55c5f9c2ca1f2b92ec280603bcf7a
| 7,451
|
py
|
Python
|
examples/benchmark/utils/recommendation/ncf_input_pipeline.py
|
Ezra-H/autodist
|
b5ab28d0d867c22742daa3c1d324fe20c1852bd7
|
[
"Apache-2.0"
] | 127
|
2020-07-16T16:33:10.000Z
|
2022-03-25T09:58:50.000Z
|
examples/benchmark/utils/recommendation/ncf_input_pipeline.py
|
Ezra-H/autodist
|
b5ab28d0d867c22742daa3c1d324fe20c1852bd7
|
[
"Apache-2.0"
] | 17
|
2020-07-16T20:03:44.000Z
|
2021-02-24T19:53:12.000Z
|
examples/benchmark/utils/recommendation/ncf_input_pipeline.py
|
Ezra-H/autodist
|
b5ab28d0d867c22742daa3c1d324fe20c1852bd7
|
[
"Apache-2.0"
] | 26
|
2020-07-21T01:23:55.000Z
|
2022-02-24T03:43:08.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NCF model input pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# pylint: disable=g-bad-import-order
import tensorflow.compat.v2 as tf
# pylint: enable=g-bad-import-order
from utils.recommendation import constants as rconst
from utils.recommendation import movielens
from utils.recommendation import data_pipeline
NUM_SHARDS = 16
def create_dataset_from_tf_record_files(input_file_pattern,
pre_batch_size,
batch_size,
is_training=True):
"""Creates dataset from (tf)records files for training/evaluation."""
files = tf.data.Dataset.list_files(input_file_pattern, shuffle=is_training)
def make_dataset(files_dataset, shard_index):
"""Returns dataset for sharded tf record files."""
if pre_batch_size != batch_size:
raise ValueError("Pre-batch ({}) size is not equal to batch "
"size ({})".format(pre_batch_size, batch_size))
files_dataset = files_dataset.shard(NUM_SHARDS, shard_index)
dataset = files_dataset.interleave(tf.data.TFRecordDataset)
decode_fn = functools.partial(
data_pipeline.DatasetManager.deserialize,
batch_size=pre_batch_size,
is_training=is_training)
dataset = dataset.map(
decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
dataset = tf.data.Dataset.range(NUM_SHARDS)
map_fn = functools.partial(make_dataset, files)
dataset = dataset.interleave(
map_fn,
cycle_length=NUM_SHARDS,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def create_dataset_from_data_producer(producer, params):
"""Return dataset online-generating data."""
def preprocess_train_input(features, labels):
"""Pre-process the training data.
This is needed because
- The label needs to be extended to be used in the loss fn
- We need the same inputs for training and eval so adding fake inputs
for DUPLICATE_MASK in training data.
Args:
features: Dictionary of features for training.
labels: Training labels.
Returns:
Processed training features.
"""
fake_dup_mask = tf.zeros_like(features[movielens.USER_COLUMN])
features[rconst.DUPLICATE_MASK] = fake_dup_mask
features[rconst.TRAIN_LABEL_KEY] = labels
return features
train_input_fn = producer.make_input_fn(is_training=True)
train_input_dataset = train_input_fn(params).map(preprocess_train_input)
def preprocess_eval_input(features):
"""Pre-process the eval data.
This is needed because:
- The label needs to be extended to be used in the loss fn
- We need the same inputs for training and eval so adding fake inputs
for VALID_PT_MASK in eval data.
Args:
features: Dictionary of features for evaluation.
Returns:
Processed evaluation features.
"""
labels = tf.cast(tf.zeros_like(
features[movielens.USER_COLUMN]), tf.bool)
fake_valid_pt_mask = tf.cast(
tf.zeros_like(features[movielens.USER_COLUMN]), tf.bool)
features[rconst.VALID_POINT_MASK] = fake_valid_pt_mask
features[rconst.TRAIN_LABEL_KEY] = labels
return features
eval_input_fn = producer.make_input_fn(is_training=False)
eval_input_dataset = eval_input_fn(params).map(preprocess_eval_input)
return train_input_dataset, eval_input_dataset
def create_ncf_input_data(params,
producer=None,
input_meta_data=None,
strategy=None):
"""Creates NCF training/evaluation dataset.
Args:
params: Dictionary containing parameters for train/evaluation data.
producer: Instance of BaseDataConstructor that generates data online. Must
not be None when params['train_dataset_path'] or
params['eval_dataset_path'] is not specified.
input_meta_data: A dictionary of input metadata to be used when reading data
      from tf record files. Must be specified when params["train_dataset_path"]
is specified.
strategy: Distribution strategy used for distributed training. If specified,
used to assert that evaluation batch size is correctly a multiple of
total number of devices used.
Returns:
(training dataset, evaluation dataset, train steps per epoch,
eval steps per epoch)
Raises:
ValueError: If data is being generated online for when using TPU's.
"""
# NCF evaluation metric calculation logic assumes that evaluation data
  # sample sizes are in multiples of (1 + number of negative samples in
# evaluation) for each device. As so, evaluation batch size must be a
# multiple of (number of replicas * (1 + number of negative samples)).
num_devices = strategy.num_replicas_in_sync if strategy else 1
if (params["eval_batch_size"] % (num_devices *
(1 + rconst.NUM_EVAL_NEGATIVES))):
raise ValueError("Evaluation batch size must be divisible by {} "
"times {}".format(num_devices,
(1 + rconst.NUM_EVAL_NEGATIVES)))
if params["train_dataset_path"]:
assert params["eval_dataset_path"]
train_dataset = create_dataset_from_tf_record_files(
params["train_dataset_path"],
input_meta_data["train_prebatch_size"],
params["batch_size"],
is_training=True)
eval_dataset = create_dataset_from_tf_record_files(
params["eval_dataset_path"],
input_meta_data["eval_prebatch_size"],
params["eval_batch_size"],
is_training=False)
num_train_steps = int(input_meta_data["num_train_steps"])
num_eval_steps = int(input_meta_data["num_eval_steps"])
else:
if params["use_tpu"]:
raise ValueError(
"TPU training does not support data producer yet. "
"Use pre-processed data.")
assert producer
# Start retrieving data from producer.
train_dataset, eval_dataset = create_dataset_from_data_producer(
producer, params)
num_train_steps = producer.train_batches_per_epoch
num_eval_steps = producer.eval_batches_per_epoch
return train_dataset, eval_dataset, num_train_steps, num_eval_steps
| 39.84492
| 82
| 0.668232
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow.compat.v2 as tf
from utils.recommendation import constants as rconst
from utils.recommendation import movielens
from utils.recommendation import data_pipeline
NUM_SHARDS = 16
def create_dataset_from_tf_record_files(input_file_pattern,
pre_batch_size,
batch_size,
is_training=True):
files = tf.data.Dataset.list_files(input_file_pattern, shuffle=is_training)
def make_dataset(files_dataset, shard_index):
if pre_batch_size != batch_size:
raise ValueError("Pre-batch ({}) size is not equal to batch "
"size ({})".format(pre_batch_size, batch_size))
files_dataset = files_dataset.shard(NUM_SHARDS, shard_index)
dataset = files_dataset.interleave(tf.data.TFRecordDataset)
decode_fn = functools.partial(
data_pipeline.DatasetManager.deserialize,
batch_size=pre_batch_size,
is_training=is_training)
dataset = dataset.map(
decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
dataset = tf.data.Dataset.range(NUM_SHARDS)
map_fn = functools.partial(make_dataset, files)
dataset = dataset.interleave(
map_fn,
cycle_length=NUM_SHARDS,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def create_dataset_from_data_producer(producer, params):
def preprocess_train_input(features, labels):
fake_dup_mask = tf.zeros_like(features[movielens.USER_COLUMN])
features[rconst.DUPLICATE_MASK] = fake_dup_mask
features[rconst.TRAIN_LABEL_KEY] = labels
return features
train_input_fn = producer.make_input_fn(is_training=True)
train_input_dataset = train_input_fn(params).map(preprocess_train_input)
def preprocess_eval_input(features):
labels = tf.cast(tf.zeros_like(
features[movielens.USER_COLUMN]), tf.bool)
fake_valid_pt_mask = tf.cast(
tf.zeros_like(features[movielens.USER_COLUMN]), tf.bool)
features[rconst.VALID_POINT_MASK] = fake_valid_pt_mask
features[rconst.TRAIN_LABEL_KEY] = labels
return features
eval_input_fn = producer.make_input_fn(is_training=False)
eval_input_dataset = eval_input_fn(params).map(preprocess_eval_input)
return train_input_dataset, eval_input_dataset
def create_ncf_input_data(params,
producer=None,
input_meta_data=None,
strategy=None):
num_devices = strategy.num_replicas_in_sync if strategy else 1
if (params["eval_batch_size"] % (num_devices *
(1 + rconst.NUM_EVAL_NEGATIVES))):
raise ValueError("Evaluation batch size must be divisible by {} "
"times {}".format(num_devices,
(1 + rconst.NUM_EVAL_NEGATIVES)))
if params["train_dataset_path"]:
assert params["eval_dataset_path"]
train_dataset = create_dataset_from_tf_record_files(
params["train_dataset_path"],
input_meta_data["train_prebatch_size"],
params["batch_size"],
is_training=True)
eval_dataset = create_dataset_from_tf_record_files(
params["eval_dataset_path"],
input_meta_data["eval_prebatch_size"],
params["eval_batch_size"],
is_training=False)
num_train_steps = int(input_meta_data["num_train_steps"])
num_eval_steps = int(input_meta_data["num_eval_steps"])
else:
if params["use_tpu"]:
raise ValueError(
"TPU training does not support data producer yet. "
"Use pre-processed data.")
assert producer
train_dataset, eval_dataset = create_dataset_from_data_producer(
producer, params)
num_train_steps = producer.train_batches_per_epoch
num_eval_steps = producer.eval_batches_per_epoch
return train_dataset, eval_dataset, num_train_steps, num_eval_steps
| true
| true
|
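A small, illustrative arithmetic check of the evaluation batch size rule enforced above; the device count and negative-sample count are made-up numbers, not values taken from rconst:
num_devices = 8                 # hypothetical number of replicas
num_eval_negatives = 99         # hypothetical stand-in for rconst.NUM_EVAL_NEGATIVES
required_multiple = num_devices * (1 + num_eval_negatives)    # 800
print(4000 % required_multiple == 0)   # True  -> eval batch size 4000 is accepted
print(4100 % required_multiple == 0)   # False -> create_ncf_input_data would raise ValueError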
790344d0c93753e114d72a64fc03c1ca4da837f6
| 855
|
py
|
Python
|
zinnia/views/mixins/entry_preview.py
|
julienc91/django-blog-zinnia
|
b4949304b104a8e1a7a7a0773cbfd024313c3a15
|
[
"BSD-3-Clause"
] | 10
|
2020-03-04T05:32:09.000Z
|
2020-03-04T05:49:52.000Z
|
zinnia/views/mixins/entry_preview.py
|
julienc91/django-blog-zinnia
|
b4949304b104a8e1a7a7a0773cbfd024313c3a15
|
[
"BSD-3-Clause"
] | 9
|
2017-05-09T02:00:31.000Z
|
2017-06-12T11:08:26.000Z
|
zinnia/views/mixins/entry_preview.py
|
julienc91/django-blog-zinnia
|
b4949304b104a8e1a7a7a0773cbfd024313c3a15
|
[
"BSD-3-Clause"
] | null | null | null |
"""Preview mixins for Zinnia views"""
from django.http import Http404
from django.utils.translation import ugettext as _
class EntryPreviewMixin(object):
"""
Mixin implementing the preview of Entries.
"""
def get_object(self, queryset=None):
"""
If the status of the entry is not PUBLISHED,
a preview is requested, so we check if the user
has the 'zinnia.can_view_all' permission or if
it's an author of the entry.
"""
obj = super(EntryPreviewMixin, self).get_object(queryset)
if obj.is_visible:
return obj
if (self.request.user.has_perm('zinnia.can_view_all') or
self.request.user.pk in [
author.pk for author in obj.authors.all()]):
return obj
raise Http404(_('No entry found matching the query'))
| 32.884615
| 65
| 0.62924
|
from django.http import Http404
from django.utils.translation import ugettext as _
class EntryPreviewMixin(object):
def get_object(self, queryset=None):
obj = super(EntryPreviewMixin, self).get_object(queryset)
if obj.is_visible:
return obj
if (self.request.user.has_perm('zinnia.can_view_all') or
self.request.user.pk in [
author.pk for author in obj.authors.all()]):
return obj
raise Http404(_('No entry found matching the query'))
| true
| true
|
79034548ec1d838f35e1240aed74d123fc9e3469
| 5,176
|
py
|
Python
|
src/stt.py
|
microsoft/SpeechServices
|
9509a1ca01b5c4628dd11ce8e4840561d4d1e693
|
[
"MIT"
] | null | null | null |
src/stt.py
|
microsoft/SpeechServices
|
9509a1ca01b5c4628dd11ce8e4840561d4d1e693
|
[
"MIT"
] | null | null | null |
src/stt.py
|
microsoft/SpeechServices
|
9509a1ca01b5c4628dd11ce8e4840561d4d1e693
|
[
"MIT"
] | null | null | null |
''' SPEECH-TO-TEXT USING MICROSOFT SPEECH API '''
''' nonstoptimm@gmail.com '''
# Import required packages
import os
import glob
import json
import logging
import codecs
import helper as he
import azure.cognitiveservices.speech as speechsdk
import params as pa
# Load and set configuration parameters
pa.get_config()
def request_endpoint(audio, speech_config, output_directory, lexical):
"""Request the speech service endpoint
Args:
audio: Input data frame
speech_config: Choice between scoring and
output_folder: LUIS app ID
case: LUIS subscription key
lexical: Minimum confidence score for LUIS result, between 0.00 and 1.00
Returns:
df: Scoring data frame with predicted intents and scores
Raises:
ConnectionError: If file is not found
"""
audio_config = speechsdk.audio.AudioConfig(filename = audio)
speech_recognizer = speechsdk.SpeechRecognizer(speech_config = speech_config, audio_config = audio_config)
result = speech_recognizer.recognize_once()
filename = audio[audio.rindex('\\')+1:]
text = process_recognition(result, filename, output_directory, lexical)
return text, filename
def process_recognition(result, filename, output_directory, lexical):
"""Process recognition received from the speech service
Args:
result: Result object returned by STT-service
filename: Filename for output file
output_directory: Output directory for the file
lexical: Boolean to enable extended lexical version of STT-result
Returns:
text: Processed recognition as string
"""
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
if lexical:
text = f"{format(result.text)}\t{json.loads(result.json)['NBest'][0]['Lexical']}"
else:
text = f"{format(result.text)}"
logging.info(f"[INFO] - Recognition successful: {filename} -> {result.text}")
elif result.reason == speechsdk.ResultReason.NoMatch:
logging.warning(filename + "\t" + f"No speech could be recognized: {result.no_match_details}")
text = ""
elif result.reason == speechsdk.ResultReason.Canceled:
cancellation_details = result.cancellation_details
logging.error(filename+"\t"+ f"Speech Recognition canceled: {cancellation_details.reason}")
if cancellation_details.reason == speechsdk.CancellationReason.Error:
logging.error(f"Error details: {cancellation_details.error_details}")
text = ""
return text
# General Function
def write_transcription(output_directory, text):
"""Write transcription to file
Args:
text: Processed recognition as string
output_directory: Output directory for the file
Returns:
Writes output to file
"""
if not os.path.exists(f'{output_directory}/transcriptions.txt'):
transfile = codecs.open(f'{output_directory}/transcriptions.txt', 'w', encoding='utf-8-sig')
transfile.close()
logging.warning(f'[INFO] - Created transcript file with utf-8 bom encoding.')
with open(f"{output_directory}/transcriptions.txt", "a", encoding='utf-8-sig') as transfile:
transfile.write(f'{text}\n')
transfile.close()
def main(speech_files, output_directory, lexical = False, enable_proxy = False, *argv):
"""Main function for STT-functionality
Args:
speech_files: Directory of audio files to be transcribed
output_directory: Output directory for the file
lexical: Boolean to enable extended lexical version of STT-result
enable_proxy: Boolean to enable proxy function in case you need it
*argv: Proxy information if enable_proxy is True -> hostname: str, port: str, username: str, password: str
Returns:
zip(filenames, results): Zipped lists of filenames and STT-results as string
"""
try:
speech_config = speechsdk.SpeechConfig(subscription = pa.config_data['stt_key'], region = pa.config_data['stt_region'])
except RuntimeError:
logging.error("[ERROR] - Could not retrieve speech config")
# If necessary, you can enable a proxy here:
# set_proxy(hostname: str, port: str, username: str, password: str)
if enable_proxy:
speech_config.set_proxy(argv[0], argv[1], argv[2], argv[3])
# Set speech service properties, requesting the detailed response format to make it compatible with lexical format, if wanted
speech_config.set_service_property(name='format', value='detailed', channel=speechsdk.ServicePropertyChannel.UriQueryParameter)
if pa.config_data['stt_endpoint'] != "":
speech_config.endpoint_id = pa.config_data['stt_endpoint']
logging.info(f'[INFO] - Starting to transcribe {len(next(os.walk(speech_files))[2])} audio files')
results = []
filenames = []
for audio in glob.iglob(f'{speech_files}*av'):
result, filename = request_endpoint(audio, speech_config, output_directory, lexical)
results.append(result)
filenames.append(filename)
# Check the result
return zip(filenames, results)
if __name__ == '__main__':
main("input/audio/", "output/test/")
| 45.008696
| 131
| 0.701893
|
import os
import glob
import json
import logging
import codecs
import helper as he
import azure.cognitiveservices.speech as speechsdk
import params as pa
pa.get_config()
def request_endpoint(audio, speech_config, output_directory, lexical):
audio_config = speechsdk.audio.AudioConfig(filename = audio)
speech_recognizer = speechsdk.SpeechRecognizer(speech_config = speech_config, audio_config = audio_config)
result = speech_recognizer.recognize_once()
filename = audio[audio.rindex('\\')+1:]
text = process_recognition(result, filename, output_directory, lexical)
return text, filename
def process_recognition(result, filename, output_directory, lexical):
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
if lexical:
text = f"{format(result.text)}\t{json.loads(result.json)['NBest'][0]['Lexical']}"
else:
text = f"{format(result.text)}"
logging.info(f"[INFO] - Recognition successful: {filename} -> {result.text}")
elif result.reason == speechsdk.ResultReason.NoMatch:
logging.warning(filename + "\t" + f"No speech could be recognized: {result.no_match_details}")
text = ""
elif result.reason == speechsdk.ResultReason.Canceled:
cancellation_details = result.cancellation_details
logging.error(filename+"\t"+ f"Speech Recognition canceled: {cancellation_details.reason}")
if cancellation_details.reason == speechsdk.CancellationReason.Error:
logging.error(f"Error details: {cancellation_details.error_details}")
text = ""
return text
def write_transcription(output_directory, text):
if not os.path.exists(f'{output_directory}/transcriptions.txt'):
transfile = codecs.open(f'{output_directory}/transcriptions.txt', 'w', encoding='utf-8-sig')
transfile.close()
logging.warning(f'[INFO] - Created transcript file with utf-8 bom encoding.')
with open(f"{output_directory}/transcriptions.txt", "a", encoding='utf-8-sig') as transfile:
transfile.write(f'{text}\n')
transfile.close()
def main(speech_files, output_directory, lexical = False, enable_proxy = False, *argv):
try:
speech_config = speechsdk.SpeechConfig(subscription = pa.config_data['stt_key'], region = pa.config_data['stt_region'])
except RuntimeError:
logging.error("[ERROR] - Could not retrieve speech config")
if enable_proxy:
speech_config.set_proxy(argv[0], argv[1], argv[2], argv[3])
speech_config.set_service_property(name='format', value='detailed', channel=speechsdk.ServicePropertyChannel.UriQueryParameter)
if pa.config_data['stt_endpoint'] != "":
speech_config.endpoint_id = pa.config_data['stt_endpoint']
logging.info(f'[INFO] - Starting to transcribe {len(next(os.walk(speech_files))[2])} audio files')
results = []
filenames = []
for audio in glob.iglob(f'{speech_files}*av'):
result, filename = request_endpoint(audio, speech_config, output_directory, lexical)
results.append(result)
filenames.append(filename)
return zip(filenames, results)
if __name__ == '__main__':
main("input/audio/", "output/test/")
| true
| true
|
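An illustrative call of main() above when a proxy is needed; because the proxy details travel through *argv they must be passed positionally after lexical and enable_proxy, and every value shown is a placeholder:
# lexical=True, enable_proxy=True, then hostname, port, username, password
transcripts = main("input/audio/", "output/test/", True, True,
                   "proxy.example.com", "8080", "user", "secret")
for filename, text in transcripts:
    print(filename, text)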
790345629c935e83bea283f2d5a9e53f07e212f4
| 4,367
|
py
|
Python
|
tests/test_unicode.py
|
astrojuanlu/Fiona
|
766a4598462efd5e3b819a0ede0900bc7f9ac9c1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_unicode.py
|
astrojuanlu/Fiona
|
766a4598462efd5e3b819a0ede0900bc7f9ac9c1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_unicode.py
|
astrojuanlu/Fiona
|
766a4598462efd5e3b819a0ede0900bc7f9ac9c1
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
import logging
import os
import shutil
import sys
import tempfile
import unittest
import pytest
import fiona
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
class UnicodePathTest(unittest.TestCase):
def setUp(self):
tempdir = tempfile.mkdtemp()
self.dir = os.path.join(tempdir, 'français')
shutil.copytree('tests/data/', self.dir)
def tearDown(self):
shutil.rmtree(os.path.dirname(self.dir))
def test_unicode_path(self):
path = self.dir + '/coutwildrnp.shp'
if sys.version_info < (3,):
path = path.decode('utf-8')
with fiona.open(path) as c:
assert len(c) == 67
def test_unicode_path_layer(self):
path = self.dir
layer = 'coutwildrnp'
if sys.version_info < (3,):
path = path.decode('utf-8')
layer = layer.decode('utf-8')
with fiona.open(path, layer=layer) as c:
assert len(c) == 67
def test_utf8_path(self):
path = self.dir + '/coutwildrnp.shp'
if sys.version_info < (3,):
with fiona.open(path) as c:
assert len(c) == 67
class UnicodeStringFieldTest(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
@pytest.mark.xfail(reason="OGR silently fails to convert strings")
def test_write_mismatch(self):
"""TOFIX: OGR silently fails to convert strings"""
# Details:
#
# If we tell OGR that we want a latin-1 encoded output file and
# give it a feature with a unicode property that can't be converted
# to latin-1, no error is raised and OGR just writes the utf-8
# encoded bytes to the output file.
#
# This might be shapefile specific.
#
# Consequences: no error on write, but there will be an error
# on reading the data and expecting latin-1.
schema = {
'geometry': 'Point',
'properties': {'label': 'str', 'num': 'int'}}
with fiona.open(os.path.join(self.tempdir, "test-write-fail.shp"),
'w', driver="ESRI Shapefile", schema=schema,
encoding='latin1') as c:
c.writerecords([{
'type': 'Feature',
'geometry': {'type': 'Point', 'coordinates': [0, 0]},
'properties': {
'label': u'徐汇区',
'num': 0}}])
with fiona.open(os.path.join(self.tempdir), encoding='latin1') as c:
f = next(iter(c))
# Next assert fails.
self.assertEqual(f['properties']['label'], u'徐汇区')
def test_write_utf8(self):
schema = {
'geometry': 'Point',
'properties': {'label': 'str', u'verit\xe9': 'int'}}
with fiona.open(os.path.join(self.tempdir, "test-write.shp"),
"w", "ESRI Shapefile", schema=schema,
encoding='utf-8') as c:
c.writerecords([{
'type': 'Feature',
'geometry': {'type': 'Point', 'coordinates': [0, 0]},
'properties': {
'label': u'Ba\u2019kelalan', u'verit\xe9': 0}}])
with fiona.open(os.path.join(self.tempdir), encoding='utf-8') as c:
f = next(iter(c))
self.assertEqual(f['properties']['label'], u'Ba\u2019kelalan')
self.assertEqual(f['properties'][u'verit\xe9'], 0)
def test_write_gb18030(self):
"""Can write a simplified Chinese shapefile"""
schema = {
'geometry': 'Point',
'properties': {'label': 'str', 'num': 'int'}}
with fiona.open(os.path.join(self.tempdir, "test-write-gb18030.shp"),
'w', driver="ESRI Shapefile", schema=schema,
encoding='gb18030') as c:
c.writerecords([{
'type': 'Feature',
'geometry': {'type': 'Point', 'coordinates': [0, 0]},
'properties': {'label': u'徐汇区', 'num': 0}}])
with fiona.open(os.path.join(self.tempdir), encoding='gb18030') as c:
f = next(iter(c))
self.assertEqual(f['properties']['label'], u'徐汇区')
self.assertEqual(f['properties']['num'], 0)
| 34.936
| 77
| 0.53973
|
import logging
import os
import shutil
import sys
import tempfile
import unittest
import pytest
import fiona
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
class UnicodePathTest(unittest.TestCase):
def setUp(self):
tempdir = tempfile.mkdtemp()
self.dir = os.path.join(tempdir, 'français')
shutil.copytree('tests/data/', self.dir)
def tearDown(self):
shutil.rmtree(os.path.dirname(self.dir))
def test_unicode_path(self):
path = self.dir + '/coutwildrnp.shp'
if sys.version_info < (3,):
path = path.decode('utf-8')
with fiona.open(path) as c:
assert len(c) == 67
def test_unicode_path_layer(self):
path = self.dir
layer = 'coutwildrnp'
if sys.version_info < (3,):
path = path.decode('utf-8')
layer = layer.decode('utf-8')
with fiona.open(path, layer=layer) as c:
assert len(c) == 67
def test_utf8_path(self):
path = self.dir + '/coutwildrnp.shp'
if sys.version_info < (3,):
with fiona.open(path) as c:
assert len(c) == 67
class UnicodeStringFieldTest(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
@pytest.mark.xfail(reason="OGR silently fails to convert strings")
def test_write_mismatch(self):
# to latin-1, no error is raised and OGR just writes the utf-8
# encoded bytes to the output file.
#
# This might be shapefile specific.
#
# Consequences: no error on write, but there will be an error
# on reading the data and expecting latin-1.
schema = {
'geometry': 'Point',
'properties': {'label': 'str', 'num': 'int'}}
with fiona.open(os.path.join(self.tempdir, "test-write-fail.shp"),
'w', driver="ESRI Shapefile", schema=schema,
encoding='latin1') as c:
c.writerecords([{
'type': 'Feature',
'geometry': {'type': 'Point', 'coordinates': [0, 0]},
'properties': {
'label': u'徐汇区',
'num': 0}}])
with fiona.open(os.path.join(self.tempdir), encoding='latin1') as c:
f = next(iter(c))
# Next assert fails.
self.assertEqual(f['properties']['label'], u'徐汇区')
def test_write_utf8(self):
schema = {
'geometry': 'Point',
'properties': {'label': 'str', u'verit\xe9': 'int'}}
with fiona.open(os.path.join(self.tempdir, "test-write.shp"),
"w", "ESRI Shapefile", schema=schema,
encoding='utf-8') as c:
c.writerecords([{
'type': 'Feature',
'geometry': {'type': 'Point', 'coordinates': [0, 0]},
'properties': {
'label': u'Ba\u2019kelalan', u'verit\xe9': 0}}])
with fiona.open(os.path.join(self.tempdir), encoding='utf-8') as c:
f = next(iter(c))
self.assertEqual(f['properties']['label'], u'Ba\u2019kelalan')
self.assertEqual(f['properties'][u'verit\xe9'], 0)
def test_write_gb18030(self):
schema = {
'geometry': 'Point',
'properties': {'label': 'str', 'num': 'int'}}
with fiona.open(os.path.join(self.tempdir, "test-write-gb18030.shp"),
'w', driver="ESRI Shapefile", schema=schema,
encoding='gb18030') as c:
c.writerecords([{
'type': 'Feature',
'geometry': {'type': 'Point', 'coordinates': [0, 0]},
'properties': {'label': u'徐汇区', 'num': 0}}])
with fiona.open(os.path.join(self.tempdir), encoding='gb18030') as c:
f = next(iter(c))
self.assertEqual(f['properties']['label'], u'徐汇区')
self.assertEqual(f['properties']['num'], 0)
| true
| true
|
790347682b5d900b563ebd9a546cfa50d6a212c3
| 6,427
|
py
|
Python
|
docs/source/conf.py
|
lukapecnik/NiaPy
|
a40ac08a4c06a13019ec5e39cc137461884928b0
|
[
"MIT"
] | 1
|
2020-03-16T11:15:43.000Z
|
2020-03-16T11:15:43.000Z
|
docs/source/conf.py
|
lukapecnik/NiaPy
|
a40ac08a4c06a13019ec5e39cc137461884928b0
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
lukapecnik/NiaPy
|
a40ac08a4c06a13019ec5e39cc137461884928b0
|
[
"MIT"
] | 1
|
2020-03-25T16:20:36.000Z
|
2020-03-25T16:20:36.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../'))
print(sys.path)
# -- Project information -----------------------------------------------------
project = u'NiaPy'
copyright = u'2018, NiaOrg'
author = u'Grega Vrbančič, Lucija Brezočnik, Uroš Mlakar, Dušan Fister, Iztok Fister Jr., Klemen Berkovič, Jan Popič'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u'0.0.0.'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'NiaPydoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'NiaPy.tex', u'NiaPy Documentation',
u'Grega Vrbančič, Lucija Brezočnik, Uroš Mlakar, Dušan Fister, Iztok Fister Jr.', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'niapy', u'NiaPy Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'NiaPy', u'NiaPy Documentation',
author, 'NiaPy', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
autoclass_content = 'both'
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# A boolean that decides whether parentheses are appended to function and method role text (e.g. the content of :func:`input`) to signify that the name is callable. Default is True
add_function_parentheses = True
# Napoleon settings
# check https://sphinxcontrib-napoleon.readthedocs.io/en/latest/sphinxcontrib.napoleon.html
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = True
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = True
napoleon_use_param = True
napoleon_use_rtype = True
napoleon_use_keyword = True
napoleon_custom_sections = None
import matplotlib
matplotlib.use('agg')
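# Example of the Google-style docstring format the napoleon_* options above are
# tuned for (illustrative only, not taken from the NiaPy sources):
#
#     def evolve(task, population):
#         """Run one generation of the algorithm.
#
#         Args:
#             task (Task): Optimization task to evaluate against.
#             population (numpy.ndarray): Current candidate solutions.
#
#         Returns:
#             numpy.ndarray: The next generation of candidates.
#         """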
| 31.816832
| 180
| 0.67232
|
import os
import sys
sys.path.insert(0, os.path.abspath('../../'))
print(sys.path)
project = u'NiaPy'
copyright = u'2018, NiaOrg'
author = u'Grega Vrbančič, Lucija Brezočnik, Uroš Mlakar, Dušan Fister, Iztok Fister Jr., Klemen Berkovič, Jan Popič'
version = u''
release = u'0.0.0.'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
exclude_patterns = []
pygments_style = 'sphinx'
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'NiaPydoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'NiaPy.tex', u'NiaPy Documentation',
u'Grega Vrbančič, Lucija Brezočnik, Uroš Mlakar, Dušan Fister, Iztok Fister Jr.', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'niapy', u'NiaPy Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'NiaPy', u'NiaPy Documentation',
author, 'NiaPy', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
autoclass_content = 'both'
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# A boolean that decides whether parentheses are appended to function and method role text (e.g. the content of :func:`input`) to signify that the name is callable. Default is True
add_function_parentheses = True
# Napoleon settings
# check https://sphinxcontrib-napoleon.readthedocs.io/en/latest/sphinxcontrib.napoleon.html
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = True
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = True
napoleon_use_param = True
napoleon_use_rtype = True
napoleon_use_keyword = True
napoleon_custom_sections = None
import matplotlib
matplotlib.use('agg')
| true
| true
|
790347b2c5ec555882e4f6d27b4803a4abbedce4
| 1,281
|
py
|
Python
|
databench/analyses_packaged/dummypi/analysis.py
|
svenkreiss/databench
|
99d4adad494b60a42af6b8bfba94dd0c41ba0786
|
[
"MIT"
] | 61
|
2015-01-07T18:03:21.000Z
|
2020-11-23T03:31:54.000Z
|
databench/analyses_packaged/dummypi/analysis.py
|
phillipaug/Data-Analysis-General-repository
|
99d4adad494b60a42af6b8bfba94dd0c41ba0786
|
[
"MIT"
] | 9
|
2015-02-25T15:56:28.000Z
|
2019-03-13T15:16:20.000Z
|
databench/analyses_packaged/dummypi/analysis.py
|
phillipaug/Data-Analysis-General-repository
|
99d4adad494b60a42af6b8bfba94dd0c41ba0786
|
[
"MIT"
] | 15
|
2015-01-07T10:53:59.000Z
|
2020-02-28T05:02:00.000Z
|
from __future__ import division
import databench
import math
import random
class Dummypi(databench.Analysis):
"""A dummy analysis."""
@databench.on
def connected(self):
yield self.data.init({'samples': 100000})
@databench.on
def run(self):
"""Run when button is pressed."""
inside = 0
for draws in range(1, self.data['samples']):
# generate points and check whether they are inside the unit circle
r1 = random.random()
r2 = random.random()
if r1 ** 2 + r2 ** 2 < 1.0:
inside += 1
# every 1000 iterations, update status
if draws % 1000 != 0:
continue
# debug
yield self.emit('log', {'draws': draws, 'inside': inside})
# calculate pi and its uncertainty given the current draws
p = inside / draws
pi = {
'estimate': 4.0 * p,
'uncertainty': 4.0 * math.sqrt(draws * p * (1.0 - p)) / draws,
}
# send status to frontend
yield self.set_state(pi=pi)
yield self.emit('log', {'action': 'done'})
@databench.on
def samples(self, value):
yield self.set_state(samples=value)
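# Worked check of the estimator above (standalone sketch, independent of
# databench): with p = inside / draws, the estimate is 4p and the uncertainty
# 4*sqrt(draws*p*(1-p))/draws simplifies to 4*sqrt(p*(1-p)/draws).
# For p = 0.785 and draws = 100000 this gives roughly 3.140 +/- 0.0052.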
| 26.142857
| 79
| 0.527713
|
from __future__ import division
import databench
import math
import random
class Dummypi(databench.Analysis):
@databench.on
def connected(self):
yield self.data.init({'samples': 100000})
@databench.on
def run(self):
inside = 0
for draws in range(1, self.data['samples']):
r1 = random.random()
r2 = random.random()
if r1 ** 2 + r2 ** 2 < 1.0:
inside += 1
if draws % 1000 != 0:
continue
yield self.emit('log', {'draws': draws, 'inside': inside})
p = inside / draws
pi = {
'estimate': 4.0 * p,
'uncertainty': 4.0 * math.sqrt(draws * p * (1.0 - p)) / draws,
}
yield self.set_state(pi=pi)
yield self.emit('log', {'action': 'done'})
@databench.on
def samples(self, value):
yield self.set_state(samples=value)
| true
| true
|
79034931f44beb14fe594976eaf4f6977d7e2c73
| 1,928
|
py
|
Python
|
wagtail_storages/tests/test_utils.py
|
ski-family/wagtail-storages
|
2786b55540eb7045c87460a885176c45c9afab53
|
[
"BSD-2-Clause"
] | 26
|
2019-12-04T09:45:26.000Z
|
2021-12-02T17:17:31.000Z
|
wagtail_storages/tests/test_utils.py
|
ski-family/wagtail-storages
|
2786b55540eb7045c87460a885176c45c9afab53
|
[
"BSD-2-Clause"
] | 20
|
2019-12-05T10:45:35.000Z
|
2022-02-21T16:03:49.000Z
|
wagtail_storages/tests/test_utils.py
|
ski-family/wagtail-storages
|
2786b55540eb7045c87460a885176c45c9afab53
|
[
"BSD-2-Clause"
] | 5
|
2019-12-04T14:35:45.000Z
|
2021-12-16T07:48:37.000Z
|
from django.test import TestCase, override_settings
from wagtail_storages.factories import (
CollectionFactory,
CollectionViewRestrictionFactory,
)
from wagtail_storages.utils import (
get_acl_for_collection,
get_frontend_cache_configuration,
is_s3_boto3_storage_used,
)
class TestIsS3Boto3StorageUsed(TestCase):
@override_settings(
DEFAULT_FILE_STORAGE="django.core.files.storage.FileSystemStorage"
)
def test_should_return_false_if_not(self):
self.assertIs(is_s3_boto3_storage_used(), False)
@override_settings(DEFAULT_FILE_STORAGE="storages.backends.s3boto3.S3Boto3Storage")
def test_should_return_true_if_yes(self):
self.assertIs(is_s3_boto3_storage_used(), True)
@override_settings(WAGTAIL_STORAGES_DOCUMENTS_FRONTENDCACHE={})
def test_get_frontend_cache_configuration_1(self):
self.assertEqual(get_frontend_cache_configuration(), {})
@override_settings(
WAGTAIL_STORAGES_DOCUMENTS_FRONTENDCACHE={
"varnish": {
"BACKEND": "wagtail.contrib.frontend_cache.backends.HTTPBackend",
"LOCATION": "http://localhost:8000",
},
}
)
def test_get_frontend_cache_configuration_2(self):
self.assertEqual(
get_frontend_cache_configuration(),
{
"varnish": {
"BACKEND": "wagtail.contrib.frontend_cache.backends.HTTPBackend",
"LOCATION": "http://localhost:8000",
},
},
)
class TestGetAclForCollection(TestCase):
    def test_public_collection(self):
collection = CollectionFactory()
self.assertEqual(get_acl_for_collection(collection), "public-read")
    def test_private_collection(self):
collection = CollectionViewRestrictionFactory().collection
self.assertEqual(get_acl_for_collection(collection), "private")
| 33.824561
| 87
| 0.693983
|
from django.test import TestCase, override_settings
from wagtail_storages.factories import (
CollectionFactory,
CollectionViewRestrictionFactory,
)
from wagtail_storages.utils import (
get_acl_for_collection,
get_frontend_cache_configuration,
is_s3_boto3_storage_used,
)
class TestIsS3Boto3StorageUsed(TestCase):
@override_settings(
DEFAULT_FILE_STORAGE="django.core.files.storage.FileSystemStorage"
)
def test_should_return_false_if_not(self):
self.assertIs(is_s3_boto3_storage_used(), False)
@override_settings(DEFAULT_FILE_STORAGE="storages.backends.s3boto3.S3Boto3Storage")
def test_should_return_true_if_yes(self):
self.assertIs(is_s3_boto3_storage_used(), True)
@override_settings(WAGTAIL_STORAGES_DOCUMENTS_FRONTENDCACHE={})
def test_get_frontend_cache_configuration_1(self):
self.assertEqual(get_frontend_cache_configuration(), {})
@override_settings(
WAGTAIL_STORAGES_DOCUMENTS_FRONTENDCACHE={
"varnish": {
"BACKEND": "wagtail.contrib.frontend_cache.backends.HTTPBackend",
"LOCATION": "http://localhost:8000",
},
}
)
def test_get_frontend_cache_configuration_2(self):
self.assertEqual(
get_frontend_cache_configuration(),
{
"varnish": {
"BACKEND": "wagtail.contrib.frontend_cache.backends.HTTPBackend",
"LOCATION": "http://localhost:8000",
},
},
)
class TestGetAclForCollection(TestCase):
    def test_public_collection(self):
collection = CollectionFactory()
self.assertEqual(get_acl_for_collection(collection), "public-read")
    def test_private_collection(self):
collection = CollectionViewRestrictionFactory().collection
self.assertEqual(get_acl_for_collection(collection), "private")
| true
| true
|
790349e7536ecbcf94ff9781fa1d4db9df54005a
| 6,913
|
py
|
Python
|
testing.py
|
sunil1239/FuelEfficiencyInfo
|
7f036b6cfdb120668e940519ca426f4c6794a98b
|
[
"Unlicense"
] | null | null | null |
testing.py
|
sunil1239/FuelEfficiencyInfo
|
7f036b6cfdb120668e940519ca426f4c6794a98b
|
[
"Unlicense"
] | null | null | null |
testing.py
|
sunil1239/FuelEfficiencyInfo
|
7f036b6cfdb120668e940519ca426f4c6794a98b
|
[
"Unlicense"
] | null | null | null |
from PySide import QtGui, QtCore
import os, subprocess, shutil, re
class animQt(QtGui.QMainWindow):
def __init__(self):
super(animQt, self).__init__()
self.setGeometry(250,250,360,100)
style = """
QMainWindow, QMessageBox{
background-color: qradialgradient(spread:pad, cx:0.5, cy:0.5, radius:0.5, fx:0.5, fy:0.5, stop:0.264865 rgba(121, 185, 255, 255), stop:1 rgba(0, 126, 255, 255));
}
QPushButton{
background-color: qlineargradient(spread:pad, x1:1, y1:1, x2:1, y2:0, stop:0.448649 rgba(255, 255, 255, 107), stop:0.464865 rgba(0, 0, 0, 15));
border:1px solid rgb(0, 170, 255);
padding:5px;
color:#FFF;
border-radius:5px;
}
QPushButton:hover{
background-color: qlineargradient(spread:pad, x1:1, y1:1, x2:1, y2:0, stop:0.448649 rgba(0, 0, 0, 15), stop:0.47 rgba(255, 255, 255, 107));
}
QCheckBox{
color:#FFF;
}
QLineEdit{
background-color:rgba(255, 255, 255, 100);
color:#FFF;
border:1px solid rgb(0,170,255);
border-radius:5px;
padding:3px;
}
QLabel{
color:#FFF;
}
QComboBox{
background-color: qlineargradient(spread:pad, x1:1, y1:1, x2:1, y2:0, stop:0.448649 rgba(255, 255, 255, 107), stop:0.464865 rgba(0, 0, 0, 15));
color:#FFF;
padding:5px;
border:1px solid rgb(0, 170, 255);
border-radius:5px;
}
QComboBox:hover{
background-color: qlineargradient(spread:pad, x1:1, y1:1, x2:1, y2:0, stop:0.448649 rgba(0, 0, 0, 15), stop:0.47 rgba(255, 255, 255, 107));
}
QComboBox::drop-down{
subcontrol-origin: padding;
subcontrol-position: top right;
width:25px;
border-left-width: 1px;
border-left-style: solid;
border-top-right-radius: 5px;
border-bottom-right-radius: 5px;
border-left-color: rgb(0, 170, 255);
}
QComboBox::down-arrow{
border-image: url("./down-arrow.png");
height:30px;
width:30px;
}
"""
effect = QtGui.QGraphicsDropShadowEffect(self)
effect.setBlurRadius(5)
effect.setOffset(2,2)
self.setStyleSheet(style)
self.setWindowTitle("Exe Generator(py2exe)")
centralWidget = QtGui.QWidget()
layout = QtGui.QGridLayout(centralWidget)
self.foldPath = QtGui.QLineEdit(self)
openBtn = QtGui.QPushButton(self)
openBtn.setGraphicsEffect(effect)
openBtn.setText("Select File")
openBtn.clicked.connect(self.fileBrowser)
pyPathInit = QtGui.QLabel(self)
pyPathInit.setText("Select Python Version")
self.pyPath = QtGui.QComboBox(self)
self.pyPath.activated.connect(self.changePyPath)
effect = QtGui.QGraphicsDropShadowEffect(self)
effect.setBlurRadius(5)
effect.setOffset(2, 2)
self.pyPath.setGraphicsEffect(effect)
self.checkBox = QtGui.QCheckBox(self)
self.checkBox.setText("Window Mode")
checkBtn = QtGui.QPushButton(self)
checkBtn.clicked.connect(self.createSetup)
checkBtn.setText("Process")
effect = QtGui.QGraphicsDropShadowEffect(self)
effect.setBlurRadius(5)
effect.setOffset(2, 2)
checkBtn.setGraphicsEffect(effect)
layout.addWidget(self.foldPath, 0, 0, 1, 2)
layout.addWidget(openBtn, 0, 2, 1, 1)
layout.addWidget(pyPathInit, 1, 0, 1, 1)
layout.addWidget(self.pyPath, 1, 1, 1, 2)
layout.addWidget(self.checkBox, 2, 0, 1, 2)
layout.addWidget(checkBtn, 2, 2, 1, 1)
self.setCentralWidget(centralWidget)
self.getInstalledPy()
def fileBrowser(self):
browse = QtGui.QFileDialog.getOpenFileName(self, "Select File")
self.foldPath.setText(browse[0])
self.foldName = os.path.dirname(browse[0])
self.filePath = browse[0]
# self.createSetup()
def changePyPath(self, index):
self.setPath = self.pyPath.itemText(index)
def getInstalledPy(self):
path = "c:/"
self.pyPath.addItem("Select")
for each in os.listdir(path):
if os.path.isdir(path+each):
if re.search("Python\d", each, re.I):
if os.path.exists(path+each+"/python.exe"):
# print path+each+"/python.exe"
self.pyPath.addItem(path+each+"/python.exe")
# self.pyPath.addItem("Z:/workspace_mel/dqepy/py27/Scripts/python.exe")
def createSetup(self):
try:
setupFile = self.foldName.replace('\\','/')+"/setup.py"
with open(setupFile, 'w') as fd:
if not self.checkBox.isChecked():
fd.write("from distutils.core import setup\n")
fd.write("import py2exe\n")
fd.write("setup(console =['%s'])"%os.path.basename(self.filePath))
else:
fd.write("from distutils.core import setup\n")
fd.write("import py2exe\n")
fd.write("setup(windows =['%s'])" % os.path.basename(self.filePath))
self.cmdProcess()
shutil.rmtree('%s/build'%self.foldName.replace('\\','/'))
os.rename("dist",os.path.basename(self.filePath).split('.')[0])
self.displayError(parent=self, m="Process done successfully!!!", t="Process Done")
except Exception as e:
self.displayError(parent=self, m="Please Enter all the values\nbefore clicking process button", t="Invalid Values", type=QtGui.QMessageBox.Critical)
def cmdProcess(self):
with open("runBatch.bat", 'w') as fd:
fd.write("@echo off\n")
fd.write("cd %s\n" % self.foldName)
fd.write("%s\n"%self.foldName.replace('\\','/').split("/")[0])
fd.write('%s setup.py py2exe'%self.setPath)
try:
subprocess.call("runBatch.bat", 0, None, None, None, None)
except:
self.displayError(parent=self, m="Python modules were missing in the Python Interpreter\nPlease make sure you had py2exe module", t="Invalid Python Version", type=QtGui.QMessageBox.Critical)
os.remove("runBatch.bat")
def displayError(self, parent, m=None, t="Error found", type=QtGui.QMessageBox.Information, details = ""):
dError = QtGui.QMessageBox(parent)
dError.setText(m)
dError.setWindowTitle(t)
dError.setIcon(type)
dError.setStandardButtons(QtGui.QMessageBox.Ok)
dError.setEscapeButton(QtGui.QMessageBox.Ok)
if details != "":
dError.setDetailedText(details)
dError.show()
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
gui = animQt()
gui.show()
sys.exit(app.exec_())
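# For reference, createSetup() above writes a minimal py2exe setup script of the
# form below (console= vs. windows= depending on the "Window Mode" checkbox):
#
#     from distutils.core import setup
#     import py2exe
#     setup(console=['your_script.py'])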
| 39.959538
| 202
| 0.592362
|
from PySide import QtGui, QtCore
import os, subprocess, shutil, re
class animQt(QtGui.QMainWindow):
def __init__(self):
super(animQt, self).__init__()
self.setGeometry(250,250,360,100)
style = """
QMainWindow, QMessageBox{
background-color: qradialgradient(spread:pad, cx:0.5, cy:0.5, radius:0.5, fx:0.5, fy:0.5, stop:0.264865 rgba(121, 185, 255, 255), stop:1 rgba(0, 126, 255, 255));
}
QPushButton{
background-color: qlineargradient(spread:pad, x1:1, y1:1, x2:1, y2:0, stop:0.448649 rgba(255, 255, 255, 107), stop:0.464865 rgba(0, 0, 0, 15));
border:1px solid rgb(0, 170, 255);
padding:5px;
color:#FFF;
border-radius:5px;
}
QPushButton:hover{
background-color: qlineargradient(spread:pad, x1:1, y1:1, x2:1, y2:0, stop:0.448649 rgba(0, 0, 0, 15), stop:0.47 rgba(255, 255, 255, 107));
}
QCheckBox{
color:#FFF;
}
QLineEdit{
background-color:rgba(255, 255, 255, 100);
color:#FFF;
border:1px solid rgb(0,170,255);
border-radius:5px;
padding:3px;
}
QLabel{
color:#FFF;
}
QComboBox{
background-color: qlineargradient(spread:pad, x1:1, y1:1, x2:1, y2:0, stop:0.448649 rgba(255, 255, 255, 107), stop:0.464865 rgba(0, 0, 0, 15));
color:#FFF;
padding:5px;
border:1px solid rgb(0, 170, 255);
border-radius:5px;
}
QComboBox:hover{
background-color: qlineargradient(spread:pad, x1:1, y1:1, x2:1, y2:0, stop:0.448649 rgba(0, 0, 0, 15), stop:0.47 rgba(255, 255, 255, 107));
}
QComboBox::drop-down{
subcontrol-origin: padding;
subcontrol-position: top right;
width:25px;
border-left-width: 1px;
border-left-style: solid;
border-top-right-radius: 5px;
border-bottom-right-radius: 5px;
border-left-color: rgb(0, 170, 255);
}
QComboBox::down-arrow{
border-image: url("./down-arrow.png");
height:30px;
width:30px;
}
"""
effect = QtGui.QGraphicsDropShadowEffect(self)
effect.setBlurRadius(5)
effect.setOffset(2,2)
self.setStyleSheet(style)
self.setWindowTitle("Exe Generator(py2exe)")
centralWidget = QtGui.QWidget()
layout = QtGui.QGridLayout(centralWidget)
self.foldPath = QtGui.QLineEdit(self)
openBtn = QtGui.QPushButton(self)
openBtn.setGraphicsEffect(effect)
openBtn.setText("Select File")
openBtn.clicked.connect(self.fileBrowser)
pyPathInit = QtGui.QLabel(self)
pyPathInit.setText("Select Python Version")
self.pyPath = QtGui.QComboBox(self)
self.pyPath.activated.connect(self.changePyPath)
effect = QtGui.QGraphicsDropShadowEffect(self)
effect.setBlurRadius(5)
effect.setOffset(2, 2)
self.pyPath.setGraphicsEffect(effect)
self.checkBox = QtGui.QCheckBox(self)
self.checkBox.setText("Window Mode")
checkBtn = QtGui.QPushButton(self)
checkBtn.clicked.connect(self.createSetup)
checkBtn.setText("Process")
effect = QtGui.QGraphicsDropShadowEffect(self)
effect.setBlurRadius(5)
effect.setOffset(2, 2)
checkBtn.setGraphicsEffect(effect)
layout.addWidget(self.foldPath, 0, 0, 1, 2)
layout.addWidget(openBtn, 0, 2, 1, 1)
layout.addWidget(pyPathInit, 1, 0, 1, 1)
layout.addWidget(self.pyPath, 1, 1, 1, 2)
layout.addWidget(self.checkBox, 2, 0, 1, 2)
layout.addWidget(checkBtn, 2, 2, 1, 1)
self.setCentralWidget(centralWidget)
self.getInstalledPy()
def fileBrowser(self):
browse = QtGui.QFileDialog.getOpenFileName(self, "Select File")
self.foldPath.setText(browse[0])
self.foldName = os.path.dirname(browse[0])
self.filePath = browse[0]
def changePyPath(self, index):
self.setPath = self.pyPath.itemText(index)
def getInstalledPy(self):
path = "c:/"
self.pyPath.addItem("Select")
for each in os.listdir(path):
if os.path.isdir(path+each):
if re.search("Python\d", each, re.I):
if os.path.exists(path+each+"/python.exe"):
self.pyPath.addItem(path+each+"/python.exe")
def createSetup(self):
try:
setupFile = self.foldName.replace('\\','/')+"/setup.py"
with open(setupFile, 'w') as fd:
if not self.checkBox.isChecked():
fd.write("from distutils.core import setup\n")
fd.write("import py2exe\n")
fd.write("setup(console =['%s'])"%os.path.basename(self.filePath))
else:
fd.write("from distutils.core import setup\n")
fd.write("import py2exe\n")
fd.write("setup(windows =['%s'])" % os.path.basename(self.filePath))
self.cmdProcess()
shutil.rmtree('%s/build'%self.foldName.replace('\\','/'))
os.rename("dist",os.path.basename(self.filePath).split('.')[0])
self.displayError(parent=self, m="Process done successfully!!!", t="Process Done")
except Exception as e:
self.displayError(parent=self, m="Please Enter all the values\nbefore clicking process button", t="Invalid Values", type=QtGui.QMessageBox.Critical)
def cmdProcess(self):
with open("runBatch.bat", 'w') as fd:
fd.write("@echo off\n")
fd.write("cd %s\n" % self.foldName)
fd.write("%s\n"%self.foldName.replace('\\','/').split("/")[0])
fd.write('%s setup.py py2exe'%self.setPath)
try:
subprocess.call("runBatch.bat", 0, None, None, None, None)
except:
self.displayError(parent=self, m="Python modules were missing in the Python Interpreter\nPlease make sure you had py2exe module", t="Invalid Python Version", type=QtGui.QMessageBox.Critical)
os.remove("runBatch.bat")
def displayError(self, parent, m=None, t="Error found", type=QtGui.QMessageBox.Information, details = ""):
dError = QtGui.QMessageBox(parent)
dError.setText(m)
dError.setWindowTitle(t)
dError.setIcon(type)
dError.setStandardButtons(QtGui.QMessageBox.Ok)
dError.setEscapeButton(QtGui.QMessageBox.Ok)
if details != "":
dError.setDetailedText(details)
dError.show()
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
gui = animQt()
gui.show()
sys.exit(app.exec_())
| true
| true
|
79034b377e53dbe27bcfc9791d8775eae11ab645
| 4,532
|
py
|
Python
|
notebook-samples/unsupervised/pred_electricity_consumption.py
|
MarkMoretto/python-examples-main
|
37b8c41d2f175029f4536ca970f037ff19b4e951
|
[
"MIT"
] | 1
|
2020-07-21T23:24:25.000Z
|
2020-07-21T23:24:25.000Z
|
notebook-samples/unsupervised/pred_electricity_consumption.py
|
MarkMoretto/python-examples-main
|
37b8c41d2f175029f4536ca970f037ff19b4e951
|
[
"MIT"
] | 4
|
2021-06-29T00:38:57.000Z
|
2022-01-15T00:22:15.000Z
|
notebook-samples/unsupervised/pred_electricity_consumption.py
|
MarkMoretto/python-examples-main
|
37b8c41d2f175029f4536ca970f037ff19b4e951
|
[
"MIT"
] | null | null | null |
"""
Purpose: Unsupervised learning sampler
Date created: 2020-11-06
Ref repo: https://github.com/White-Link/UnsupervisedScalableRepresentationLearningTimeSeries
Local folder: C:/Users/Work1/Desktop/Info/GitHub/python-examples-main/notebook-samples/unsupervised
Contributor(s):
Mark M.
"""
import os
from pathlib import Path
# Set local folder if developing/debugging
myuser = os.environ["username"]
PROJECT_FOLDER = Path(rf"C:\Users\{myuser}\Desktop\Info\GitHub\python-examples-main\notebook-samples\unsupervised")
os.chdir(PROJECT_FOLDER)
from UnsupervisedTSRepo import scikit_wrappers
import gc
import zipfile
import requests
from io import BytesIO, StringIO
# Data sci and dat processing imports
import scipy as sp
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import sklearn
from sklearn import cluster
from sklearn import neighbors
import torch
import torch.nn as nn
import torch.optim as optim
pd.set_option("mode.chained_assignment", None)
pd.set_option("display.width", 120)
pd.set_option("display.date_yearfirst", True)
pd.set_option("display.max_colwidth", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.max_info_rows", 10000)
gc.enable()
# Check for CUDA
CUDA_TF: bool = False
if torch.cuda.is_available():
print("Using CUDA...")
CUDA_TF = True
GPU = 0
zip_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip"
def import_zipfile_data(URL = zip_url):
with requests.Session() as s:
tmp = s.get(URL)
with zipfile.ZipFile(BytesIO(tmp.content)) as zfo:
with zfo.open("household_power_consumption.txt") as zfi:
tmp = StringIO(zfi.read().decode("utf-8"))
data_ = pd.read_csv(tmp, sep=";", decimal=",", header=0, low_memory=False)
del tmp
return data_
data = import_zipfile_data(zip_url)
data.loc[:, "Date"] = pd.to_datetime(data.loc[:, "Date"], yearfirst=True)
data.loc[:, "Time"] = pd.to_datetime(data.loc[:, "Time"], format="%H:%M:%S").dt.time
#dataset = data.transpose(pd.array(data))[2].reshape(1, 1, -1)
# Update missing values with the "last seen" value.
# This probably works better for timeseries than other data
# since order is important here.
dataset = np.transpose(np.array(data))[2].reshape(1, 1, -1)
for idx in range(np.shape(dataset)[2]):
if dataset[0, 0, idx] == "?":
dataset[0, 0, idx] = dataset[0, 0, idx - 1]
dataset = dataset.astype(np.float32)
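# Hedged alternative to the element-wise loop above: the same last-observation-
# carried-forward fill can be done in pandas before converting to numpy.
# (Column index 2 is Global_active_power in this dataset; treat the name as an
# assumption if the header differs.)
# filled = (data.iloc[:, 2].replace("?", np.nan).astype(np.float32)
#           .ffill().to_numpy().reshape(1, 1, -1))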
# Create training and testing sets
train = dataset[:, :, :500000]
test = dataset[:, :, 500000:]
# Normalization
mu_ = np.mean(dataset)
sigma_ = np.std(dataset)
normalize = lambda d, mean, sigma: (d - mean) / sigma
dataset = normalize(dataset, mu_, sigma_)
train = normalize(train, mu_, sigma_)
test = normalize(test, mu_, sigma_)
print(f"Normalized data set metrics:\n\tMean: {np.mean(dataset)}\n\tVariance: {np.var(dataset)}")
# Feature learning
# Train new model?
training = True
model_path = PROJECT_FOLDER.joinpath(r"data\HouseholdPowerConsumption_yearly")
# hyperparams = {
# "batch_size": 1,
# "channels": 30,
# "compared_length": None,
# "depth": 10,
# "nb_steps": 400,
# "in_channels": 1,
# "kernel_size": 3,
# "penalty": None,
# "early_stopping": None,
# "lr": 0.001,
# "nb_random_samples": 10,
# "negative_penalty": 1,
# "out_channels": 160,
# "reduced_size": 80,
# "cuda": CUDA_TF,
# "gpu": GPU
# }
# encoder_yearly = scikit_wrappers.CausalCNNEncoderClassifier()
# encoder_yearly.set_params(**hyperparams)
# if training:
# encoder_yearly.fit_encoder(train, save_memory=True, verbose=True)
# encoder_yearly.save_encoder(model_path.as_posix())
# else:
# encoder_yearly.load_encoder(model_path.as_posix())
torch.cuda.empty_cache()
"""" For local zipfile data
from io import StringIO
with zipfile.ZipFile("household_power_consumption.zip") as zfo:
with zfo.open("household_power_consumption.txt") as zfi:
tmp = StringIO(zfi.read().decode("utf-8"))
data = pd.read_csv(tmp, sep=";", decimal=",", header=0, low_memory=False)
del tmp
"""
"""
import hmac
import pickle
import hashlib
import binascii
def create_sha256_signature(key, message):
byte_key = binascii.unhexlify(key)
message = message.encode()
return hmac.new(byte_key, message, hashlib.sha256).hexdigest().upper()
create_sha256_signature("E49756B4C8FAB4E48222A3E7F3B97CC3", "TEST STRING")
"""
| 23.978836
| 115
| 0.703442
|
import os
from pathlib import Path
myuser = os.environ["username"]
PROJECT_FOLDER = Path(rf"C:\Users\{myuser}\Desktop\Info\GitHub\python-examples-main\notebook-samples\unsupervised")
os.chdir(PROJECT_FOLDER)
from UnsupervisedTSRepo import scikit_wrappers
import gc
import zipfile
import requests
from io import BytesIO, StringIO
import scipy as sp
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import sklearn
from sklearn import cluster
from sklearn import neighbors
import torch
import torch.nn as nn
import torch.optim as optim
pd.set_option("mode.chained_assignment", None)
pd.set_option("display.width", 120)
pd.set_option("display.date_yearfirst", True)
pd.set_option("display.max_colwidth", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.max_info_rows", 10000)
gc.enable()
CUDA_TF: bool = False
if torch.cuda.is_available():
print("Using CUDA...")
CUDA_TF = True
GPU = 0
zip_url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip"
def import_zipfile_data(URL = zip_url):
with requests.Session() as s:
tmp = s.get(URL)
with zipfile.ZipFile(BytesIO(tmp.content)) as zfo:
with zfo.open("household_power_consumption.txt") as zfi:
tmp = StringIO(zfi.read().decode("utf-8"))
data_ = pd.read_csv(tmp, sep=";", decimal=",", header=0, low_memory=False)
del tmp
return data_
data = import_zipfile_data(zip_url)
data.loc[:, "Date"] = pd.to_datetime(data.loc[:, "Date"], yearfirst=True)
data.loc[:, "Time"] = pd.to_datetime(data.loc[:, "Time"], format="%H:%M:%S").dt.time
dataset = np.transpose(np.array(data))[2].reshape(1, 1, -1)
for idx in range(np.shape(dataset)[2]):
if dataset[0, 0, idx] == "?":
dataset[0, 0, idx] = dataset[0, 0, idx - 1]
dataset = dataset.astype(np.float32)
train = dataset[:, :, :500000]
test = dataset[:, :, 500000:]
mu_ = np.mean(dataset)
sigma_ = np.std(dataset)
normalize = lambda d, mean, sigma: (d - mean) / sigma
dataset = normalize(dataset, mu_, sigma_)
train = normalize(train, mu_, sigma_)
test = normalize(test, mu_, sigma_)
print(f"Normalized data set metrics:\n\tMean: {np.mean(dataset)}\n\tVariance: {np.var(dataset)}")
training = True
model_path = PROJECT_FOLDER.joinpath(r"data\HouseholdPowerConsumption_yearly")
torch.cuda.empty_cache()
| true
| true
|
79034cd6ecd7727ce9e7cf45eacbe59cb3197f5c
| 3,218
|
py
|
Python
|
find_schedule.py
|
jason-sa/Toucans
|
b463817426702eef470c8973102703d71274c235
|
[
"MIT"
] | 1
|
2018-09-27T21:04:08.000Z
|
2018-09-27T21:04:08.000Z
|
1-Benson_Project/find_schedule.py
|
Stitchmaker/Metis_Bootcamp
|
d5ba3b215482cb1044e6b38833068ba93f2852f3
|
[
"MIT"
] | null | null | null |
1-Benson_Project/find_schedule.py
|
Stitchmaker/Metis_Bootcamp
|
d5ba3b215482cb1044e6b38833068ba93f2852f3
|
[
"MIT"
] | 1
|
2018-10-14T01:55:47.000Z
|
2018-10-14T01:55:47.000Z
|
import pandas as pd
import read_mta_turnstile as t
# This function generates a schedule for all stations in the df_top.csv file, returned in pivot-table format.
def find_schedule():
    # Read the stations with the highest Toucan scores and select the columns relevant
# to our schedule algorithm
top_stations = pd.read_csv('df_top.csv')
top_stations.rename(columns={'name':'STATION'}, inplace = True)
top_stations1 = top_stations.loc[:,['STATION','toucan_score']]
    # Read the turnstile data and select the columns relevant to the schedule algorithm
turnstile_data = t.read_mta_turnstile(start='20180501', end='20180531')
turnstile_data1 = turnstile_data.loc[:,['STATION','DATE','TIME','hourly_entries','hourly_exits']]
# Merge the two DataFrames to have hourly entries and exits of stations with top Toucan scores
turnstile_data2 = turnstile_data1.merge(top_stations1, on = 'STATION')
    # Format the dataframe, give it "day of week" and "hour of day" values, and
    # aggregate the hourly entries of each station by date
schedule = pd.DataFrame(columns = ['STATION', 'hour_of_day', 'day_name', 'hourly_entries'])
agg = turnstile_data1.groupby(['STATION','DATE','TIME'])[['hourly_entries']].sum().reset_index()
agg.DATE = pd.to_datetime(agg.DATE, format='%m/%d/%Y')
agg.TIME = pd.to_datetime(agg.TIME, format='%H:%M:%S')
agg['day_name'] = agg.DATE.dt.day_name()
agg['hour_of_day'] = agg.TIME.dt.hour
# Remove 0, 4, and 20 hours of day. Only want 8:00am, 12:00pm, and 4:00pm
agg = agg[(agg['hour_of_day'] > 5) & (agg['hour_of_day'] < 19 )]
# Segment hours of day into three different shifts: Morning, Afternoon and Evening
l_times = []
for h in agg.hour_of_day:
if int(h) <= 11:
l_times.append('Morning')
elif int(h) >= 15:
l_times.append('Evening')
else:
l_times.append('Afternoon')
agg.hour_of_day = l_times
# For each station in the top station list, this for loop generates a schedule, which identifies
# three shifts with the highest number of entries during the week. Volunteers should be at the station
# at these three shifts.
for station_name in top_stations1.STATION.unique():
        # Aggregate each station's hourly entries by day of the week, shifts of the day and
# pivot the DataFrame as shift vs. day
hm = agg.loc[agg.STATION == station_name,['hour_of_day','day_name','hourly_entries']]
hm = hm.groupby(['hour_of_day','day_name'])['hourly_entries'].mean().reset_index()
hm = hm.pivot(index='hour_of_day',columns='day_name',values='hourly_entries')
# Calculate three shifts with highest throughput
sc = hm.stack().nlargest(3).reset_index()
sc.rename(columns={0:'hourly_entries'}, inplace=True)
sc['STATION'] = [station_name]*3
schedule = schedule.append(sc) # This is a schedule for all stations in the top station list.
# Make a pivot table of the schedule
schedule['p'] = [1]*schedule.shape[0]
schedule_pivot = schedule.pivot_table(index=['STATION'],columns=['day_name','hour_of_day'],values='p')
return schedule_pivot
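# Minimal usage sketch (assumes df_top.csv and the May 2018 turnstile data are
# reachable from the working directory, as the function above expects):
if __name__ == '__main__':
    weekly_schedule = find_schedule()
    print(weekly_schedule)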
| 51.903226
| 111
| 0.678061
|
import pandas as pd
import read_mta_turnstile as t
def find_schedule():
top_stations = pd.read_csv('df_top.csv')
top_stations.rename(columns={'name':'STATION'}, inplace = True)
top_stations1 = top_stations.loc[:,['STATION','toucan_score']]
turnstile_data = t.read_mta_turnstile(start='20180501', end='20180531')
turnstile_data1 = turnstile_data.loc[:,['STATION','DATE','TIME','hourly_entries','hourly_exits']]
turnstile_data2 = turnstile_data1.merge(top_stations1, on = 'STATION')
schedule = pd.DataFrame(columns = ['STATION', 'hour_of_day', 'day_name', 'hourly_entries'])
agg = turnstile_data1.groupby(['STATION','DATE','TIME'])[['hourly_entries']].sum().reset_index()
agg.DATE = pd.to_datetime(agg.DATE, format='%m/%d/%Y')
agg.TIME = pd.to_datetime(agg.TIME, format='%H:%M:%S')
agg['day_name'] = agg.DATE.dt.day_name()
agg['hour_of_day'] = agg.TIME.dt.hour
agg = agg[(agg['hour_of_day'] > 5) & (agg['hour_of_day'] < 19 )]
l_times = []
for h in agg.hour_of_day:
if int(h) <= 11:
l_times.append('Morning')
elif int(h) >= 15:
l_times.append('Evening')
else:
l_times.append('Afternoon')
agg.hour_of_day = l_times
for station_name in top_stations1.STATION.unique():
# pivot the DataFrame as shift vs. day
hm = agg.loc[agg.STATION == station_name,['hour_of_day','day_name','hourly_entries']]
hm = hm.groupby(['hour_of_day','day_name'])['hourly_entries'].mean().reset_index()
hm = hm.pivot(index='hour_of_day',columns='day_name',values='hourly_entries')
# Calculate three shifts with highest throughput
sc = hm.stack().nlargest(3).reset_index()
sc.rename(columns={0:'hourly_entries'}, inplace=True)
sc['STATION'] = [station_name]*3
schedule = schedule.append(sc) # This is a schedule for all stations in the top station list.
# Make a pivot table of the schedule
schedule['p'] = [1]*schedule.shape[0]
schedule_pivot = schedule.pivot_table(index=['STATION'],columns=['day_name','hour_of_day'],values='p')
return schedule_pivot
| true
| true
|
79034d00af5409d64978ded6af61a9b54a7c4936
| 4,215
|
py
|
Python
|
utils/eval.py
|
Curli-quan/oneshot-medical-landmark
|
572926077fffbe9832aa16baa98bd046ec326700
|
[
"Apache-2.0"
] | 7
|
2021-12-18T17:08:15.000Z
|
2022-03-02T14:08:12.000Z
|
utils/eval.py
|
Curli-quan/oneshot-medical-landmark
|
572926077fffbe9832aa16baa98bd046ec326700
|
[
"Apache-2.0"
] | 2
|
2021-12-19T20:28:22.000Z
|
2021-12-28T05:17:47.000Z
|
utils/eval.py
|
Curli-quan/oneshot-medical-landmark
|
572926077fffbe9832aa16baa98bd046ec326700
|
[
"Apache-2.0"
] | 1
|
2022-03-19T15:08:16.000Z
|
2022-03-19T15:08:16.000Z
|
import numpy as np
from .utils import make_dir
class Evaluater(object):
def __init__(self, logger, size, original_size, tag='paper_figure'):
self.pixel_spaceing = 0.1
self.tag = tag
make_dir(tag)
self.tag += '/'
self.logger = logger
self.scale_rate_y = original_size[0] / size[0]
self.scale_rate_x = original_size[1] / size[1]
self.RE_list = list()
self.recall_radius = [2, 2.5, 3, 4] # 2mm etc
self.recall_rate = list()
self.Attack_RE_list = list()
self.Defend_RE_list = list()
self.dict_Attack = dict()
self.dict_Defend = dict()
self.total_list = dict()
self.mode_list = [0, 1, 2, 3]
self.mode_dict = {0: "Iterative FGSM", 1: "Adaptive Iterative FGSM", \
2: "Adaptive_Rate", 3: "Proposed"}
for mode in self.mode_list:
self.dict_Defend[mode] = dict()
self.dict_Attack[mode] = dict()
self.total_list[mode] = list()
self.best_mre = 100.0
def reset(self):
self.RE_list.clear()
for mode in self.mode_list:
self.dict_Defend[mode] = dict()
self.dict_Attack[mode] = dict()
self.total_list[mode] = list()
self.Attack_RE_list.clear()
self.Defend_RE_list.clear()
def record(self, pred, landmark):
# n = batchsize = 1
# pred : list[ c(y) ; c(x) ]
# landmark: list [ (x , y) * c]
c = pred[0].shape[0]
diff = np.zeros([c, 2], dtype=float) # y, x
for i in range(c):
diff[i][0] = abs(pred[0][i] - landmark[i][1]) * self.scale_rate_y
diff[i][1] = abs(pred[1][i] - landmark[i][0]) * self.scale_rate_x
Radial_Error = np.sqrt(np.power(diff[:, 0], 2) + np.power(diff[:, 1], 2))
Radial_Error *= self.pixel_spaceing
self.RE_list.append(Radial_Error)
# for i in range(len(Radial_Error)):
# if Radial_Error[i] > 10:
# print("Landmark {} RE {}".format(i, Radial_Error[i]))
# if Radial_Error.max() > 10:
# return Radial_Error.argmax()
return None
def record_attack(self, pred, landmark, attack_list, mode=0, iteration=0):
# n = batchsize = 1
# pred : list[ c(y) ; c(x) ]
# landmark: list [ (x , y) * c]
assert (mode in [0, 1, 2, 3])
c = pred[0].shape[0]
diff = np.zeros([c, 2], dtype=float) # y, x
attack_temp = list()
defend_temp = list()
for i in range(c):
diff[i][0] = abs(pred[0][i] - landmark[i][1]) * self.scale_rate_y
diff[i][1] = abs(pred[1][i] - landmark[i][0]) * self.scale_rate_x
Radial_Error = np.sqrt(np.power(diff[i, 0], 2) + np.power(diff[i, 1], 2))
if i in attack_list:
attack_temp.append([i, Radial_Error * self.pixel_spaceing])
else:
defend_temp.append([i, Radial_Error * self.pixel_spaceing])
if iteration not in self.dict_Attack[mode].keys():
self.dict_Attack[mode][iteration] = list()
self.dict_Attack[mode][iteration].append(attack_temp)
if iteration not in self.dict_Defend[mode].keys():
self.dict_Defend[mode][iteration] = list()
self.dict_Defend[mode][iteration].append(defend_temp)
def cal_metrics(self, ex=False):
# calculate MRE SDR
temp = np.array(self.RE_list)
Mean_RE_channel = temp.mean(axis=0)
self.logger.info(Mean_RE_channel)
# with open('./tmp/results.csv', 'w') as f:
# writer = csv.writer(f)
# writer.writerow(Mean_RE_channel.tolist())
mre = Mean_RE_channel.mean()
self.logger.info("ALL MRE {}".format(mre))
for radius in self.recall_radius:
total = temp.size
shot = (temp < radius).sum()
self.logger.info("ALL SDR {}mm {}".format\
(radius, shot * 100 / total))
if ex:
return mre, None
return mre
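# Hedged usage sketch (shapes follow the comments in record(); the size values
# below are placeholders, and `logger` is any standard logging.Logger):
# evaluater = Evaluater(logger, size=(384, 384), original_size=(2400, 1935))
# for pred, landmark in predictions:   # pred: [y-array, x-array]; landmark: [(x, y), ...]
#     evaluater.record(pred, landmark)
# mre = evaluater.cal_metrics()        # logs per-landmark MRE and SDR at 2/2.5/3/4 mm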
| 37.300885
| 86
| 0.530724
|
import numpy as np
from .utils import make_dir
class Evaluater(object):
def __init__(self, logger, size, original_size, tag='paper_figure'):
self.pixel_spaceing = 0.1
self.tag = tag
make_dir(tag)
self.tag += '/'
self.logger = logger
self.scale_rate_y = original_size[0] / size[0]
self.scale_rate_x = original_size[1] / size[1]
self.RE_list = list()
self.recall_radius = [2, 2.5, 3, 4]
self.recall_rate = list()
self.Attack_RE_list = list()
self.Defend_RE_list = list()
self.dict_Attack = dict()
self.dict_Defend = dict()
self.total_list = dict()
self.mode_list = [0, 1, 2, 3]
self.mode_dict = {0: "Iterative FGSM", 1: "Adaptive Iterative FGSM", \
2: "Adaptive_Rate", 3: "Proposed"}
for mode in self.mode_list:
self.dict_Defend[mode] = dict()
self.dict_Attack[mode] = dict()
self.total_list[mode] = list()
self.best_mre = 100.0
def reset(self):
self.RE_list.clear()
for mode in self.mode_list:
self.dict_Defend[mode] = dict()
self.dict_Attack[mode] = dict()
self.total_list[mode] = list()
self.Attack_RE_list.clear()
self.Defend_RE_list.clear()
def record(self, pred, landmark):
c = pred[0].shape[0]
diff = np.zeros([c, 2], dtype=float)
for i in range(c):
diff[i][0] = abs(pred[0][i] - landmark[i][1]) * self.scale_rate_y
diff[i][1] = abs(pred[1][i] - landmark[i][0]) * self.scale_rate_x
Radial_Error = np.sqrt(np.power(diff[:, 0], 2) + np.power(diff[:, 1], 2))
Radial_Error *= self.pixel_spaceing
self.RE_list.append(Radial_Error)
return None
def record_attack(self, pred, landmark, attack_list, mode=0, iteration=0):
assert (mode in [0, 1, 2, 3])
c = pred[0].shape[0]
diff = np.zeros([c, 2], dtype=float)
attack_temp = list()
defend_temp = list()
for i in range(c):
diff[i][0] = abs(pred[0][i] - landmark[i][1]) * self.scale_rate_y
diff[i][1] = abs(pred[1][i] - landmark[i][0]) * self.scale_rate_x
Radial_Error = np.sqrt(np.power(diff[i, 0], 2) + np.power(diff[i, 1], 2))
if i in attack_list:
attack_temp.append([i, Radial_Error * self.pixel_spaceing])
else:
defend_temp.append([i, Radial_Error * self.pixel_spaceing])
if iteration not in self.dict_Attack[mode].keys():
self.dict_Attack[mode][iteration] = list()
self.dict_Attack[mode][iteration].append(attack_temp)
if iteration not in self.dict_Defend[mode].keys():
self.dict_Defend[mode][iteration] = list()
self.dict_Defend[mode][iteration].append(defend_temp)
def cal_metrics(self, ex=False):
temp = np.array(self.RE_list)
Mean_RE_channel = temp.mean(axis=0)
self.logger.info(Mean_RE_channel)
mre = Mean_RE_channel.mean()
self.logger.info("ALL MRE {}".format(mre))
for radius in self.recall_radius:
total = temp.size
shot = (temp < radius).sum()
self.logger.info("ALL SDR {}mm {}".format\
(radius, shot * 100 / total))
if ex:
return mre, None
return mre
| true
| true
|
79034d4a700d76f45ff8a416cf194f3fa8e5cc19
| 2,723
|
py
|
Python
|
recstudio/model/seq/hgn.py
|
ustc-recsys/Torchrec
|
4d62ee42018c12961850936cfd8f4f8d3c6a8dbc
|
[
"MIT"
] | 1
|
2021-11-13T12:12:54.000Z
|
2021-11-13T12:12:54.000Z
|
recstudio/model/seq/hgn.py
|
ustc-recsys/Torchrec
|
4d62ee42018c12961850936cfd8f4f8d3c6a8dbc
|
[
"MIT"
] | null | null | null |
recstudio/model/seq/hgn.py
|
ustc-recsys/Torchrec
|
4d62ee42018c12961850936cfd8f4f8d3c6a8dbc
|
[
"MIT"
] | null | null | null |
import torch
from recstudio.ann import sampler
from recstudio.data import dataset
from recstudio.model import basemodel, loss_func, scorer
r"""
HGN
########
Paper Reference:
Chen Ma, et al. "HGN: Hierarchical Gating Networks for Sequential Recommendation" in KDD 2019.
https://dl.acm.org/doi/abs/10.1145/3292500.3330984
"""
class HGNQueryEncoder(torch.nn.Module):
def __init__(self, fuid, fiid, num_users, embed_dim, max_seq_len, item_encoder, pooling_type='mean') -> None:
super().__init__()
self.fuid = fuid
self.fiid = fiid
self.item_encoder = item_encoder
self.pooling_type = pooling_type
self.user_embedding = torch.nn.Embedding(num_users, embed_dim, 0)
self.W_g_1 = torch.nn.Linear(embed_dim, embed_dim, bias=False)
self.W_g_2 = torch.nn.Linear(embed_dim, embed_dim, bias=False)
self.b_g = torch.nn.Parameter(torch.empty(embed_dim), requires_grad=True)
self.w_g_3 = torch.nn.Linear(embed_dim, 1, bias=False)
self.W_g_4 = torch.nn.Linear(embed_dim, max_seq_len)
def forward(self, batch):
U = self.user_embedding(batch[self.fuid])
S = self.item_encoder(batch['in_'+self.fiid])
S_F = S * torch.sigmoid(self.W_g_1(S) + self.W_g_2(U).view(U.size(0), 1, -1) + self.b_g)
weight = torch.sigmoid(self.w_g_3(S_F) + (U@self.W_g_4.weight[:S.size(1)].T).view(U.size(0), -1, 1)) # BxLx1
S_I = S_F * weight
if self.pooling_type == 'mean':
s = S_I.sum(1) / weight.sum(1)
elif self.pooling_type == 'max':
s = torch.max(S_I, dim=1).values
else:
raise ValueError("`pooling_type` only support `avg` and `max`")
query = U + s + S.sum(1)
return query
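    # Shape walkthrough for the gating above (B = batch, L = max_seq_len, D = embed_dim):
    #   U      : B x D      user embedding
    #   S      : B x L x D  item embeddings of the input sequence
    #   S_F    : B x L x D  feature-gated sequence (sigmoid gate built from W_g_1, W_g_2, b_g)
    #   weight : B x L x 1  instance-gating scores from w_g_3 plus the user-specific rows of W_g_4
    #   query  : B x D      long-term (U) + pooled short-term (S_I) + raw item sum (S.sum(1))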
class HGN(basemodel.BaseRetriever):
r"""HGN proposes a hierarchical gating network, integrated with the Bayesian Personalized Ranking
(BPR) to capture both the long-term and short-term user interests. HGN consists of a feature
gating module, an instance gating module, and an item-item product module."""
def _get_dataset_class(self):
r"""The dataset is SeqDataset."""
return dataset.SeqDataset
def _get_query_encoder(self, train_data):
return HGNQueryEncoder(self.fuid, self.fiid, train_data.num_users, self.embed_dim, \
train_data.config['max_seq_len'], self.item_encoder, self.config['pooling_type'])
def _get_scorer_func(self):
return scorer.InnerProductScorer()
def _get_loss_func(self):
r"""BPR loss is used."""
return loss_func.BPRLoss()
def _get_sampler(self, train_data):
return sampler.UniformSampler(train_data.num_items-1)
| 36.797297
| 120
| 0.662505
|
import torch
from recstudio.ann import sampler
from recstudio.data import dataset
from recstudio.model import basemodel, loss_func, scorer
class HGNQueryEncoder(torch.nn.Module):
def __init__(self, fuid, fiid, num_users, embed_dim, max_seq_len, item_encoder, pooling_type='mean') -> None:
super().__init__()
self.fuid = fuid
self.fiid = fiid
self.item_encoder = item_encoder
self.pooling_type = pooling_type
self.user_embedding = torch.nn.Embedding(num_users, embed_dim, 0)
self.W_g_1 = torch.nn.Linear(embed_dim, embed_dim, bias=False)
self.W_g_2 = torch.nn.Linear(embed_dim, embed_dim, bias=False)
self.b_g = torch.nn.Parameter(torch.empty(embed_dim), requires_grad=True)
self.w_g_3 = torch.nn.Linear(embed_dim, 1, bias=False)
self.W_g_4 = torch.nn.Linear(embed_dim, max_seq_len)
def forward(self, batch):
U = self.user_embedding(batch[self.fuid])
S = self.item_encoder(batch['in_'+self.fiid])
S_F = S * torch.sigmoid(self.W_g_1(S) + self.W_g_2(U).view(U.size(0), 1, -1) + self.b_g)
weight = torch.sigmoid(self.w_g_3(S_F) + (U@self.W_g_4.weight[:S.size(1)].T).view(U.size(0), -1, 1))
S_I = S_F * weight
if self.pooling_type == 'mean':
s = S_I.sum(1) / weight.sum(1)
elif self.pooling_type == 'max':
s = torch.max(S_I, dim=1).values
else:
raise ValueError("`pooling_type` only support `avg` and `max`")
query = U + s + S.sum(1)
return query
class HGN(basemodel.BaseRetriever):
def _get_dataset_class(self):
return dataset.SeqDataset
def _get_query_encoder(self, train_data):
return HGNQueryEncoder(self.fuid, self.fiid, train_data.num_users, self.embed_dim, \
train_data.config['max_seq_len'], self.item_encoder, self.config['pooling_type'])
def _get_scorer_func(self):
return scorer.InnerProductScorer()
def _get_loss_func(self):
return loss_func.BPRLoss()
def _get_sampler(self, train_data):
return sampler.UniformSampler(train_data.num_items-1)
| true
| true
|
79034d720a9797a0961ffc28129dfb20eb5e848d
| 668
|
py
|
Python
|
articles/models.py
|
Blaise-design/Django-Hospital-Project
|
30572cef02be343eda50390ca6bd1f239a37d9c1
|
[
"MIT"
] | 2
|
2020-04-22T06:31:18.000Z
|
2020-06-16T05:03:16.000Z
|
articles/models.py
|
Blaise-design/Django-Hospital-Project
|
30572cef02be343eda50390ca6bd1f239a37d9c1
|
[
"MIT"
] | null | null | null |
articles/models.py
|
Blaise-design/Django-Hospital-Project
|
30572cef02be343eda50390ca6bd1f239a37d9c1
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from django.utils import timezone
# Create your models here.
class Article(models.Model):
    title = models.CharField(max_length=100)
    slug = models.SlugField(blank=True)
    body = models.TextField()
    date = models.DateTimeField(default=timezone.now)
    thumb = models.ImageField(default='default.jpg', blank=True)
    Author = models.ForeignKey(User, default=None, on_delete=models.CASCADE)
#Thumbnails
def __str__(self):
return self.title
def snippets(self):
return self.body[:80] + '...'
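A hedged usage sketch for the Article model above; it assumes a configured Django project with this app's migrations applied (so it illustrates the API rather than running standalone), and the title and body values are placeholders.
from django.contrib.auth.models import User
# from articles.models import Article    # import path taken from the repo layout above

author = User.objects.first()             # assumes at least one user exists
article = Article.objects.create(
    title="Ward round notes",             # placeholder content
    body="Patient intake procedures were updated this week. " * 3,
    Author=author,                        # note the capitalised field name defined above
)
print(article)             # __str__ returns the title
print(article.snippets())  # first 80 characters of the body plus '...'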
| 31.809524
| 77
| 0.685629
|
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from django.utils import timezone
class Article(models.Model):
    title = models.CharField(max_length=100)
    slug = models.SlugField(blank=True)
    body = models.TextField()
    date = models.DateTimeField(default=timezone.now)
    thumb = models.ImageField(default='default.jpg', blank=True)
    Author = models.ForeignKey(User, default=None, on_delete=models.CASCADE)
def __str__(self):
return self.title
def snippets(self):
return self.body[:80] + '...'
| true
| true
|
79034d78000be728f4c26f055790a32ed78837e8
| 2,676
|
py
|
Python
|
compiler_idioms/idiom/implementations/remainder_signed_todo.py
|
fkie-cad/pidarci
|
7c9ab0af202c675fae3af8f7f27bbde7aa3eea40
|
[
"MIT"
] | null | null | null |
compiler_idioms/idiom/implementations/remainder_signed_todo.py
|
fkie-cad/pidarci
|
7c9ab0af202c675fae3af8f7f27bbde7aa3eea40
|
[
"MIT"
] | null | null | null |
compiler_idioms/idiom/implementations/remainder_signed_todo.py
|
fkie-cad/pidarci
|
7c9ab0af202c675fae3af8f7f27bbde7aa3eea40
|
[
"MIT"
] | null | null | null |
import json
from typing import List, Dict
from icecream import ic
from compiler_idioms.idiom.instruction_sequence import InstructionSequence
from compiler_idioms.idiom.utils.magic import compute_magic_numbers_if_not_exists
from compiler_idioms.instruction import from_anonymized_pattern, Instruction
from compiler_idioms.match import Match
from config import TEST_DIR, ROOT
#TEST_PATTERN_PATH = TEST_DIR / "mods-pointer.json"
TEST_PATTERN_PATH = TEST_DIR / "patterns-mods-O0.json"
PATTERN_DIR = ROOT / 'patterns'
HEX_BASE = 16
class SignedRemainderInstructionSequence(InstructionSequence):
def __init__(self):
sequences = self._load_sequences_from_file()
# with TEST_PATTERN_PATH.open('r') as f:
# seq = json.load(f)
# print(seq)
# sequences = [from_anonymized_pattern(seq['pattern'])]
self.magic_table = compute_magic_numbers_if_not_exists()
super().__init__(sequences)
def search(self, sequence: List[Instruction], original_constants: Dict[str, str], original_registers: Dict[str, str]) -> Match:
if match := super().search(sequence, original_constants, original_registers):
match.operation = "modulo"
match.operand = self._get_register_operand(original_registers)
match.constant = self._get_original_constant_from_magic(original_constants)
if not match.constant:
return None
return match
def _get_register_operand(self, original_registers: Dict[str, str]):
return original_registers.get("reg_1", [])
def _get_original_constant_from_magic(self, original_constants: Dict[str, str]) -> int:
magic = int(original_constants.get("const_0"), HEX_BASE)
power = int(original_constants.get("const_1"), HEX_BASE) + int(original_constants.get("const_2"), HEX_BASE)
return self.magic_table.get((magic, power))
@staticmethod
def _load_sequences_from_file():
sequences = []
        for pattern_file in PATTERN_DIR.glob("*mods*"):
            try:
                with pattern_file.open("r") as f:
                    data = json.load(f)
                    for seq in data:
                        pattern = seq.get("sequence")
                        anonymized_instruction_list = from_anonymized_pattern(pattern)
                        if anonymized_instruction_list:
                            sequences.append(anonymized_instruction_list)
            except FileNotFoundError:
                print("No pattern file for signed remainder found")
return sequences
if __name__ == "__main__":
idiom = SignedRemainderInstructionSequence()
print(idiom.magic_table)
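The magic_table above maps (magic, power) pairs back to original divisors. As a standalone illustration of the compiler idiom this class matches (not part of the module), the classic 32-bit signed constant for divisor 3 is 0x55555556: multiply, keep the high 32 bits, correct for negative inputs, then recover the remainder from the quotient.
import math

MAGIC, POWER, DIVISOR = 0x55555556, 32, 3   # textbook values for signed division by 3

def remainder_via_magic(n: int) -> int:
    q = (n * MAGIC) >> POWER    # arithmetic shift keeps the high half of the product
    q += n < 0                  # sign correction toward truncating division
    return n - q * DIVISOR      # remainder recovered from the quotient

for n in (-100, -7, -1, 0, 1, 7, 100):
    assert remainder_via_magic(n) == n - math.trunc(n / DIVISOR) * DIVISOR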
| 40.545455
| 131
| 0.67713
|
import json
from typing import List, Dict
from icecream import ic
from compiler_idioms.idiom.instruction_sequence import InstructionSequence
from compiler_idioms.idiom.utils.magic import compute_magic_numbers_if_not_exists
from compiler_idioms.instruction import from_anonymized_pattern, Instruction
from compiler_idioms.match import Match
from config import TEST_DIR, ROOT
TEST_PATTERN_PATH = TEST_DIR / "patterns-mods-O0.json"
PATTERN_DIR = ROOT / 'patterns'
HEX_BASE = 16
class SignedRemainderInstructionSequence(InstructionSequence):
def __init__(self):
sequences = self._load_sequences_from_file()
self.magic_table = compute_magic_numbers_if_not_exists()
super().__init__(sequences)
def search(self, sequence: List[Instruction], original_constants: Dict[str, str], original_registers: Dict[str, str]) -> Match:
if match := super().search(sequence, original_constants, original_registers):
match.operation = "modulo"
match.operand = self._get_register_operand(original_registers)
match.constant = self._get_original_constant_from_magic(original_constants)
if not match.constant:
return None
return match
def _get_register_operand(self, original_registers: Dict[str, str]):
return original_registers.get("reg_1", [])
def _get_original_constant_from_magic(self, original_constants: Dict[str, str]) -> int:
magic = int(original_constants.get("const_0"), HEX_BASE)
power = int(original_constants.get("const_1"), HEX_BASE) + int(original_constants.get("const_2"), HEX_BASE)
return self.magic_table.get((magic, power))
@staticmethod
def _load_sequences_from_file():
sequences = []
        for pattern_file in PATTERN_DIR.glob("*mods*"):
            try:
                with pattern_file.open("r") as f:
                    data = json.load(f)
                    for seq in data:
                        pattern = seq.get("sequence")
                        anonymized_instruction_list = from_anonymized_pattern(pattern)
                        if anonymized_instruction_list:
                            sequences.append(anonymized_instruction_list)
            except FileNotFoundError:
                print("No pattern file for signed remainder found")
return sequences
if __name__ == "__main__":
idiom = SignedRemainderInstructionSequence()
print(idiom.magic_table)
| true
| true
|
79034e703aa4db0e2d015ab307a41a96377bd4b5
| 6,173
|
py
|
Python
|
wbml/data/data.py
|
wesselb/wbml
|
06bf71777ab9a75ef71845f95f91755626b37ddf
|
[
"MIT"
] | 4
|
2019-12-04T11:30:34.000Z
|
2022-02-24T09:16:28.000Z
|
wbml/data/data.py
|
wesselb/wbml
|
06bf71777ab9a75ef71845f95f91755626b37ddf
|
[
"MIT"
] | null | null | null |
wbml/data/data.py
|
wesselb/wbml
|
06bf71777ab9a75ef71845f95f91755626b37ddf
|
[
"MIT"
] | 1
|
2018-10-14T13:10:39.000Z
|
2018-10-14T13:10:39.000Z
|
import datetime
import os
import shutil
import subprocess
import urllib.request
from contextlib import closing
import numpy as np
import pandas as pd
import requests
import wbml.out
__all__ = [
"DependencyError",
"resource",
"dependency",
"asserted_dependency",
"split_df",
"data_path",
"date_to_decimal_year",
]
class DependencyError(AssertionError):
"""Exception raised in case of an erroneous dependency."""
def resource(target, url, post=False, **kw_args):
"""Specify a dependency on an online resource.
Further takes in keyword arguments that are passed to the appropriate method
from :mod:`requests` or :mod:`urllib`.
Args:
target (str): Target file.
url (str): Source URL.
post (bool, optional): Make a POST request instead of a GET request.
Only applicable if the URL starts with "http" or "https". Defaults
to `False`.
"""
if not os.path.exists(target):
with wbml.out.Section("Downloading file"):
wbml.out.kv("Source", url)
wbml.out.kv("Target", target)
# Ensure that all directories in the path exist.
make_dirs(target)
# If the URL starts with "ftp", use the :mod:`urllib` library.
if url.startswith("ftp"):
with closing(urllib.request.urlopen(url, **kw_args)) as r:
with open(target, "wb") as f:
shutil.copyfileobj(r, f)
# By default, use the :mod:`requests` library.
else:
request = requests.post if post else requests.get
with request(url, stream=True, **kw_args) as r:
with open(target, "wb") as f:
shutil.copyfileobj(r.raw, f)
def dependency(target, source, commands):
"""Specify a dependency that is generated from an existing file.
Args:
target (str): Target file.
source (str): Source file.
commands (list[str]): List of commands to generate target file.
"""
if not os.path.exists(target):
with wbml.out.Section("Generating file"):
wbml.out.kv("Source", source)
wbml.out.kv("Target", target)
# Check that the source exists.
if not os.path.exists(source):
raise DependencyError(
f'Source "{source}" asserted to exist, but it does not.'
)
# Save current working directory.
current_wd = os.getcwd()
# Ensure that all directories in the path exist.
make_dirs(target)
# Perform commands.
for command in commands:
wbml.out.out(command)
# Change working directory to directory of target file, run
# command, and restore working directory afterwards.
os.chdir(os.path.dirname(target))
subprocess.call(command, shell=True)
os.chdir(current_wd)
def asserted_dependency(target):
"""Specify a dependency that cannot be fetched.
Args:
target (str): Target file.
"""
if not os.path.exists(target):
raise DependencyError(
f'Dependency "{target}" is asserted to exist, '
f"but it does not, and it cannot be "
f"automatically fetched. Please put the file "
f"into place manually."
)
def make_dirs(path):
"""Make the directories in the path of a file.
Args:
        path (str): Path of a file.
"""
os.makedirs(os.path.dirname(path), exist_ok=True)
def data_path(*xs):
"""Get the path of a data file.
Args:
*xs (str): Parts of the path.
Returns:
str: Absolute path.
"""
return os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, "data", *xs)
)
def split_df(df, index_range, columns, iloc=False):
"""Split a data frame by selecting from columns a particular range.
Args:
df (:class:`pd.DataFrame`): Data frame to split.
index_range (tuple): Tuple containing lower and upper limit of the
range to split the index by. If `index_range = (a, b)`, then
`[a, b)` is taken.
columns (list[object]): Columns to select.
iloc (bool, optional): The index range is the integer location instead
of the index value. Defaults to `False`.
Returns:
tuple[:class:`pd.DataFrame`]: Selected rows from selected columns
and the remainder.
"""
if iloc:
inds = np.arange(df.shape[0])
rows = (inds >= index_range[0]) & (inds < index_range[1])
else:
rows = (df.index >= index_range[0]) & (df.index < index_range[1])
selected = pd.DataFrame([df[name][rows] for name in columns]).T
remainder = pd.DataFrame(
[df[name][~rows] for name in columns]
+ [df[name] for name in set(df.columns) - set(columns)]
).T
# Fix order of columns.
selected_inds = [i for i, c in enumerate(df.columns) if c in columns]
selected = selected.reindex(df.columns[np.array(selected_inds)], axis=1)
remainder = remainder.reindex(df.columns, axis=1)
return selected, remainder
def date_to_decimal_year(date, format=None):
"""Convert a date to decimal year.
Args:
        date (str or datetime.date): Date to convert; a string when format is given.
format (str, optional): Format of the date if a conversion is needed.
Returns:
float: Decimal year corresponding to the date.
"""
if format:
date = datetime.datetime.strptime(date, format)
start = datetime.date(date.year, 1, 1).toordinal()
year_length = datetime.date(date.year + 1, 1, 1).toordinal() - start
# Account for subday time.
subday_time = 0
if hasattr(date, "hour"):
subday_time += date.hour / year_length / 24
if hasattr(date, "minute"):
subday_time += date.minute / year_length / 24 / 60
if hasattr(date, "second"):
subday_time += date.second / year_length / 24 / 60 / 60
return date.year + float(date.toordinal() - start) / year_length + subday_time
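A quick hand check of the decimal-year arithmetic in date_to_decimal_year: 2 July 2020 is 183 ordinal days after 1 January in a 366-day leap year, so the result should be exactly 2020 + 183/366 = 2020.5. The sketch below replays the same formula standalone.
import datetime

d = datetime.date(2020, 7, 2)
start = datetime.date(2020, 1, 1).toordinal()
year_length = datetime.date(2021, 1, 1).toordinal() - start   # 366 for the 2020 leap year
decimal_year = d.year + (d.toordinal() - start) / year_length
assert decimal_year == 2020.5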
| 31.176768
| 82
| 0.599708
|
import datetime
import os
import shutil
import subprocess
import urllib.request
from contextlib import closing
import numpy as np
import pandas as pd
import requests
import wbml.out
__all__ = [
"DependencyError",
"resource",
"dependency",
"asserted_dependency",
"split_df",
"data_path",
"date_to_decimal_year",
]
class DependencyError(AssertionError):
def resource(target, url, post=False, **kw_args):
if not os.path.exists(target):
with wbml.out.Section("Downloading file"):
wbml.out.kv("Source", url)
wbml.out.kv("Target", target)
make_dirs(target)
if url.startswith("ftp"):
with closing(urllib.request.urlopen(url, **kw_args)) as r:
with open(target, "wb") as f:
shutil.copyfileobj(r, f)
else:
request = requests.post if post else requests.get
with request(url, stream=True, **kw_args) as r:
with open(target, "wb") as f:
shutil.copyfileobj(r.raw, f)
def dependency(target, source, commands):
if not os.path.exists(target):
with wbml.out.Section("Generating file"):
wbml.out.kv("Source", source)
wbml.out.kv("Target", target)
if not os.path.exists(source):
raise DependencyError(
f'Source "{source}" asserted to exist, but it does not.'
)
current_wd = os.getcwd()
make_dirs(target)
for command in commands:
wbml.out.out(command)
os.chdir(os.path.dirname(target))
subprocess.call(command, shell=True)
os.chdir(current_wd)
def asserted_dependency(target):
if not os.path.exists(target):
raise DependencyError(
f'Dependency "{target}" is asserted to exist, '
f"but it does not, and it cannot be "
f"automatically fetched. Please put the file "
f"into place manually."
)
def make_dirs(path):
os.makedirs(os.path.dirname(path), exist_ok=True)
def data_path(*xs):
return os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, "data", *xs)
)
def split_df(df, index_range, columns, iloc=False):
if iloc:
inds = np.arange(df.shape[0])
rows = (inds >= index_range[0]) & (inds < index_range[1])
else:
rows = (df.index >= index_range[0]) & (df.index < index_range[1])
selected = pd.DataFrame([df[name][rows] for name in columns]).T
remainder = pd.DataFrame(
[df[name][~rows] for name in columns]
+ [df[name] for name in set(df.columns) - set(columns)]
).T
selected_inds = [i for i, c in enumerate(df.columns) if c in columns]
selected = selected.reindex(df.columns[np.array(selected_inds)], axis=1)
remainder = remainder.reindex(df.columns, axis=1)
return selected, remainder
def date_to_decimal_year(date, format=None):
if format:
date = datetime.datetime.strptime(date, format)
start = datetime.date(date.year, 1, 1).toordinal()
year_length = datetime.date(date.year + 1, 1, 1).toordinal() - start
subday_time = 0
if hasattr(date, "hour"):
subday_time += date.hour / year_length / 24
if hasattr(date, "minute"):
subday_time += date.minute / year_length / 24 / 60
if hasattr(date, "second"):
subday_time += date.second / year_length / 24 / 60 / 60
return date.year + float(date.toordinal() - start) / year_length + subday_time
| true
| true
|
79034ec9623865f932bf2486fb45c24b26b52d42
| 2,208
|
py
|
Python
|
leetcode.com/python/98_Validate_Binary_Search_Tree.py
|
XSoyOscar/Algorithms
|
6e1626d4b0f7804494f0a651698966ad6fd0fe18
|
[
"MIT"
] | 713
|
2019-11-19T16:11:25.000Z
|
2022-03-31T02:27:52.000Z
|
leetcode.com/python/98_Validate_Binary_Search_Tree.py
|
arunsank/coding-interview-gym
|
8131e3a82795707e144fe55d765b6c15bdb97306
|
[
"MIT"
] | 7
|
2020-01-16T17:07:18.000Z
|
2021-11-15T18:24:39.000Z
|
leetcode.com/python/98_Validate_Binary_Search_Tree.py
|
arunsank/coding-interview-gym
|
8131e3a82795707e144fe55d765b6c15bdb97306
|
[
"MIT"
] | 393
|
2019-11-18T17:55:45.000Z
|
2022-03-28T20:26:32.000Z
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class BST:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
# Average: O(log(n)) time | O(1) space
# Worst: O(n) time | O(1) space
def insert(self, val):
currentNode = self
while True:
if val < currentNode.val:
if currentNode.left is None:
currentNode.left = BST(val)
break
else:
currentNode = currentNode.left
else:
if currentNode.right is None:
currentNode.right = BST(val)
break
else:
currentNode = currentNode.right
return self
import sys
class Solution(object):
def isValidBST(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
MAX = sys.maxint
MIN = -sys.maxint - 1
return self.isValidBSTHelper(root, MIN, MAX)
def isValidBSTHelper(self, root, minValue, maxValue):
if root is None:
return True
if root.left is None and root.right is None:
return minValue < root.val < maxValue
if root.val <= minValue or root.val >= maxValue:
return False
leftSubtreeIsValid = self.isValidBSTHelper(root.left, minValue, root.val)
rightSubtreeIsValid = self.isValidBSTHelper(root.right, root.val, maxValue)
return leftSubtreeIsValid and rightSubtreeIsValid
# driver/test code
# test_tree = BST(100).insert(5).insert(15).insert(5).insert(2).insert(1).insert(22) \
# .insert(1).insert(1).insert(3).insert(1).insert(1).insert(502).insert(55000) \
# .insert(204).insert(205).insert(207).insert(206).insert(208).insert(203) \
# .insert(-51).insert(-403).insert(1001).insert(57).insert(60).insert(4500)
test_tree = BST(2).insert(1).insert(4).insert(None).insert(None).insert(3).insert(6)
sol = Solution()
is_valid_bst = sol.isValidBST(test_tree)
print("Is BST valid ? - ", is_valid_bst)
| 30.666667
| 86
| 0.578804
|
class BST:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def insert(self, val):
currentNode = self
while True:
if val < currentNode.val:
if currentNode.left is None:
currentNode.left = BST(val)
break
else:
currentNode = currentNode.left
else:
if currentNode.right is None:
currentNode.right = BST(val)
break
else:
currentNode = currentNode.right
return self
import sys
class Solution(object):
def isValidBST(self, root):
MAX = sys.maxint
MIN = -sys.maxint - 1
return self.isValidBSTHelper(root, MIN, MAX)
def isValidBSTHelper(self, root, minValue, maxValue):
if root is None:
return True
if root.left is None and root.right is None:
return minValue < root.val < maxValue
if root.val <= minValue or root.val >= maxValue:
return False
leftSubtreeIsValid = self.isValidBSTHelper(root.left, minValue, root.val)
rightSubtreeIsValid = self.isValidBSTHelper(root.right, root.val, maxValue)
return leftSubtreeIsValid and rightSubtreeIsValid
test_tree = BST(2).insert(1).insert(4).insert(None).insert(None).insert(3).insert(6)
sol = Solution()
is_valid_bst = sol.isValidBST(test_tree)
print("Is BST valid ? - ", is_valid_bst)
| true
| true
|
79034f6c6c9dc438a9c2515124b624638a9505d1
| 19,318
|
py
|
Python
|
veracode_api_py/api.py
|
DaYuM/veracode-api-py
|
12965d8919d9a7752398e7cd19bdcc4a81bc3c9e
|
[
"MIT"
] | null | null | null |
veracode_api_py/api.py
|
DaYuM/veracode-api-py
|
12965d8919d9a7752398e7cd19bdcc4a81bc3c9e
|
[
"MIT"
] | null | null | null |
veracode_api_py/api.py
|
DaYuM/veracode-api-py
|
12965d8919d9a7752398e7cd19bdcc4a81bc3c9e
|
[
"MIT"
] | null | null | null |
# Purpose: API utilities
#
# Notes: API credentials must be enabled on Veracode account and placed in ~/.veracode/credentials like
#
# [default]
# veracode_api_key_id = <YOUR_API_KEY_ID>
# veracode_api_key_secret = <YOUR_API_KEY_SECRET>
#
# and file permission set appropriately (chmod 600)
import requests
import logging
from requests.adapters import HTTPAdapter
from typing import List
from veracode_api_signing.exceptions import VeracodeAPISigningException
from veracode_api_signing.plugin_requests import RequestsAuthPluginVeracodeHMAC
from .constants import Constants
from .exceptions import VeracodeAPIError
from .applications import Applications, Sandboxes, CustomFields
from .findings import Findings, SummaryReport
from .policy import Policies
from .sca import ComponentActivity, Workspaces
from .collections import Collections
from .identity import Users, Teams, BusinessUnits, APICredentials, Roles
from .healthcheck import Healthcheck
from .dynamic import Analyses, Scans, Occurrences, Configuration, CodeGroups, ScanCapacitySummary, ScanOccurrences, ScannerVariables, DynUtils
from .xmlapi import XMLAPI
class VeracodeAPI:
def __init__(self, proxies=None):
self.baseurl = 'https://analysiscenter.veracode.com/api'
requests.Session().mount(self.baseurl, HTTPAdapter(max_retries=3))
self.proxies = proxies
self.retry_seconds = 120
self.connect_error_msg = "Connection Error"
#xml apis
def get_app_list(self):
return XMLAPI().get_app_list()
def get_app_info(self, app_id):
return XMLAPI().get_app_info(app_id)
def get_sandbox_list(self, app_id):
return XMLAPI().get_sandbox_list(app_id)
def get_build_list(self, app_id, sandbox_id=None):
return XMLAPI().get_build_list(app_id, sandbox_id)
def get_build_info(self, app_id, build_id=None, sandbox_id=None):
return XMLAPI().get_build_info(app_id,build_id,sandbox_id)
def get_detailed_report(self, build_id):
return XMLAPI().get_detailed_report(build_id)
def set_mitigation_info(self,build_id,flaw_id_list,action,comment):
return XMLAPI().set_mitigation_info(build_id,flaw_id_list,action,comment)
def generate_archer(self,payload):
return XMLAPI().generate_archer(payload)
def download_archer(self, token=None):
return XMLAPI().download_archer(token)
# rest apis
## Healthcheck APIs
def healthcheck(self):
return Healthcheck().healthcheck()
def status(self):
return Healthcheck().status()
## Application and Sandbox APIs
def get_apps(self):
return Applications().get_all()
def get_app (self,guid=None,legacy_id=None):
return Applications().get(guid,legacy_id)
def get_app_by_name (self,appname):
return Applications().get_by_name(appname)
def create_app(self,app_name,business_criticality, business_unit=None, teams=[]):
return Applications().create(app_name,business_criticality,business_unit,teams)
def delete_app (self,guid):
return Applications().delete(guid)
def get_custom_fields (self):
return CustomFields().get_all()
def get_app_sandboxes (self,guid):
return Sandboxes().get_all(guid)
def create_sandbox (self, app, name, auto_recreate=False, custom_fields=[]):
return Sandboxes().create(app,name,auto_recreate,custom_fields)
def update_sandbox (self, app, sandbox, name, auto_recreate=False, custom_fields=[]):
return Sandboxes().update(app,sandbox,name,auto_recreate,custom_fields)
def delete_sandbox (self, app, sandbox):
return Sandboxes().delete(app,sandbox)
# Policy APIs
def get_policies (self):
return Policies().get_all()
def get_policy (self,guid):
return Policies().get(guid)
def create_policy(self, name, description, vendor_policy=False, finding_rules=[], scan_frequency_rules=[], grace_periods={}):
return Policies().create(name, description, vendor_policy, finding_rules, scan_frequency_rules, grace_periods)
def delete_policy (self,guid):
return Policies().delete(guid)
def update_policy(self, guid, name, description, vendor_policy=False, finding_rules=[], scan_frequency_rules=[], grace_periods={}):
return Policies().update(guid, name, description, vendor_policy, finding_rules, scan_frequency_rules, grace_periods)
# Findings and Reporting APIs
def get_findings(self,app,scantype='STATIC',annot='TRUE',request_params=None,sandbox=None):
return Findings().get_findings(app,scantype,annot,request_params,sandbox)
def get_static_flaw_info(self,app,issueid,sandbox=None):
return Findings().get_static_flaw_info(app,issueid,sandbox)
def get_dynamic_flaw_info(self,app,issueid):
return Findings().get_dynamic_flaw_info(app,issueid)
def get_summary_report(self,app,sandbox=None):
return SummaryReport().get_summary_report(app,sandbox)
def add_annotation(self,app,issue_list,comment,action,sandbox=None):
return Findings().add_annotation(app,issue_list,comment,action,sandbox)
def match_findings(self,origin_finding,potential_matches,approved_findings_only=True):
return Findings().match(origin_finding,potential_matches,approved_findings_only)
## Collections APIs
def get_collections(self):
return Collections().get_all()
def get_collections_by_name(self,collection_name):
return Collections().get_by_name(collection_name)
def get_collections_by_business_unit(self,business_unit_name):
return Collections().get_by_business_unit(business_unit_name)
def get_collections_statistics(self):
return Collections().get_statistics()
def get_collection(self,guid):
return Collections().get(guid)
def get_collection_assets(self,guid):
return Collections().get_assets(guid)
def create_collection(self,name,description="",tags='',business_unit_guid=None,custom_fields=[],assets=[]):
return Collections().create(name,description,tags,business_unit_guid,custom_fields,assets)
def update_collection(self,guid,name,description="",tags="",business_unit_guid=None,custom_fields=[],assets=[]):
        return Collections().update(guid,name,description,tags,business_unit_guid,custom_fields,assets)
def delete_collection(self,guid):
return Collections().delete(guid)
## Identity APIs
def get_users(self):
return Users().get_all()
def get_user_self (self):
return Users().get_self()
def get_user(self,user_guid):
return Users().get(user_guid)
def get_user_by_name(self,username):
return Users().get_by_name(username)
def get_user_by_search(self, search_term=None, api_id=None, role_id=None, login_status=None, saml_user=None, team_id=None, detailed=False, user_type=None, request_params=None):
return Users().get_user_search(search_term,api_id,role_id,login_status,saml_user,team_id,detailed,user_type,request_params)
def create_user (self,email,firstname,lastname,username=None,type="HUMAN",roles=[],teams=[],mfa=False):
return Users().create(email,firstname,lastname,username,type,roles,teams,mfa=mfa)
def update_user_roles (self,user_guid,roles):
return Users().update_roles(user_guid,roles)
def update_user (self,user_guid,changes):
return Users().update(user_guid,changes)
def update_user_email_address (self,user_guid,email_address,ignore_verification=False):
return Users().update_email_address(user_guid,email_address,ignore_verification)
def send_password_reset (self,user_legacy_id):
return Users().reset_password(user_legacy_id)
def disable_user (self,user_guid):
return Users().disable(user_guid)
def delete_user (self,user_guid):
return Users().delete(user_guid)
def get_teams (self, all_for_org=False):
return Teams().get_all()
def create_team (self, team_name, business_unit=None, members=[]):
return Teams().create(team_name,business_unit,members)
def update_team (self, team_guid, team_name="", business_unit=None, members=[]):
return Teams().update(team_guid,team_name,business_unit,members)
def delete_team (self, team_guid):
return Teams().delete(team_guid)
def get_business_units (self):
return BusinessUnits().get_all()
def get_business_unit (self, guid):
return BusinessUnits().get(guid)
def create_business_unit (self, name, teams=[]):
return BusinessUnits().create(name,teams)
def update_business_unit (self, guid, name='', teams=[]):
return BusinessUnits().update(guid,name,teams)
def delete_business_unit (self, guid):
return BusinessUnits().delete(guid)
def get_creds (self,api_id=None):
        if api_id is not None:
return APICredentials().get(api_id)
else:
return APICredentials().get_self()
def renew_creds (self):
return APICredentials().renew()
def revoke_creds (self, api_id):
return APICredentials().revoke(api_id)
def get_roles (self):
return Roles().get_all()
## SCA APIs - note must be human user to use these, not API user
def get_workspaces(self):
return Workspaces().get_all()
def get_workspace_by_name(self,name):
return Workspaces().get_by_name(name)
def create_workspace(self,name):
return Workspaces().create(name)
def add_workspace_team(self,workspace_guid,team_id):
return Workspaces().add_team(workspace_guid,team_id)
def delete_workspace(self,workspace_guid):
return Workspaces().delete(workspace_guid)
def get_projects(self,workspace_guid):
return Workspaces().get_projects(workspace_guid)
def get_project(self,workspace_guid,project_guid):
return Workspaces().get_project(workspace_guid,project_guid)
def get_project_issues(self,workspace_guid,project_guid):
return Workspaces().get_project_issues(workspace_guid,project_guid)
def get_project_libraries(self,workspace_guid,project_guid):
return Workspaces().get_project_libraries(workspace_guid,project_guid)
def get_agents(self,workspace_guid):
return Workspaces().get_agents(workspace_guid)
def get_agent(self,workspace_guid,agent_guid):
return Workspaces().get_agent(workspace_guid,agent_guid)
def create_agent(self,workspace_guid,name,agent_type='CLI'):
return Workspaces().create_agent(workspace_guid,name,agent_type)
def get_agent_tokens(self,workspace_guid,agent_guid):
return Workspaces().get_agent_tokens(workspace_guid,agent_guid)
def get_agent_token(self,workspace_guid,agent_guid,token_id):
return Workspaces().get_agent_token(workspace_guid,agent_guid,token_id)
def regenerate_agent_token(self,workspace_guid,agent_guid):
return Workspaces().regenerate_agent_token(workspace_guid,agent_guid)
def revoke_agent_token(self,workspace_guid,agent_guid,token_id):
return Workspaces().revoke_agent_token(workspace_guid,agent_guid,token_id)
def get_issues(self,workspace_guid):
return Workspaces().get_issues(workspace_guid)
def get_issue(self,issue_id):
        return Workspaces().get_issue(issue_id)
def get_libraries(self,workspace_guid,unmatched=False):
return Workspaces().get_libraries(workspace_guid, unmatched)
def get_library(self,library_id):
return Workspaces().get_library(library_id)
def get_vulnerability(self,vulnerability_id):
return Workspaces().get_vulnerability(vulnerability_id)
def get_license(self,license_id):
return Workspaces().get_license(license_id)
def get_sca_events(self,date_gte=None,event_group=None,event_type=None):
return Workspaces().get_events(date_gte,event_group,event_type)
def get_sca_scan(self,scan_id):
return Workspaces().get_scan(scan_id)
def get_component_activity(self,component_id):
return ComponentActivity().get(component_id)
#dynamic APIs
def get_analyses(self):
return Analyses().get_all()
def get_analyses_by_name(self,name):
return Analyses().get_by_name(analysis_name=name)
def get_analyses_by_target_url(self,url):
return Analyses().get_by_target_url(target_url=url)
def get_analyses_by_search_term(self,search_term):
return Analyses().get_by_search_term(search_term=search_term)
def get_analysis(self,analysis_id):
return Analyses().get(guid=analysis_id)
def get_analysis_audits(self,analysis_id):
return Analyses().get_audits(guid=analysis_id)
def get_analysis_scans(self,analysis_id):
return Analyses().get_scans(guid=analysis_id)
def get_analysis_scanner_variables(self,analysis_id):
return Analyses().get_scanner_variables(guid=analysis_id)
def create_analysis(self,name,scans,schedule_frequency='ONCE',business_unit_guid=None,email=None,owner=None):
return Analyses().create(name,scans,schedule_frequency,business_unit_guid,email,owner)
def update_analysis(self,guid,name,scans,schedule_frequency='ONCE',business_unit_guid=None,email=None,owner=None):
return Analyses().update(guid,name,scans,schedule_frequency,business_unit_guid,email,owner)
def update_analysis_scanner_variable(self,analysis_guid,scanner_variable_guid,reference_key,value,description):
return Analyses().update_scanner_variable(analysis_guid,scanner_variable_guid,reference_key,value,description)
def delete_analysis_scanner_variable(self,analysis_guid,scanner_variable_guid):
return Analyses().delete_scanner_variable(analysis_guid,scanner_variable_guid)
def delete_analysis(self,analysis_guid):
return Analyses().delete(guid=analysis_guid)
def get_dyn_scan(self,scan_guid):
return Scans().get(guid=scan_guid)
def get_dyn_scan_audits(self,scan_guid):
return Scans().get_audits(guid=scan_guid)
def get_dyn_scan_config(self,scan_guid):
return Scans().get_configuration(guid=scan_guid)
def update_dyn_scan(self,scan_guid,scan):
return Scans().update(guid=scan_guid,scan=scan)
def delete_dyn_scan(self,scan_guid):
return Scans().delete(guid=scan_guid)
def get_scan_scanner_variables(self,scan_id):
return Scans().get_scanner_variables(guid=scan_id)
def update_scan_scanner_variable(self,scan_guid,scanner_variable_guid,reference_key,value,description):
return Scans().update_scanner_variable(scan_guid,scanner_variable_guid,reference_key,value,description)
def delete_scan_scanner_variable(self,scan_guid,scanner_variable_guid):
return Scans().delete_scanner_variable(scan_guid,scanner_variable_guid)
def get_analysis_occurrences(self):
return Occurrences().get_all()
def get_analysis_occurrence(self,occurrence_guid):
return Occurrences().get(guid=occurrence_guid)
def stop_analysis_occurrence(self,occurrence_guid,save_or_delete):
return Occurrences().stop(guid=occurrence_guid,save_or_delete=save_or_delete)
def get_scan_occurrences(self,occurrence_guid):
return Occurrences().get_scan_occurrences(guid=occurrence_guid)
def get_scan_occurrence(self,scan_occ_guid):
return ScanOccurrences().get(guid=scan_occ_guid)
def stop_scan_occurrence(self,scan_occ_guid,save_or_delete):
return ScanOccurrences().stop(guid=scan_occ_guid, save_or_delete=save_or_delete)
def get_scan_occurrence_configuration(self,scan_occ_guid):
return ScanOccurrences().get_configuration(guid=scan_occ_guid)
def get_scan_occurrence_verification_report(self,scan_occ_guid):
return ScanOccurrences().get_verification_report(guid=scan_occ_guid)
def get_scan_occurrence_notes_report(self,scan_occ_guid):
return ScanOccurrences().get_scan_notes_report(guid=scan_occ_guid)
def get_scan_occurrence_screenshots(self,scan_occ_guid):
return ScanOccurrences().get_screenshots(guid=scan_occ_guid)
def get_codegroups(self):
return CodeGroups().get_all()
def get_codegroup(self,name):
return CodeGroups().get(name=name)
def get_dynamic_configuration(self):
return Configuration().get()
def get_dynamic_scan_capacity_summary(self):
return ScanCapacitySummary().get()
def get_global_scanner_variables(self):
return ScannerVariables().get_all()
def get_global_scanner_variable(self,guid):
return ScannerVariables().get(guid)
def create_global_scanner_variable(self,reference_key,value,description):
return ScannerVariables().create(reference_key,value,description)
def update_global_scanner_variable(self,guid,reference_key,value,description):
return ScannerVariables().update(guid,reference_key,value,description)
def delete_global_scanner_variable(self,guid):
return ScannerVariables().delete(guid)
def dyn_setup_user_agent(self,custom_header,type):
return DynUtils().setup_user_agent(custom_header,type)
def dyn_setup_custom_host(self,host_name,ip_address):
return DynUtils().setup_custom_host(host_name,ip_address)
def dyn_setup_blocklist(self, urls:List):
return DynUtils().setup_blocklist(urls)
def dyn_setup_url(self,url,directory_restriction_type='DIRECTORY_AND_SUBDIRECTORY',http_and_https=True):
return DynUtils().setup_url(url,directory_restriction_type,http_and_https)
    def dyn_setup_scan_setting(self, blocklist_configs: list, custom_hosts: List, user_agent=None):
return DynUtils().setup_scan_setting(blocklist_configs,custom_hosts,user_agent)
def dyn_setup_scan_contact_info(self,email,first_and_last_name,telephone):
return DynUtils().setup_scan_contact_info(email,first_and_last_name,telephone)
def dyn_setup_crawl_script(self,script_body,script_type='SELENIUM'):
return DynUtils().setup_crawl_script(script_body,script_type)
def dyn_setup_crawl_configuration(self,scripts:List,disabled=False):
return DynUtils().setup_crawl_configuration(scripts,disabled)
def dyn_setup_login_logout_script(self,script_body,script_type='SELENIUM'):
return DynUtils().setup_login_logout_script(script_body,script_type)
def dyn_setup_auth(self,authtype,username,password,domain=None,base64_pkcs12=None,cert_name=None, login_script_data=None, logout_script_data=None):
return DynUtils().setup_auth(authtype,username,password,domain,base64_pkcs12,cert_name,login_script_data,logout_script_data)
def dyn_setup_auth_config(self,authentication_node:dict):
return DynUtils().setup_auth_config(authentication_node)
def dyn_setup_scan_config_request(self, url, allowed_hosts:List, auth_config=None, crawl_config=None, scan_setting=None):
return DynUtils().setup_scan_config_request(url,allowed_hosts,auth_config,crawl_config,scan_setting)
def dyn_setup_scan(self, scan_config_request, scan_contact_info=None, linked_app_guid=None):
return DynUtils().setup_scan(scan_config_request,scan_contact_info, linked_app_guid)
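A hedged usage sketch for the wrapper class above; it assumes Veracode HMAC credentials are configured as described in the header comment, and the dictionary field names are assumed from the Applications REST payload shape rather than confirmed by this file.
api = VeracodeAPI()
apps = api.get_apps()                              # list of application profiles
for app in apps[:5]:
    print(app.get("profile", {}).get("name"))      # "profile"/"name" keys are an assumption
    # findings = api.get_findings(app["guid"])     # "guid" key assumed; fetches static findings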
| 39.184584
| 180
| 0.747904
|
import requests
import logging
from requests.adapters import HTTPAdapter
from typing import List
from veracode_api_signing.exceptions import VeracodeAPISigningException
from veracode_api_signing.plugin_requests import RequestsAuthPluginVeracodeHMAC
from .constants import Constants
from .exceptions import VeracodeAPIError
from .applications import Applications, Sandboxes, CustomFields
from .findings import Findings, SummaryReport
from .policy import Policies
from .sca import ComponentActivity, Workspaces
from .collections import Collections
from .identity import Users, Teams, BusinessUnits, APICredentials, Roles
from .healthcheck import Healthcheck
from .dynamic import Analyses, Scans, Occurrences, Configuration, CodeGroups, ScanCapacitySummary, ScanOccurrences, ScannerVariables, DynUtils
from .xmlapi import XMLAPI
class VeracodeAPI:
def __init__(self, proxies=None):
self.baseurl = 'https://analysiscenter.veracode.com/api'
requests.Session().mount(self.baseurl, HTTPAdapter(max_retries=3))
self.proxies = proxies
self.retry_seconds = 120
self.connect_error_msg = "Connection Error"
def get_app_list(self):
return XMLAPI().get_app_list()
def get_app_info(self, app_id):
return XMLAPI().get_app_info(app_id)
def get_sandbox_list(self, app_id):
return XMLAPI().get_sandbox_list(app_id)
def get_build_list(self, app_id, sandbox_id=None):
return XMLAPI().get_build_list(app_id, sandbox_id)
def get_build_info(self, app_id, build_id=None, sandbox_id=None):
return XMLAPI().get_build_info(app_id,build_id,sandbox_id)
def get_detailed_report(self, build_id):
return XMLAPI().get_detailed_report(build_id)
def set_mitigation_info(self,build_id,flaw_id_list,action,comment):
return XMLAPI().set_mitigation_info(build_id,flaw_id_list,action,comment)
def generate_archer(self,payload):
return XMLAPI().generate_archer(payload)
def download_archer(self, token=None):
return XMLAPI().download_archer(token)
    def healthcheck(self):
return Healthcheck().healthcheck()
def status(self):
return Healthcheck().status()
    def get_apps(self):
        return Applications().get_all()
def get_app (self,guid=None,legacy_id=None):
return Applications().get(guid,legacy_id)
def get_app_by_name (self,appname):
return Applications().get_by_name(appname)
def create_app(self,app_name,business_criticality, business_unit=None, teams=[]):
return Applications().create(app_name,business_criticality,business_unit,teams)
def delete_app (self,guid):
return Applications().delete(guid)
def get_custom_fields (self):
return CustomFields().get_all()
def get_app_sandboxes (self,guid):
return Sandboxes().get_all(guid)
def create_sandbox (self, app, name, auto_recreate=False, custom_fields=[]):
return Sandboxes().create(app,name,auto_recreate,custom_fields)
def update_sandbox (self, app, sandbox, name, auto_recreate=False, custom_fields=[]):
return Sandboxes().update(app,sandbox,name,auto_recreate,custom_fields)
def delete_sandbox (self, app, sandbox):
return Sandboxes().delete(app,sandbox)
def get_policies (self):
return Policies().get_all()
def get_policy (self,guid):
return Policies().get(guid)
def create_policy(self, name, description, vendor_policy=False, finding_rules=[], scan_frequency_rules=[], grace_periods={}):
return Policies().create(name, description, vendor_policy, finding_rules, scan_frequency_rules, grace_periods)
def delete_policy (self,guid):
return Policies().delete(guid)
def update_policy(self, guid, name, description, vendor_policy=False, finding_rules=[], scan_frequency_rules=[], grace_periods={}):
return Policies().update(guid, name, description, vendor_policy, finding_rules, scan_frequency_rules, grace_periods)
def get_findings(self,app,scantype='STATIC',annot='TRUE',request_params=None,sandbox=None):
return Findings().get_findings(app,scantype,annot,request_params,sandbox)
def get_static_flaw_info(self,app,issueid,sandbox=None):
return Findings().get_static_flaw_info(app,issueid,sandbox)
def get_dynamic_flaw_info(self,app,issueid):
return Findings().get_dynamic_flaw_info(app,issueid)
def get_summary_report(self,app,sandbox=None):
return SummaryReport().get_summary_report(app,sandbox)
def add_annotation(self,app,issue_list,comment,action,sandbox=None):
return Findings().add_annotation(app,issue_list,comment,action,sandbox)
def match_findings(self,origin_finding,potential_matches,approved_findings_only=True):
return Findings().match(origin_finding,potential_matches,approved_findings_only)
    def get_collections(self):
return Collections().get_all()
def get_collections_by_name(self,collection_name):
return Collections().get_by_name(collection_name)
def get_collections_by_business_unit(self,business_unit_name):
return Collections().get_by_business_unit(business_unit_name)
def get_collections_statistics(self):
return Collections().get_statistics()
def get_collection(self,guid):
return Collections().get(guid)
def get_collection_assets(self,guid):
return Collections().get_assets(guid)
def create_collection(self,name,description="",tags='',business_unit_guid=None,custom_fields=[],assets=[]):
return Collections().create(name,description,tags,business_unit_guid,custom_fields,assets)
def update_collection(self,guid,name,description="",tags="",business_unit_guid=None,custom_fields=[],assets=[]):
        return Collections().update(guid,name,description,tags,business_unit_guid,custom_fields,assets)
def delete_collection(self,guid):
return Collections().delete(guid)
    def get_users(self):
return Users().get_all()
def get_user_self (self):
return Users().get_self()
def get_user(self,user_guid):
return Users().get(user_guid)
def get_user_by_name(self,username):
return Users().get_by_name(username)
def get_user_by_search(self, search_term=None, api_id=None, role_id=None, login_status=None, saml_user=None, team_id=None, detailed=False, user_type=None, request_params=None):
return Users().get_user_search(search_term,api_id,role_id,login_status,saml_user,team_id,detailed,user_type,request_params)
def create_user (self,email,firstname,lastname,username=None,type="HUMAN",roles=[],teams=[],mfa=False):
return Users().create(email,firstname,lastname,username,type,roles,teams,mfa=mfa)
def update_user_roles (self,user_guid,roles):
return Users().update_roles(user_guid,roles)
def update_user (self,user_guid,changes):
return Users().update(user_guid,changes)
def update_user_email_address (self,user_guid,email_address,ignore_verification=False):
return Users().update_email_address(user_guid,email_address,ignore_verification)
def send_password_reset (self,user_legacy_id):
return Users().reset_password(user_legacy_id)
def disable_user (self,user_guid):
return Users().disable(user_guid)
def delete_user (self,user_guid):
return Users().delete(user_guid)
def get_teams (self, all_for_org=False):
return Teams().get_all()
def create_team (self, team_name, business_unit=None, members=[]):
return Teams().create(team_name,business_unit,members)
def update_team (self, team_guid, team_name="", business_unit=None, members=[]):
return Teams().update(team_guid,team_name,business_unit,members)
def delete_team (self, team_guid):
return Teams().delete(team_guid)
def get_business_units (self):
return BusinessUnits().get_all()
def get_business_unit (self, guid):
return BusinessUnits().get(guid)
def create_business_unit (self, name, teams=[]):
return BusinessUnits().create(name,teams)
def update_business_unit (self, guid, name='', teams=[]):
return BusinessUnits().update(guid,name,teams)
def delete_business_unit (self, guid):
return BusinessUnits().delete(guid)
def get_creds (self,api_id=None):
        if api_id is not None:
return APICredentials().get(api_id)
else:
return APICredentials().get_self()
def renew_creds (self):
return APICredentials().renew()
def revoke_creds (self, api_id):
return APICredentials().revoke(api_id)
def get_roles (self):
return Roles().get_all()
    def get_workspaces(self):
        return Workspaces().get_all()
def get_workspace_by_name(self,name):
return Workspaces().get_by_name(name)
def create_workspace(self,name):
return Workspaces().create(name)
def add_workspace_team(self,workspace_guid,team_id):
return Workspaces().add_team(workspace_guid,team_id)
def delete_workspace(self,workspace_guid):
return Workspaces().delete(workspace_guid)
def get_projects(self,workspace_guid):
return Workspaces().get_projects(workspace_guid)
def get_project(self,workspace_guid,project_guid):
return Workspaces().get_project(workspace_guid,project_guid)
def get_project_issues(self,workspace_guid,project_guid):
return Workspaces().get_project_issues(workspace_guid,project_guid)
def get_project_libraries(self,workspace_guid,project_guid):
return Workspaces().get_project_libraries(workspace_guid,project_guid)
def get_agents(self,workspace_guid):
return Workspaces().get_agents(workspace_guid)
def get_agent(self,workspace_guid,agent_guid):
return Workspaces().get_agent(workspace_guid,agent_guid)
def create_agent(self,workspace_guid,name,agent_type='CLI'):
return Workspaces().create_agent(workspace_guid,name,agent_type)
def get_agent_tokens(self,workspace_guid,agent_guid):
return Workspaces().get_agent_tokens(workspace_guid,agent_guid)
def get_agent_token(self,workspace_guid,agent_guid,token_id):
return Workspaces().get_agent_token(workspace_guid,agent_guid,token_id)
def regenerate_agent_token(self,workspace_guid,agent_guid):
return Workspaces().regenerate_agent_token(workspace_guid,agent_guid)
def revoke_agent_token(self,workspace_guid,agent_guid,token_id):
return Workspaces().revoke_agent_token(workspace_guid,agent_guid,token_id)
def get_issues(self,workspace_guid):
return Workspaces().get_issues(workspace_guid)
def get_issue(self,issue_id):
        return Workspaces().get_issue(issue_id)
def get_libraries(self,workspace_guid,unmatched=False):
return Workspaces().get_libraries(workspace_guid, unmatched)
def get_library(self,library_id):
return Workspaces().get_library(library_id)
def get_vulnerability(self,vulnerability_id):
return Workspaces().get_vulnerability(vulnerability_id)
def get_license(self,license_id):
return Workspaces().get_license(license_id)
def get_sca_events(self,date_gte=None,event_group=None,event_type=None):
return Workspaces().get_events(date_gte,event_group,event_type)
def get_sca_scan(self,scan_id):
return Workspaces().get_scan(scan_id)
def get_component_activity(self,component_id):
return ComponentActivity().get(component_id)
def get_analyses(self):
return Analyses().get_all()
def get_analyses_by_name(self,name):
return Analyses().get_by_name(analysis_name=name)
def get_analyses_by_target_url(self,url):
return Analyses().get_by_target_url(target_url=url)
def get_analyses_by_search_term(self,search_term):
return Analyses().get_by_search_term(search_term=search_term)
def get_analysis(self,analysis_id):
return Analyses().get(guid=analysis_id)
def get_analysis_audits(self,analysis_id):
return Analyses().get_audits(guid=analysis_id)
def get_analysis_scans(self,analysis_id):
return Analyses().get_scans(guid=analysis_id)
def get_analysis_scanner_variables(self,analysis_id):
return Analyses().get_scanner_variables(guid=analysis_id)
def create_analysis(self,name,scans,schedule_frequency='ONCE',business_unit_guid=None,email=None,owner=None):
return Analyses().create(name,scans,schedule_frequency,business_unit_guid,email,owner)
def update_analysis(self,guid,name,scans,schedule_frequency='ONCE',business_unit_guid=None,email=None,owner=None):
return Analyses().update(guid,name,scans,schedule_frequency,business_unit_guid,email,owner)
def update_analysis_scanner_variable(self,analysis_guid,scanner_variable_guid,reference_key,value,description):
return Analyses().update_scanner_variable(analysis_guid,scanner_variable_guid,reference_key,value,description)
def delete_analysis_scanner_variable(self,analysis_guid,scanner_variable_guid):
return Analyses().delete_scanner_variable(analysis_guid,scanner_variable_guid)
def delete_analysis(self,analysis_guid):
return Analyses().delete(guid=analysis_guid)
def get_dyn_scan(self,scan_guid):
return Scans().get(guid=scan_guid)
def get_dyn_scan_audits(self,scan_guid):
return Scans().get_audits(guid=scan_guid)
def get_dyn_scan_config(self,scan_guid):
return Scans().get_configuration(guid=scan_guid)
def update_dyn_scan(self,scan_guid,scan):
return Scans().update(guid=scan_guid,scan=scan)
def delete_dyn_scan(self,scan_guid):
return Scans().delete(guid=scan_guid)
def get_scan_scanner_variables(self,scan_id):
return Scans().get_scanner_variables(guid=scan_id)
def update_scan_scanner_variable(self,scan_guid,scanner_variable_guid,reference_key,value,description):
return Scans().update_scanner_variable(scan_guid,scanner_variable_guid,reference_key,value,description)
def delete_scan_scanner_variable(self,scan_guid,scanner_variable_guid):
return Scans().delete_scanner_variable(scan_guid,scanner_variable_guid)
def get_analysis_occurrences(self):
return Occurrences().get_all()
def get_analysis_occurrence(self,occurrence_guid):
return Occurrences().get(guid=occurrence_guid)
def stop_analysis_occurrence(self,occurrence_guid,save_or_delete):
return Occurrences().stop(guid=occurrence_guid,save_or_delete=save_or_delete)
def get_scan_occurrences(self,occurrence_guid):
return Occurrences().get_scan_occurrences(guid=occurrence_guid)
def get_scan_occurrence(self,scan_occ_guid):
return ScanOccurrences().get(guid=scan_occ_guid)
def stop_scan_occurrence(self,scan_occ_guid,save_or_delete):
return ScanOccurrences().stop(guid=scan_occ_guid, save_or_delete=save_or_delete)
def get_scan_occurrence_configuration(self,scan_occ_guid):
return ScanOccurrences().get_configuration(guid=scan_occ_guid)
def get_scan_occurrence_verification_report(self,scan_occ_guid):
return ScanOccurrences().get_verification_report(guid=scan_occ_guid)
def get_scan_occurrence_notes_report(self,scan_occ_guid):
return ScanOccurrences().get_scan_notes_report(guid=scan_occ_guid)
def get_scan_occurrence_screenshots(self,scan_occ_guid):
return ScanOccurrences().get_screenshots(guid=scan_occ_guid)
def get_codegroups(self):
return CodeGroups().get_all()
def get_codegroup(self,name):
return CodeGroups().get(name=name)
def get_dynamic_configuration(self):
return Configuration().get()
def get_dynamic_scan_capacity_summary(self):
return ScanCapacitySummary().get()
def get_global_scanner_variables(self):
return ScannerVariables().get_all()
def get_global_scanner_variable(self,guid):
return ScannerVariables().get(guid)
def create_global_scanner_variable(self,reference_key,value,description):
return ScannerVariables().create(reference_key,value,description)
def update_global_scanner_variable(self,guid,reference_key,value,description):
return ScannerVariables().update(guid,reference_key,value,description)
def delete_global_scanner_variable(self,guid):
return ScannerVariables().delete(guid)
def dyn_setup_user_agent(self,custom_header,type):
return DynUtils().setup_user_agent(custom_header,type)
def dyn_setup_custom_host(self,host_name,ip_address):
return DynUtils().setup_custom_host(host_name,ip_address)
def dyn_setup_blocklist(self, urls:List):
return DynUtils().setup_blocklist(urls)
def dyn_setup_url(self,url,directory_restriction_type='DIRECTORY_AND_SUBDIRECTORY',http_and_https=True):
return DynUtils().setup_url(url,directory_restriction_type,http_and_https)
    def dyn_setup_scan_setting(self, blocklist_configs: list, custom_hosts: List, user_agent=None):
return DynUtils().setup_scan_setting(blocklist_configs,custom_hosts,user_agent)
def dyn_setup_scan_contact_info(self,email,first_and_last_name,telephone):
return DynUtils().setup_scan_contact_info(email,first_and_last_name,telephone)
def dyn_setup_crawl_script(self,script_body,script_type='SELENIUM'):
return DynUtils().setup_crawl_script(script_body,script_type)
def dyn_setup_crawl_configuration(self,scripts:List,disabled=False):
return DynUtils().setup_crawl_configuration(scripts,disabled)
def dyn_setup_login_logout_script(self,script_body,script_type='SELENIUM'):
return DynUtils().setup_login_logout_script(script_body,script_type)
def dyn_setup_auth(self,authtype,username,password,domain=None,base64_pkcs12=None,cert_name=None, login_script_data=None, logout_script_data=None):
return DynUtils().setup_auth(authtype,username,password,domain,base64_pkcs12,cert_name,login_script_data,logout_script_data)
def dyn_setup_auth_config(self,authentication_node:dict):
return DynUtils().setup_auth_config(authentication_node)
def dyn_setup_scan_config_request(self, url, allowed_hosts:List, auth_config=None, crawl_config=None, scan_setting=None):
return DynUtils().setup_scan_config_request(url,allowed_hosts,auth_config,crawl_config,scan_setting)
def dyn_setup_scan(self, scan_config_request, scan_contact_info=None, linked_app_guid=None):
return DynUtils().setup_scan(scan_config_request,scan_contact_info, linked_app_guid)
| true
| true
|
790350441e4dd00cf820b2bcd99e03d0cf57cb67
| 8,010
|
py
|
Python
|
helper_servers/http_forwarder.py
|
stephenbradshaw/pentesting_stuff
|
be14765aa6c435e9a41b0a680d259fc0495c6ff1
|
[
"BSD-3-Clause"
] | 14
|
2018-07-21T02:56:10.000Z
|
2022-01-15T16:00:07.000Z
|
helper_servers/http_forwarder.py
|
stephenbradshaw/pentesting_stuff
|
be14765aa6c435e9a41b0a680d259fc0495c6ff1
|
[
"BSD-3-Clause"
] | null | null | null |
helper_servers/http_forwarder.py
|
stephenbradshaw/pentesting_stuff
|
be14765aa6c435e9a41b0a680d259fc0495c6ff1
|
[
"BSD-3-Clause"
] | 4
|
2017-11-16T16:06:15.000Z
|
2019-01-17T08:43:59.000Z
|
#!/usr/bin/env python
import SimpleHTTPServer
import SocketServer
import sys
import urllib
import logging
from optparse import OptionParser
class ResultsProvider(object):
'''Base class used to fetch data from server for forwarding'''
import requests
import socket
import time
def __init__(self, **kwargs):
'''Constructor with sensible requests defaults'''
self.session = self.requests.Session()
self.wait = kwargs.get('wait', 2.0)
self.session.verify = kwargs.get('verify', False)
self.session.timeout = kwargs.get('timeout', 5)
self.session.stream = kwargs.get('stream', False)
self.session.proxies = kwargs.get('proxies', {})
self.session.headers = kwargs.get('headers', {})
self.session.allow_redirects = kwargs.get('allow_redirects', True)
self.session.cookies = self.requests.utils.cookiejar_from_dict(kwargs.get('cookies', {}))
self.url = kwargs.get('url', None)
def doRequest(self, verb, url, **kwargs):
        '''Makes a web request with timeout support using the requests session'''
while 1:
try:
body = kwargs.pop('body') if kwargs.has_key('body') else None
rargs = {}
for a in ['data', 'json', 'params', 'headers']:
if kwargs.has_key(a):
rargs[a] = kwargs.pop(a)
req = self.requests.Request(verb, url, **rargs) # data, headers, params, json
prepped = req.prepare()
if body:
prepped.body = body
response = self.session.send(prepped, **kwargs) # other params here
break
except (self.socket.error, self.requests.exceptions.RequestException):
logging.exception('Retrying request in %.2f seconds...', self.wait)
self.time.sleep(self.wait)
continue
return response
def nextResult(self):
'''Redefine me to make the request and return the response.text'''
#return self.doRequest(url='http://site/whatever/' + str(calculated_value)).text
raise NotImplementedError
class ResultsProviderImpl(ResultsProvider):
'''Implementation for forwarding arbitrary requests to another server'''
def __init__(self, **kwargs):
super(ResultsProviderImpl, self).__init__(**kwargs)
self.hostname=kwargs.get('hostname')
self.protocol=kwargs.get('protocol', 'http')
self.port=kwargs.get('port')
def nextResult(self, verb, path, **kwargs):
r = self.doRequest(verb, '%s://%s:%s%s' %(self.protocol, self.hostname, self.port, path), **kwargs)
return r
class ThreadedTCPServer(SocketServer.ThreadingTCPServer):
'''Simple Threaded TCP server'''
pass
class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
'''Simple http server request handler'''
import datetime
counter=0
skip_headers = ['content-length', 'transfer-encoding', 'content-encoding', 'connection']
def print_debug(self, title, data):
sep = '=' * 40 + '\n'
dt = self.datetime.datetime.now()
dts = dt.strftime('%d/%m/%Y %H:%M:%S')
self.counter+=1
print sep + title + ' - ' + str(self.counter) + ' - ' + dts + '\n' + sep + data + '\n'
def send_response(self, code, message=None):
'''Redefine from original to get rid of extra headers'''
self.log_request(code)
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s %d %s\r\n" %
(self.protocol_version, code, message))
# print (self.protocol_version, code, message)
#self.send_header('Server', self.version_string())
#self.send_header('Date', self.date_time_string())
def do(self, verb, data=None):
args = {'headers' : self.headers.dict}
if data:
args['data'] = data
response = self.server.resultsProvider.nextResult(verb, self.path, **args)
if self.server.debug:
self.print_debug('HTTP Request Received', self.raw_requestline + str(self.headers) + '\r\n' + (data if data else ''))
self.send_response(response.status_code, response.reason)
for header in response.headers.iteritems():
if header[0].lower() not in self.skip_headers:
#self.print_debug('Header Sent', ' :'.join([header[0], header[1]]))
self.send_header(header[0], header[1])
self.send_header('Content-Length', int(len(response.content)))
self.send_header('Connection', 'close')
self.wfile.write('\r\n')
self.wfile.write(response.content)
if self.server.debug:
http_version = '.'.join([a for a in str(response.raw.version)])
version_line = 'HTTP/%s %s %s' %(http_version, response.status_code, response.reason)
headers = '\r\n'.join([ '%s : %s' %(a[0],a[1]) for a in response.headers.items()])
self.print_debug('HTTP Response Received', '\r\n'.join([version_line, headers, '\r\n' + response.content]))
#self.print_debug('Length of response', str(int(len(response.content))))
self.wfile.flush()
self.wfile.close()
def do_GET(self):
self.do('GET')
def do_HEAD(self):
self.do('HEAD')
def do_POST(self):
data = self.rfile.read(int(self.headers['Content-Length'])) if \
self.headers.has_key('Content-Length') else ''
self.do('POST', data=data)
def match_url(input):
return ((input.startswith('http://') or input.startswith('https://')) and \
input.endswith('/') and len(input.split('/')[2]) > 4 and len(input.split('/')) == 4)
if __name__ == '__main__':
parser = OptionParser(usage='%prog -u [url] [options]')
parser.add_option('-d', '--debug', dest='debug', action='store_true', help='show debugging messages')
parser.add_option('-u', '--url', dest='remoteurl', type='string', help='remote base url')
parser.add_option('-p', '--port', dest='port', type='int', default=8000, help='local listen port')
parser.add_option('-a', '--address', dest='address', type='string', default='0.0.0.0', help='local listen address')
parser.add_option('-x', '--proxy', dest='proxy', type='string', help='optional proxy to use in format http://address:port/')
opts, args = parser.parse_args()
if opts.remoteurl == None:
print 'Please provide a remote url using the -u --url option'
sys.exit()
elif not match_url(opts.remoteurl):
print 'Please enter remote url in format protocol://host[:port]/'
sys.exit()
try:
[protocol, _, host_port, _] = opts.remoteurl.split('/')
protocol = protocol.rstrip(':')
hostparts = host_port.split(':')
hostname = hostparts[0]
rport = int(hostparts[1]) if len(hostparts) > 1 else {'http' : 80, 'https' : 443}[protocol]
except:
print 'Please enter remote url in format protocol://host[:port]/'
sys.exit()
if opts.proxy:
if not match_url(opts.proxy) and not opts.proxy.startswith('https'):
print 'Please enter proxy in format http://host:port/'
sys.exit()
if opts.debug:
print 'Using proxy ' + opts.proxy
proxies = {protocol : opts.proxy}
else:
proxies = {}
httpd = ThreadedTCPServer((opts.address, opts.port), ServerHandler)
httpd.debug = opts.debug or False
# add the custom resultsprovider implementation
httpd.resultsProvider = ResultsProviderImpl(hostname=hostname, protocol=protocol, port=rport, proxies=proxies)
print "Serving at: http://%s:%s/, forwarding requests to %s" % (opts.address, str(opts.port), opts.remoteurl)
httpd.serve_forever()
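A quick way to smoke-test the forwarder above (the script itself is Python 2; this client sketch is ordinary Python 3 with requests, and both the listen port and the upstream URL are assumptions):

# Start the forwarder first, e.g.: python http_forwarder.py -u http://example.org/ -p 8000
import requests

# The forwarder should replay this request to the upstream host and return
# its response verbatim, minus the hop-by-hop headers it strips.
resp = requests.get('http://127.0.0.1:8000/', timeout=10)
print(resp.status_code, resp.headers.get('Content-Type'))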
| 37.605634
| 129
| 0.607241
|
import SimpleHTTPServer
import SocketServer
import sys
import urllib
import logging
from optparse import OptionParser
class ResultsProvider(object):
'''Base class used to fetch data from server for forwarding'''
import requests
import socket
import time
def __init__(self, **kwargs):
'''Constructor with sensible requests defaults'''
self.session = self.requests.Session()
self.wait = kwargs.get('wait', 2.0)
self.session.verify = kwargs.get('verify', False)
self.session.timeout = kwargs.get('timeout', 5)
self.session.stream = kwargs.get('stream', False)
self.session.proxies = kwargs.get('proxies', {})
self.session.headers = kwargs.get('headers', {})
self.session.allow_redirects = kwargs.get('allow_redirects', True)
self.session.cookies = self.requests.utils.cookiejar_from_dict(kwargs.get('cookies', {}))
self.url = kwargs.get('url', None)
def doRequest(self, verb, url, **kwargs):
        '''Makes web request with timeout support using requests session'''
while 1:
try:
body = kwargs.pop('body') if kwargs.has_key('body') else None
rargs = {}
for a in ['data', 'json', 'params', 'headers']:
if kwargs.has_key(a):
rargs[a] = kwargs.pop(a)
req = self.requests.Request(verb, url, **rargs)
prepped = req.prepare()
if body:
prepped.body = body
response = self.session.send(prepped, **kwargs)
break
except (self.socket.error, self.requests.exceptions.RequestException):
logging.exception('Retrying request in %.2f seconds...', self.wait)
self.time.sleep(self.wait)
continue
return response
def nextResult(self):
'''Redefine me to make the request and return the response.text'''
raise NotImplementedError
class ResultsProviderImpl(ResultsProvider):
'''Implementation for forwarding arbitrary requests to another server'''
def __init__(self, **kwargs):
super(ResultsProviderImpl, self).__init__(**kwargs)
self.hostname=kwargs.get('hostname')
self.protocol=kwargs.get('protocol', 'http')
self.port=kwargs.get('port')
def nextResult(self, verb, path, **kwargs):
r = self.doRequest(verb, '%s://%s:%s%s' %(self.protocol, self.hostname, self.port, path), **kwargs)
return r
class ThreadedTCPServer(SocketServer.ThreadingTCPServer):
'''Simple Threaded TCP server'''
pass
class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
'''Simple http server request handler'''
import datetime
counter=0
skip_headers = ['content-length', 'transfer-encoding', 'content-encoding', 'connection']
def print_debug(self, title, data):
sep = '=' * 40 + '\n'
dt = self.datetime.datetime.now()
dts = dt.strftime('%d/%m/%Y %H:%M:%S')
self.counter+=1
print sep + title + ' - ' + str(self.counter) + ' - ' + dts + '\n' + sep + data + '\n'
def send_response(self, code, message=None):
'''Redefine from original to get rid of extra headers'''
self.log_request(code)
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s %d %s\r\n" %
(self.protocol_version, code, message))
def do(self, verb, data=None):
args = {'headers' : self.headers.dict}
if data:
args['data'] = data
response = self.server.resultsProvider.nextResult(verb, self.path, **args)
if self.server.debug:
self.print_debug('HTTP Request Received', self.raw_requestline + str(self.headers) + '\r\n' + (data if data else ''))
self.send_response(response.status_code, response.reason)
for header in response.headers.iteritems():
if header[0].lower() not in self.skip_headers:
self.send_header(header[0], header[1])
self.send_header('Content-Length', int(len(response.content)))
self.send_header('Connection', 'close')
self.wfile.write('\r\n')
self.wfile.write(response.content)
if self.server.debug:
http_version = '.'.join([a for a in str(response.raw.version)])
version_line = 'HTTP/%s %s %s' %(http_version, response.status_code, response.reason)
headers = '\r\n'.join([ '%s : %s' %(a[0],a[1]) for a in response.headers.items()])
self.print_debug('HTTP Response Received', '\r\n'.join([version_line, headers, '\r\n' + response.content]))
self.wfile.flush()
self.wfile.close()
def do_GET(self):
self.do('GET')
def do_HEAD(self):
self.do('HEAD')
def do_POST(self):
data = self.rfile.read(int(self.headers['Content-Length'])) if \
self.headers.has_key('Content-Length') else ''
self.do('POST', data=data)
def match_url(input):
return ((input.startswith('http://') or input.startswith('https://')) and \
input.endswith('/') and len(input.split('/')[2]) > 4 and len(input.split('/')) == 4)
if __name__ == '__main__':
parser = OptionParser(usage='%prog -u [url] [options]')
parser.add_option('-d', '--debug', dest='debug', action='store_true', help='show debugging messages')
parser.add_option('-u', '--url', dest='remoteurl', type='string', help='remote base url')
parser.add_option('-p', '--port', dest='port', type='int', default=8000, help='local listen port')
parser.add_option('-a', '--address', dest='address', type='string', default='0.0.0.0', help='local listen address')
parser.add_option('-x', '--proxy', dest='proxy', type='string', help='optional proxy to use in format http://address:port/')
opts, args = parser.parse_args()
if opts.remoteurl == None:
print 'Please provide a remote url using the -u --url option'
sys.exit()
elif not match_url(opts.remoteurl):
print 'Please enter remote url in format protocol://host[:port]/'
sys.exit()
try:
[protocol, _, host_port, _] = opts.remoteurl.split('/')
protocol = protocol.rstrip(':')
hostparts = host_port.split(':')
hostname = hostparts[0]
rport = int(hostparts[1]) if len(hostparts) > 1 else {'http' : 80, 'https' : 443}[protocol]
except:
print 'Please enter remote url in format protocol://host[:port]/'
sys.exit()
if opts.proxy:
if not match_url(opts.proxy) and not opts.proxy.startswith('https'):
print 'Please enter proxy in format http://host:port/'
sys.exit()
if opts.debug:
print 'Using proxy ' + opts.proxy
proxies = {protocol : opts.proxy}
else:
proxies = {}
httpd = ThreadedTCPServer((opts.address, opts.port), ServerHandler)
httpd.debug = opts.debug or False
httpd.resultsProvider = ResultsProviderImpl(hostname=hostname, protocol=protocol, port=rport, proxies=proxies)
print "Serving at: http://%s:%s/, forwarding requests to %s" % (opts.address, str(opts.port), opts.remoteurl)
httpd.serve_forever()
| false
| true
|
790350ad5fa9f011c6bd6a86b1a5e229de30fae8
| 2,388
|
py
|
Python
|
amime/modules/anime/TV-SHORT/tvshort_trend/TVSHORT_TREND/tvshort_trend7.py
|
Myudi422/ccgnime_req
|
a0f7596ba101204539b4120dffa08912b6560efe
|
[
"MIT"
] | null | null | null |
amime/modules/anime/TV-SHORT/tvshort_trend/TVSHORT_TREND/tvshort_trend7.py
|
Myudi422/ccgnime_req
|
a0f7596ba101204539b4120dffa08912b6560efe
|
[
"MIT"
] | null | null | null |
amime/modules/anime/TV-SHORT/tvshort_trend/TVSHORT_TREND/tvshort_trend7.py
|
Myudi422/ccgnime_req
|
a0f7596ba101204539b4120dffa08912b6560efe
|
[
"MIT"
] | null | null | null |
import httpx
from anilist.types import Anime
from pyrogram import filters
from pyrogram.types import CallbackQuery
from pyromod.helpers import ikb
from pyromod.nav import Pagination
from amime.amime import Amime
@Amime.on_callback_query(filters.regex(r"^tvshort_trending7 anime (?P<page>\d+)"))
async def anime_suggestions(bot: Amime, callback: CallbackQuery):
page = int(callback.matches[0]["page"])
message = callback.message
lang = callback._lang
keyboard = []
async with httpx.AsyncClient(http2=True) as client:
response = await client.post(
url="https://graphql.anilist.co",
json=dict(
query="""
query($per_page: Int) {
Page(page: 8, perPage: $per_page) {
media(type: ANIME, format: TV_SHORT, sort: TRENDING_DESC, status: FINISHED) {
id
title {
romaji
english
native
}
siteUrl
}
}
}
""",
variables=dict(
                per_page=100,
),
),
headers={
"Content-Type": "application/json",
"Accept": "application/json",
},
)
data = response.json()
await client.aclose()
if data["data"]:
items = data["data"]["Page"]["media"]
suggestions = [
Anime(id=item["id"], title=item["title"], url=item["siteUrl"])
for item in items
]
layout = Pagination(
suggestions,
item_data=lambda i, pg: f"menu {i.id}",
item_title=lambda i, pg: i.title.romaji,
page_data=lambda pg: f"tvshort_trending7 anime {pg}",
)
lines = layout.create(page, lines=8)
if len(lines) > 0:
keyboard += lines
keyboard.append([(lang.Prev, "tvshort_trending6 anime 1"), (lang.Next, "tvshort_trending8 anime 1")])
keyboard.append([(lang.back_button, "tvshort_menu")])
await message.edit_text(
lang.suggestions_text,
reply_markup=ikb(keyboard),
)
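The handler above only runs inside the Amime bot, but the AniList call can be exercised on its own. A standalone sketch of the same query (`httpx[http2]` is assumed to be installed, and the variables key matches the `$per_page` name the query declares):

import asyncio
import httpx

QUERY = """
query($per_page: Int) {
    Page(page: 8, perPage: $per_page) {
        media(type: ANIME, format: TV_SHORT, sort: TRENDING_DESC, status: FINISHED) {
            id
            title { romaji }
            siteUrl
        }
    }
}
"""

async def fetch_trending():
    async with httpx.AsyncClient(http2=True) as client:
        response = await client.post(
            "https://graphql.anilist.co",
            json={"query": QUERY, "variables": {"per_page": 100}},
            headers={"Content-Type": "application/json", "Accept": "application/json"},
        )
    for item in response.json()["data"]["Page"]["media"]:
        print(item["id"], item["title"]["romaji"])

asyncio.run(fetch_trending())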
| 32.27027
| 105
| 0.490787
|
import httpx
from anilist.types import Anime
from pyrogram import filters
from pyrogram.types import CallbackQuery
from pyromod.helpers import ikb
from pyromod.nav import Pagination
from amime.amime import Amime
@Amime.on_callback_query(filters.regex(r"^tvshort_trending7 anime (?P<page>\d+)"))
async def anime_suggestions(bot: Amime, callback: CallbackQuery):
page = int(callback.matches[0]["page"])
message = callback.message
lang = callback._lang
keyboard = []
async with httpx.AsyncClient(http2=True) as client:
response = await client.post(
url="https://graphql.anilist.co",
json=dict(
query="""
query($per_page: Int) {
Page(page: 8, perPage: $per_page) {
media(type: ANIME, format: TV_SHORT, sort: TRENDING_DESC, status: FINISHED) {
id
title {
romaji
english
native
}
siteUrl
}
}
}
""",
variables=dict(
                per_page=100,
),
),
headers={
"Content-Type": "application/json",
"Accept": "application/json",
},
)
data = response.json()
await client.aclose()
if data["data"]:
items = data["data"]["Page"]["media"]
suggestions = [
Anime(id=item["id"], title=item["title"], url=item["siteUrl"])
for item in items
]
layout = Pagination(
suggestions,
item_data=lambda i, pg: f"menu {i.id}",
item_title=lambda i, pg: i.title.romaji,
page_data=lambda pg: f"tvshort_trending7 anime {pg}",
)
lines = layout.create(page, lines=8)
if len(lines) > 0:
keyboard += lines
keyboard.append([(lang.Prev, "tvshort_trending6 anime 1"), (lang.Next, "tvshort_trending8 anime 1")])
keyboard.append([(lang.back_button, "tvshort_menu")])
await message.edit_text(
lang.suggestions_text,
reply_markup=ikb(keyboard),
)
| true
| true
|
790351a15bdffbda5883270d588a916d0ed8ddd6
| 2,845
|
py
|
Python
|
training/test/test_metric_logger.py
|
sbam13/open_lth
|
d8c8d450cc8229afed54b26f77b91c3fe0c3f339
|
[
"MIT"
] | null | null | null |
training/test/test_metric_logger.py
|
sbam13/open_lth
|
d8c8d450cc8229afed54b26f77b91c3fe0c3f339
|
[
"MIT"
] | null | null | null |
training/test/test_metric_logger.py
|
sbam13/open_lth
|
d8c8d450cc8229afed54b26f77b91c3fe0c3f339
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from foundations.step import Step
from training.metric_logger import MetricLogger
from testing import test_case
class TestMetricLogger(test_case.TestCase):
def test_create(self):
MetricLogger()
@staticmethod
def create_logger():
logger = MetricLogger()
logger.add('train_accuracy', Step.from_iteration(0, 400), 0.5)
logger.add('train_accuracy', Step.from_iteration(1, 400), 0.6)
logger.add('test_accuracy', Step.from_iteration(0, 400), 0.4)
return logger
def test_add_get(self):
logger = TestMetricLogger.create_logger()
self.assertEqual(logger.get_data('train_accuracy'), [(0, 0.5), (1, 0.6)])
self.assertEqual(logger.get_data('test_accuracy'), [(0, 0.4)])
self.assertEqual(logger.get_data('test_loss'), [])
def test_overwrite(self):
logger = TestMetricLogger.create_logger()
logger.add('train_accuracy', Step.from_iteration(0, 400), 1.0)
self.assertEqual(logger.get_data('train_accuracy'), [(0, 1.0), (1, 0.6)])
def test_sorting(self):
logger = TestMetricLogger.create_logger()
logger.add('train_accuracy', Step.from_iteration(5, 400), 0.9)
logger.add('train_accuracy', Step.from_iteration(3, 400), 0.7)
logger.add('train_accuracy', Step.from_iteration(4, 400), 0.8)
self.assertEqual(logger.get_data('train_accuracy'),
[(0, 0.5), (1, 0.6), (3, 0.7), (4, 0.8), (5, 0.9)])
def test_str(self):
logger = TestMetricLogger.create_logger()
expected = ['train_accuracy,0,0.5', 'train_accuracy,1,0.6', 'test_accuracy,0,0.4']
self.assertEqual(str(logger), '\n'.join(expected))
def test_create_from_string(self):
logger = TestMetricLogger.create_logger()
logger2 = MetricLogger.create_from_string(str(logger))
self.assertEqual(logger.get_data('train_accuracy'), logger2.get_data('train_accuracy'))
self.assertEqual(logger.get_data('test_accuracy'), logger2.get_data('test_accuracy'))
self.assertEqual(str(logger), str(logger2))
def test_file_operations(self):
logger = TestMetricLogger.create_logger()
save_loc = os.path.join(self.root, 'temp_logger')
logger.save(save_loc)
logger2 = MetricLogger.create_from_file(save_loc)
self.assertEqual(logger.get_data('train_accuracy'), logger2.get_data('train_accuracy'))
self.assertEqual(logger.get_data('test_accuracy'), logger2.get_data('test_accuracy'))
self.assertEqual(str(logger), str(logger2))
test_case.main()
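The tests pin down the logger's contract fairly completely. A minimal sketch of an implementation that would satisfy them (the real one lives in training/metric_logger.py; `step.iteration` is assumed to be the integer the tests compare against):

class MetricLoggerSketch(object):
    def __init__(self):
        self._log = {}  # {metric_name: {iteration: value}}, insertion-ordered

    def add(self, name, step, value):
        # Re-adding the same (name, iteration) overwrites, per test_overwrite.
        self._log.setdefault(name, {})[step.iteration] = value

    def get_data(self, name):
        # Sorted by iteration, per test_sorting; unknown metrics yield [].
        return sorted(self._log.get(name, {}).items())

    def __str__(self):
        # One 'name,iteration,value' row per entry, per test_str.
        return '\n'.join('{},{},{}'.format(n, it, v)
                         for n, d in self._log.items() for it, v in d.items())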
| 41.231884
| 96
| 0.660105
|
import os
from foundations.step import Step
from training.metric_logger import MetricLogger
from testing import test_case
class TestMetricLogger(test_case.TestCase):
def test_create(self):
MetricLogger()
@staticmethod
def create_logger():
logger = MetricLogger()
logger.add('train_accuracy', Step.from_iteration(0, 400), 0.5)
logger.add('train_accuracy', Step.from_iteration(1, 400), 0.6)
logger.add('test_accuracy', Step.from_iteration(0, 400), 0.4)
return logger
def test_add_get(self):
logger = TestMetricLogger.create_logger()
self.assertEqual(logger.get_data('train_accuracy'), [(0, 0.5), (1, 0.6)])
self.assertEqual(logger.get_data('test_accuracy'), [(0, 0.4)])
self.assertEqual(logger.get_data('test_loss'), [])
def test_overwrite(self):
logger = TestMetricLogger.create_logger()
logger.add('train_accuracy', Step.from_iteration(0, 400), 1.0)
self.assertEqual(logger.get_data('train_accuracy'), [(0, 1.0), (1, 0.6)])
def test_sorting(self):
logger = TestMetricLogger.create_logger()
logger.add('train_accuracy', Step.from_iteration(5, 400), 0.9)
logger.add('train_accuracy', Step.from_iteration(3, 400), 0.7)
logger.add('train_accuracy', Step.from_iteration(4, 400), 0.8)
self.assertEqual(logger.get_data('train_accuracy'),
[(0, 0.5), (1, 0.6), (3, 0.7), (4, 0.8), (5, 0.9)])
def test_str(self):
logger = TestMetricLogger.create_logger()
expected = ['train_accuracy,0,0.5', 'train_accuracy,1,0.6', 'test_accuracy,0,0.4']
self.assertEqual(str(logger), '\n'.join(expected))
def test_create_from_string(self):
logger = TestMetricLogger.create_logger()
logger2 = MetricLogger.create_from_string(str(logger))
self.assertEqual(logger.get_data('train_accuracy'), logger2.get_data('train_accuracy'))
self.assertEqual(logger.get_data('test_accuracy'), logger2.get_data('test_accuracy'))
self.assertEqual(str(logger), str(logger2))
def test_file_operations(self):
logger = TestMetricLogger.create_logger()
save_loc = os.path.join(self.root, 'temp_logger')
logger.save(save_loc)
logger2 = MetricLogger.create_from_file(save_loc)
self.assertEqual(logger.get_data('train_accuracy'), logger2.get_data('train_accuracy'))
self.assertEqual(logger.get_data('test_accuracy'), logger2.get_data('test_accuracy'))
self.assertEqual(str(logger), str(logger2))
test_case.main()
| true
| true
|
79035325aebf7481cbae87dad61d5abc35502bc0
| 4,531
|
py
|
Python
|
utils/metrics.py
|
ljzycmd/SimDeblur
|
dd2f60c41176b75c4eaf80d740f547c206aa8227
|
[
"MIT"
] | 190
|
2021-03-22T13:59:42.000Z
|
2022-03-08T21:14:41.000Z
|
utils/metrics.py
|
Wang-jiahao/SimDeblur
|
31d88e1fbec91d5cc9062f4a46538e4ba806ab29
|
[
"MIT"
] | 9
|
2021-04-26T06:44:40.000Z
|
2022-03-25T07:48:30.000Z
|
utils/metrics.py
|
Wang-jiahao/SimDeblur
|
31d88e1fbec91d5cc9062f4a46538e4ba806ab29
|
[
"MIT"
] | 27
|
2021-03-23T03:11:00.000Z
|
2022-03-19T21:26:02.000Z
|
# CMD
import torch
import torch.nn.functional as F
import cv2
def calculate_psnr(img1, img2):
"""
data range [0, 1]
"""
img1 = img1.clamp(0, 1)
img2 = img2.clamp(0, 1)
mse = torch.mean((img1 - img2) ** 2, [1, 2, 3])
# if mse == 0:
# return 100
PIXEL_MAX = 1
return 20 * torch.mean(torch.log10(PIXEL_MAX / torch.sqrt(mse)))
def calculate_ssim(img1, img2):
# implemented with pytorch
    assert isinstance(img1, torch.Tensor)
    assert isinstance(img2, torch.Tensor)
img1 = img1.clamp(0, 1)
img2 = img2.clamp(0, 1)
C1 = (0.01 * 1)**2
C2 = (0.03 * 1)**2
# img1 = img1.to(torch.float32)
# img2 = img2.to(torch.float32)
kernel = gaussian(11, 1.5).to(img1).unsqueeze(1)
window = kernel.mm(kernel.t()).float().expand(3, 1, 11, 11)
mu1 = F.conv2d(img1, window, groups = 3) # valid
    mu2 = F.conv2d(img2, window, groups = 3)
mu1_sq = mu1**2
mu2_sq = mu2**2
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1**2, window, groups=3) - mu1_sq
sigma2_sq = F.conv2d(img2**2, window, groups=3) - mu2_sq
sigma12 = F.conv2d(img1 * img2, window, groups=3) - mu1_mu2
# mu1 = F.conv2d(img1, window, padding = 11//2, groups = 3) # same
# mu2 = F.conv1d(img2, window, padding = 11//2, groups = 3)
# mu1_sq = mu1**2
# mu2_sq = mu2**2
# mu1_mu2 = mu1 * mu2
# sigma1_sq = F.conv2d(img1**2, window, padding=11//2, groups=3) - mu1_sq
# sigma2_sq = F.conv2d(img2**2, window, padding=11//2, groups=3) - mu2_sq
# sigma12 = F.conv2d(img1 * img2, window, padding=11//2, groups=3) - mu1_mu2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
def gaussian(window_size, sigma):
gauss = torch.exp(torch.Tensor([-(x - window_size//2)**2/float(2*sigma**2) for x in range(window_size)]).float())
return gauss/gauss.sum()
def create_window(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = (_2D_window.expand(channel, 1, window_size, window_size).contiguous())
return window
def _ssim(img1, img2, window, window_size, channel, size_average = True):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
def __init__(self, window_size = 11, size_average = True):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = 1
self.window = create_window(window_size, self.channel)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.data.type() == img1.data.type():
window = self.window
else:
window = create_window(self.window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
self.window = window
self.channel = channel
return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
def ssim2(img1, img2, window_size = 11, size_average = True):
(_, channel, _, _) = img1.size()
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average)
if __name__ == "__main__":
img1 = torch.ones(1, 3, 256, 256)*0.95
img2 = torch.ones(1, 3, 256, 256)
print(ssim2(img1, img2))
    print(calculate_ssim(img1, img2))
    print(calculate_psnr(img1, img2))
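With the conv2d and function-name fixes above applied, a short usage sketch (random tensors stand in for restored and ground-truth frames):

import torch

torch.manual_seed(0)
clean = torch.rand(1, 3, 64, 64)
noisy = (clean + 0.05 * torch.randn_like(clean)).clamp(0, 1)

print(calculate_psnr(clean, noisy))   # scalar tensor, roughly 26 dB here
print(calculate_ssim(clean, noisy))   # 'valid' convolution, no padding
print(ssim2(clean, noisy))            # padded variant via create_window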
| 33.813433
| 117
| 0.609137
|
import torch
import torch.nn.functional as F
import cv2
def calculate_psnr(img1, img2):
img1 = img1.clamp(0, 1)
img2 = img2.clamp(0, 1)
mse = torch.mean((img1 - img2) ** 2, [1, 2, 3])
PIXEL_MAX = 1
return 20 * torch.mean(torch.log10(PIXEL_MAX / torch.sqrt(mse)))
def calculate_ssim(img1, img2):
    assert isinstance(img1, torch.Tensor)
    assert isinstance(img2, torch.Tensor)
img1 = img1.clamp(0, 1)
img2 = img2.clamp(0, 1)
C1 = (0.01 * 1)**2
C2 = (0.03 * 1)**2
kernel = gaussian(11, 1.5).to(img1).unsqueeze(1)
window = kernel.mm(kernel.t()).float().expand(3, 1, 11, 11)
mu1 = F.conv2d(img1, window, groups = 3)
    mu2 = F.conv2d(img2, window, groups = 3)
mu1_sq = mu1**2
mu2_sq = mu2**2
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1**2, window, groups=3) - mu1_sq
sigma2_sq = F.conv2d(img2**2, window, groups=3) - mu2_sq
sigma12 = F.conv2d(img1 * img2, window, groups=3) - mu1_mu2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
def gaussian(window_size, sigma):
gauss = torch.exp(torch.Tensor([-(x - window_size//2)**2/float(2*sigma**2) for x in range(window_size)]).float())
return gauss/gauss.sum()
def create_window(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = (_2D_window.expand(channel, 1, window_size, window_size).contiguous())
return window
def _ssim(img1, img2, window, window_size, channel, size_average = True):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
def __init__(self, window_size = 11, size_average = True):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = 1
self.window = create_window(window_size, self.channel)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.data.type() == img1.data.type():
window = self.window
else:
window = create_window(self.window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
self.window = window
self.channel = channel
return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
def ssim2(img1, img2, window_size = 11, size_average = True):
(_, channel, _, _) = img1.size()
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average)
if __name__ == "__main__":
img1 = torch.ones(1, 3, 256, 256)*0.95
img2 = torch.ones(1, 3, 256, 256)
print(ssim2(img1, img2))
    print(calculate_ssim(img1, img2))
    print(calculate_psnr(img1, img2))
| true
| true
|
790354328b4bbc6897980046f5649be0ca6d7552
| 2,459
|
py
|
Python
|
simplegist/simplegist.py
|
acatiadroid/simplegist
|
ae677872219d0697abf1fdc726e1a15470e3324f
|
[
"MIT"
] | null | null | null |
simplegist/simplegist.py
|
acatiadroid/simplegist
|
ae677872219d0697abf1fdc726e1a15470e3324f
|
[
"MIT"
] | null | null | null |
simplegist/simplegist.py
|
acatiadroid/simplegist
|
ae677872219d0697abf1fdc726e1a15470e3324f
|
[
"MIT"
] | null | null | null |
import requests
import json
from simplegist.mygist import Mygist
from simplegist.do import Do
from simplegist.comments import Comments
try:
from simplegist.config import USERNAME, API_TOKEN, BASE_URL, GIST_URL
except ImportError:
    USERNAME = API_TOKEN = BASE_URL = GIST_URL = None
class Simplegist:
"""
Gist Base Class
This class is to used to instantiate the wrapper and authenticate.
Authenticate with providing Github Username and API-Token to use
it for all future API requests
"""
def __init__(self, **args):
# Save our username and api_token (If given) for later use.
if 'username' in args:
self.username = args['username']
else:
if not USERNAME:
raise Exception('Please provide your Github username.')
else:
self.username = USERNAME
if 'api_token' in args:
self.api_token = args['api_token']
else:
if not API_TOKEN:
raise Exception('Please provide your Github API Token.')
else:
self.api_token = API_TOKEN
# Set header information in every request.
self.header = { 'X-Github-Username': self.username,
'Content-Type': 'application/json',
'Authorization': 'token %s' %self.api_token
}
def profile(self):
return Mygist(self)
def search(self, user):
return Mygist(self,user=user)
def do(self):
return Do(self)
def comments(self):
return Comments(self)
def create(self, **args):
if 'description' in args:
self.description = args['description']
else:
self.description = ''
if 'name' in args:
self.gist_name = args['name']
else:
self.gist_name = ''
if 'public' in args:
self.public = args['public']
else:
self.public = 1
if 'content' in args:
self.content = args['content']
else:
raise Exception('Gist content can\'t be empty')
url = '/gists'
data = {"description": self.description,
"public": self.public,
"files": {
self.gist_name: {
"content": self.content
}
}
}
r = requests.post(
'%s%s' % (BASE_URL, url),
data=json.dumps(data),
headers=self.header
)
if (r.status_code == 201):
response = {
'Gist-Link': '%s/%s/%s' %(GIST_URL,self.username,r.json()['id']),
'Clone-Link': '%s/%s.git' %(GIST_URL,r.json()['id']),
'Embed-Script': '<script src="%s/%s/%s.js"</script>' %(GIST_URL,self.username,r.json()['id']),
'id': r.json()['id'],
'created_at': r.json()['created_at'],
}
return response
raise Exception('Gist not created: server response was [%s] %s' % (r.status_code, r.text))
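A hypothetical usage sketch, assuming the package's config and comments modules are importable; the username and token are placeholders:

gist = Simplegist(username='octocat', api_token='<api-token>')

result = gist.create(name='hello.py',
                     description='demo gist',
                     public=1,
                     content="print('hello')")
print(result['Gist-Link'], result['created_at'])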
| 22.981308
| 97
| 0.646604
|
import requests
import json
from simplegist.mygist import Mygist
from simplegist.do import Do
from simplegist.comments import Comments
try:
from simplegist.config import USERNAME, API_TOKEN, BASE_URL, GIST_URL
except ImportError:
    USERNAME = API_TOKEN = BASE_URL = GIST_URL = None
class Simplegist:
def __init__(self, **args):
if 'username' in args:
self.username = args['username']
else:
if not USERNAME:
raise Exception('Please provide your Github username.')
else:
self.username = USERNAME
if 'api_token' in args:
self.api_token = args['api_token']
else:
if not API_TOKEN:
raise Exception('Please provide your Github API Token.')
else:
self.api_token = API_TOKEN
self.header = { 'X-Github-Username': self.username,
'Content-Type': 'application/json',
'Authorization': 'token %s' %self.api_token
}
def profile(self):
return Mygist(self)
def search(self, user):
return Mygist(self,user=user)
def do(self):
return Do(self)
def comments(self):
return Comments(self)
def create(self, **args):
if 'description' in args:
self.description = args['description']
else:
self.description = ''
if 'name' in args:
self.gist_name = args['name']
else:
self.gist_name = ''
if 'public' in args:
self.public = args['public']
else:
self.public = 1
if 'content' in args:
self.content = args['content']
else:
raise Exception('Gist content can\'t be empty')
url = '/gists'
data = {"description": self.description,
"public": self.public,
"files": {
self.gist_name: {
"content": self.content
}
}
}
r = requests.post(
'%s%s' % (BASE_URL, url),
data=json.dumps(data),
headers=self.header
)
if (r.status_code == 201):
response = {
'Gist-Link': '%s/%s/%s' %(GIST_URL,self.username,r.json()['id']),
'Clone-Link': '%s/%s.git' %(GIST_URL,r.json()['id']),
'Embed-Script': '<script src="%s/%s/%s.js"</script>' %(GIST_URL,self.username,r.json()['id']),
'id': r.json()['id'],
'created_at': r.json()['created_at'],
}
return response
raise Exception('Gist not created: server response was [%s] %s' % (r.status_code, r.text))
| true
| true
|
790355fc2c17677f74f2230fbf2c11be027f4021
| 3,143
|
py
|
Python
|
configs/textdet/dbnet/dbnet_r18_fpnc_1200e_icdar2015.py
|
jeffreykuang/mmocr-1
|
b17304edeb493b0a4d7224c23d23b952350d0db5
|
[
"Apache-2.0"
] | 206
|
2021-07-30T09:04:08.000Z
|
2022-03-22T00:57:44.000Z
|
configs/textdet/dbnet/dbnet_r18_fpnc_1200e_icdar2015.py
|
jeffreykuang/mmocr-1
|
b17304edeb493b0a4d7224c23d23b952350d0db5
|
[
"Apache-2.0"
] | 39
|
2021-08-05T07:16:46.000Z
|
2022-03-14T13:23:48.000Z
|
configs/textdet/dbnet/dbnet_r18_fpnc_1200e_icdar2015.py
|
jeffreykuang/mmocr-1
|
b17304edeb493b0a4d7224c23d23b952350d0db5
|
[
"Apache-2.0"
] | 61
|
2021-07-30T07:51:41.000Z
|
2022-03-30T14:40:02.000Z
|
_base_ = [
'../../_base_/schedules/schedule_1200e.py', '../../_base_/runtime_10e.py'
]
model = dict(
type='DBNet',
pretrained='torchvision://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=-1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=False,
style='caffe'),
neck=dict(
type='FPNC', in_channels=[64, 128, 256, 512], lateral_channels=256),
bbox_head=dict(
type='DBHead',
text_repr_type='quad',
in_channels=256,
loss=dict(type='DBLoss', alpha=5.0, beta=10.0, bbce_loss=True)),
train_cfg=None,
test_cfg=None)
dataset_type = 'IcdarDataset'
data_root = 'data/icdar2015/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# for visualizing img, pls uncomment it.
# img_norm_cfg = dict(mean=[0, 0, 0], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadTextAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='ColorJitter', brightness=32.0 / 255, saturation=0.5),
dict(type='Normalize', **img_norm_cfg),
# img aug
dict(
type='ImgAug',
args=[['Fliplr', 0.5],
dict(cls='Affine', rotate=[-10, 10]), ['Resize', [0.5, 3.0]]]),
# random crop
dict(type='EastRandomCrop', target_size=(640, 640)),
dict(type='DBNetTargets', shrink_ratio=0.4),
dict(type='Pad', size_divisor=32),
# for visualizing img and gts, pls set visualize = True
dict(
type='CustomFormatBundle',
keys=['gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask'],
visualize=dict(flag=False, boundary_key='gt_shrink')),
dict(
type='Collect',
keys=['img', 'gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 736),
flip=False,
transforms=[
dict(type='Resize', img_scale=(2944, 736), keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=16,
workers_per_gpu=8,
train=dict(
type=dataset_type,
ann_file=data_root + '/instances_training.json',
# for debugging top k imgs
# select_first_k=200,
img_prefix=data_root + '/imgs',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + '/instances_test.json',
img_prefix=data_root + '/imgs',
# select_first_k=100,
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + '/instances_test.json',
img_prefix=data_root + '/imgs',
# select_first_k=100,
pipeline=test_pipeline))
evaluation = dict(interval=100, metric='hmean-iou')
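Configs like this are plain Python consumed through mmcv; a sketch of inspecting one, assuming the file sits at its repo path so the `_base_` entries resolve:

from mmcv import Config

cfg = Config.fromfile('configs/textdet/dbnet/dbnet_r18_fpnc_1200e_icdar2015.py')
print(cfg.model.backbone.depth)          # 18
print(cfg.data.samples_per_gpu)          # 16
print(cfg.train_pipeline[0]['type'])     # 'LoadImageFromFile'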
| 32.402062
| 77
| 0.596564
|
_base_ = [
'../../_base_/schedules/schedule_1200e.py', '../../_base_/runtime_10e.py'
]
model = dict(
type='DBNet',
pretrained='torchvision://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=-1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=False,
style='caffe'),
neck=dict(
type='FPNC', in_channels=[64, 128, 256, 512], lateral_channels=256),
bbox_head=dict(
type='DBHead',
text_repr_type='quad',
in_channels=256,
loss=dict(type='DBLoss', alpha=5.0, beta=10.0, bbce_loss=True)),
train_cfg=None,
test_cfg=None)
dataset_type = 'IcdarDataset'
data_root = 'data/icdar2015/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadTextAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='ColorJitter', brightness=32.0 / 255, saturation=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(
type='ImgAug',
args=[['Fliplr', 0.5],
dict(cls='Affine', rotate=[-10, 10]), ['Resize', [0.5, 3.0]]]),
dict(type='EastRandomCrop', target_size=(640, 640)),
dict(type='DBNetTargets', shrink_ratio=0.4),
dict(type='Pad', size_divisor=32),
dict(
type='CustomFormatBundle',
keys=['gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask'],
visualize=dict(flag=False, boundary_key='gt_shrink')),
dict(
type='Collect',
keys=['img', 'gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 736),
flip=False,
transforms=[
dict(type='Resize', img_scale=(2944, 736), keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=16,
workers_per_gpu=8,
train=dict(
type=dataset_type,
ann_file=data_root + '/instances_training.json',
img_prefix=data_root + '/imgs',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + '/instances_test.json',
img_prefix=data_root + '/imgs',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + '/instances_test.json',
img_prefix=data_root + '/imgs',
pipeline=test_pipeline))
evaluation = dict(interval=100, metric='hmean-iou')
| true
| true
|
790358237c023b381ab2848aabf942a0bd5444c7
| 2,678
|
py
|
Python
|
app/webhooks.py
|
heaptracetechnology/github
|
7b7eaddf2e2eec4d28855c81d68ded65dc05cc09
|
[
"MIT"
] | null | null | null |
app/webhooks.py
|
heaptracetechnology/github
|
7b7eaddf2e2eec4d28855c81d68ded65dc05cc09
|
[
"MIT"
] | null | null | null |
app/webhooks.py
|
heaptracetechnology/github
|
7b7eaddf2e2eec4d28855c81d68ded65dc05cc09
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import hmac
import requests
from json import dumps
from hashlib import sha1
from .app import api, env
def match_any_if_any(event, events):
return events is None or event in events
class Subscription:
def __init__(self, data):
self.data = data
self.events = data['data'].get('events') # user defined
def __getitem__(self, config):
return self.data[config]
class Subscriptions:
store = {}
@classmethod
def add(cls, sub):
Subscriptions.store[sub['id']] = Subscription(sub)
@classmethod
def is_listening_for(cls, event):
for id, sub in Subscriptions.store.items():
if match_any_if_any(event, sub.events):
return True
return False
@classmethod
def publish(cls, eventid, event, data):
for id, sub in Subscriptions.store.items():
if match_any_if_any(event, sub.events):
requests.post(
sub['endpoint'],
headers={'Content-Type': 'application/json'},
data=dumps(dict(
eventType=event,
cloudEventsVersion='0.1',
contentType='application/vnd.omg.object+json',
eventID=eventid,
data=data
))
)
@classmethod
def remove(cls, eventid):
Subscriptions.store.pop(eventid, None)
@api.route('/webhooks/subscribe')
async def subscribe(req, resp):
data = await req.media()
Subscriptions.add(data)
resp.text = 'Subscribed'
@api.route('/webhooks/unsubscribe')
async def unsubscribe(req, resp):
data = await req.media()
Subscriptions.remove(data['id'])
resp.text = 'Unsubscribed'
@api.route('/webhooks')
async def webhooks(req, resp):
"""
Handle incoming GitHub webhooks
"""
data = await req.media()
eventid = req.headers.get('X-GitHub-Delivery')
event = req.headers.get('X-GitHub-Event')
if not Subscriptions.is_listening_for(event):
resp.text = f'Accepted, but not listening for {event} events.'
return
if env.webhook_secret:
signature = req.headers.get('X-Hub-Signature')
assert signature, 'X-Hub-Signature not found in the header.'
sha_name, signature = signature.split('=')
assert sha_name == 'sha1'
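        # NOTE: hmac.new needs bytes for both key and msg; `data` here is the
        # parsed media, so real verification must hash the raw request body
        # (and encode the secret) before comparing digests.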
mac = hmac.new(env.webhook_secret, msg=data, digestmod='sha1')
assert str(mac.hexdigest()) == str(signature)
Subscriptions.publish(eventid, event, {'event': event, 'payload': data})
resp.text = 'Accepted'
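GitHub computes X-Hub-Signature over the raw request body, so a test client has to sign the exact bytes it sends. A sketch of producing a matching header (secret and payload are placeholders):

import hmac
import json

secret = b'my-webhook-secret'                       # placeholder
body = json.dumps({'action': 'opened'}).encode()    # exact bytes to be POSTed
signature = 'sha1=' + hmac.new(secret, msg=body, digestmod='sha1').hexdigest()
headers = {
    'Content-Type': 'application/json',
    'X-GitHub-Event': 'pull_request',
    'X-GitHub-Delivery': 'demo-delivery-id',
    'X-Hub-Signature': signature,
}
# POST body with these headers to the /webhooks route above.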
| 26.78
| 76
| 0.590739
|
import hmac
import requests
from json import dumps
from hashlib import sha1
from .app import api, env
def match_any_if_any(event, events):
return events is None or event in events
class Subscription:
def __init__(self, data):
self.data = data
self.events = data['data'].get('events')
def __getitem__(self, config):
return self.data[config]
class Subscriptions:
store = {}
@classmethod
def add(cls, sub):
Subscriptions.store[sub['id']] = Subscription(sub)
@classmethod
def is_listening_for(cls, event):
for id, sub in Subscriptions.store.items():
if match_any_if_any(event, sub.events):
return True
return False
@classmethod
def publish(cls, eventid, event, data):
for id, sub in Subscriptions.store.items():
if match_any_if_any(event, sub.events):
requests.post(
sub['endpoint'],
headers={'Content-Type': 'application/json'},
data=dumps(dict(
eventType=event,
cloudEventsVersion='0.1',
contentType='application/vnd.omg.object+json',
eventID=eventid,
data=data
))
)
@classmethod
def remove(cls, eventid):
Subscriptions.store.pop(eventid, None)
@api.route('/webhooks/subscribe')
async def subscribe(req, resp):
data = await req.media()
Subscriptions.add(data)
resp.text = 'Subscribed'
@api.route('/webhooks/unsubscribe')
async def unsubscribe(req, resp):
data = await req.media()
Subscriptions.remove(data['id'])
resp.text = 'Unsubscribed'
@api.route('/webhooks')
async def webhooks(req, resp):
data = await req.media()
eventid = req.headers.get('X-GitHub-Delivery')
event = req.headers.get('X-GitHub-Event')
if not Subscriptions.is_listening_for(event):
resp.text = f'Accepted, but not listening for {event} events.'
return
if env.webhook_secret:
signature = req.headers.get('X-Hub-Signature')
assert signature, 'X-Hub-Signature not found in the header.'
sha_name, signature = signature.split('=')
assert sha_name == 'sha1'
mac = hmac.new(env.webhook_secret, msg=data, digestmod='sha1')
assert str(mac.hexdigest()) == str(signature)
Subscriptions.publish(eventid, event, {'event': event, 'payload': data})
resp.text = 'Accepted'
| true
| true
|
79035892a4162456def659cca8b6309091fcea3c
| 1,573
|
py
|
Python
|
ENV/lib/python3.5/site-packages/pyrogram/api/types/channel_admin_log_event_action_toggle_pre_history_hidden.py
|
block1o1/CryptoPredicted
|
7f660cdc456fb8252b3125028f31fd6f5a3ceea5
|
[
"MIT"
] | 4
|
2021-10-14T21:22:25.000Z
|
2022-03-12T19:58:48.000Z
|
ENV/lib/python3.5/site-packages/pyrogram/api/types/channel_admin_log_event_action_toggle_pre_history_hidden.py
|
inevolin/CryptoPredicted
|
7f660cdc456fb8252b3125028f31fd6f5a3ceea5
|
[
"MIT"
] | null | null | null |
ENV/lib/python3.5/site-packages/pyrogram/api/types/channel_admin_log_event_action_toggle_pre_history_hidden.py
|
inevolin/CryptoPredicted
|
7f660cdc456fb8252b3125028f31fd6f5a3ceea5
|
[
"MIT"
] | 1
|
2022-03-15T22:52:53.000Z
|
2022-03-15T22:52:53.000Z
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2018 Dan Tès <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.api.core import *
class ChannelAdminLogEventActionTogglePreHistoryHidden(Object):
"""Attributes:
ID: ``0x5f5c95f1``
Args:
new_value: ``bool``
"""
ID = 0x5f5c95f1
def __init__(self, new_value: bool):
self.new_value = new_value # Bool
@staticmethod
def read(b: BytesIO, *args) -> "ChannelAdminLogEventActionTogglePreHistoryHidden":
# No flags
new_value = Bool.read(b)
return ChannelAdminLogEventActionTogglePreHistoryHidden(new_value)
def write(self) -> bytes:
b = BytesIO()
b.write(Int(self.ID, False))
# No flags
b.write(Bool(self.new_value))
return b.getvalue()
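A round-trip sketch, assuming a pyrogram 0.x environment where the core primitives (Int, Bool) serialize to bytes; the 4-byte slice skips the constructor ID that a dispatcher would normally consume before calling read():

from io import BytesIO

action = ChannelAdminLogEventActionTogglePreHistoryHidden(new_value=True)
raw = action.write()
parsed = ChannelAdminLogEventActionTogglePreHistoryHidden.read(BytesIO(raw[4:]))
print(parsed.new_value)  # True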
| 29.12963
| 86
| 0.686586
|
from io import BytesIO
from pyrogram.api.core import *
class ChannelAdminLogEventActionTogglePreHistoryHidden(Object):
ID = 0x5f5c95f1
def __init__(self, new_value: bool):
self.new_value = new_value
@staticmethod
def read(b: BytesIO, *args) -> "ChannelAdminLogEventActionTogglePreHistoryHidden":
new_value = Bool.read(b)
return ChannelAdminLogEventActionTogglePreHistoryHidden(new_value)
def write(self) -> bytes:
b = BytesIO()
b.write(Int(self.ID, False))
b.write(Bool(self.new_value))
return b.getvalue()
| true
| true
|
79035898aeb438851c67166a71b8be4f337540ee
| 1,903
|
py
|
Python
|
mmdet/datasets/classify/imagenet.py
|
anorthman/mmdetection
|
52e28154364f0e19d11c206bb357d88f29fc4a2d
|
[
"Apache-2.0"
] | 5
|
2019-06-11T11:08:54.000Z
|
2021-03-25T10:06:01.000Z
|
mmdet/datasets/classify/imagenet.py
|
anorthman/mmdetection
|
52e28154364f0e19d11c206bb357d88f29fc4a2d
|
[
"Apache-2.0"
] | null | null | null |
mmdet/datasets/classify/imagenet.py
|
anorthman/mmdetection
|
52e28154364f0e19d11c206bb357d88f29fc4a2d
|
[
"Apache-2.0"
] | 1
|
2019-06-11T11:08:55.000Z
|
2019-06-11T11:08:55.000Z
|
import os
import cv2
from PIL import Image
import torch
import mmcv
import numpy as np
from torch.utils.data import Dataset
import torchvision.transforms as T
from torchvision.datasets import ImageFolder
class ImageNetDataset(Dataset):
def __init__(self,
data_root,
test_mode=False,**kwargs):
self.classes = list(range(1000))
normalize = T.Normalize(mean=[0.456], std=[1.0])
#normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
if not test_mode:
traindir = os.path.join(data_root, 'train')
self.dataset = ImageFolder(traindir, T.Compose([
T.Grayscale(num_output_channels=1),
T.RandomResizedCrop(224, scale=(0.8, 1.0)),
T.RandomHorizontalFlip(),
T.ToTensor(),
normalize,
]))
else:
valdir = os.path.join(data_root, 'val')
self.dataset = ImageFolder(valdir, T.Compose([
T.Resize(256),
T.CenterCrop(224),
T.ToTensor(),
normalize,
]))
if not test_mode:
self._set_group_flag()
def _set_group_flag(self):
"""Set flag according to image aspect ratio.
Images with aspect ratio greater than 1 will be set as group 1,
otherwise group 0.
"""
self.flag = np.zeros(len(self), dtype=np.uint8)
def __getitem__(self, idx):
d = dict(img=self.dataset[idx][0], label=torch.tensor([self.dataset[idx][1]], dtype=torch.long))
return d
def __len__(self):
return len(self.dataset)
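A usage sketch; the data root is hypothetical and must follow the ImageFolder layout (data_root/{train,val}/<class>/<image>):

dataset = ImageNetDataset(data_root='/data/imagenet', test_mode=True)
sample = dataset[0]
# The val pipeline keeps 3 channels; the train pipeline's Grayscale yields 1.
print(sample['img'].shape)   # torch.Size([3, 224, 224])
print(sample['label'])       # tensor([<class index>])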
| 32.254237
| 104
| 0.504467
|
import os
import cv2
from PIL import Image
import torch
import mmcv
import numpy as np
from torch.utils.data import Dataset
import torchvision.transforms as T
from torchvision.datasets import ImageFolder
class ImageNetDataset(Dataset):
def __init__(self,
data_root,
test_mode=False,**kwargs):
self.classes = list(range(1000))
normalize = T.Normalize(mean=[0.456], std=[1.0])
if not test_mode:
traindir = os.path.join(data_root, 'train')
self.dataset = ImageFolder(traindir, T.Compose([
T.Grayscale(num_output_channels=1),
T.RandomResizedCrop(224, scale=(0.8, 1.0)),
T.RandomHorizontalFlip(),
T.ToTensor(),
normalize,
]))
else:
valdir = os.path.join(data_root, 'val')
self.dataset = ImageFolder(valdir, T.Compose([
T.Resize(256),
T.CenterCrop(224),
T.ToTensor(),
normalize,
]))
if not test_mode:
self._set_group_flag()
def _set_group_flag(self):
self.flag = np.zeros(len(self), dtype=np.uint8)
def __getitem__(self, idx):
d = dict(img=self.dataset[idx][0], label=torch.tensor([self.dataset[idx][1]], dtype=torch.long))
return d
def __len__(self):
return len(self.dataset)
| true
| true
|
790359ff366f72c211193c96a4f53c88177863f3
| 7,772
|
py
|
Python
|
payir/models.py
|
farahmand-m/django-payir
|
c6f1042cf9fa3b1887086cedfacc5f76755540be
|
[
"MIT"
] | null | null | null |
payir/models.py
|
farahmand-m/django-payir
|
c6f1042cf9fa3b1887086cedfacc5f76755540be
|
[
"MIT"
] | null | null | null |
payir/models.py
|
farahmand-m/django-payir
|
c6f1042cf9fa3b1887086cedfacc5f76755540be
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.shortcuts import redirect
from django.urls import reverse
from django.utils import timezone
import requests
from . import exceptions
class Gateway(models.Model):
label = models.CharField(max_length=255, verbose_name=_('Label'))
api_key = models.CharField(max_length=255, verbose_name=_('API Key'))
default_callback = models.CharField(max_length=255, null=True, blank=True, verbose_name=_('Redirect to'), help_text=_('Enter the path name for a view that will verify the transaction.'))
class Meta:
verbose_name = _('Gateway')
verbose_name_plural = _('Gateways')
submission_url = 'https://pay.ir/pg/send'
verification_url = 'https://pay.ir/pg/verify'
def _prepare_submission_payload(self, request, transaction, mobile, valid_card_number, callback):
if callback is None:
raise ValueError('You need to specify a path name as the callback for your transactions.')
return {
'api': self.api_key,
'amount': transaction.amount,
'redirect': request.build_absolute_uri(reverse(callback)),
'mobile': mobile,
'factorNumber': transaction.id,
'description': transaction.description,
'validCardNumber': valid_card_number
}
def submit(self, request, transaction, mobile: str = None, valid_card_number: str = None, callback: str = None):
"""Submits a transaction to Pay.ir.
When called, the method submits the necessary information about the transaction to Pay.ir and returns a
HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an
error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.
:param request: The WSGIRequest object passed to the view.
:param transaction: A transaction object (or a similar class) that's already been saved to the database.
:param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.
:param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.
:param callback: (Optional) Overrides the default callback of the gateway.
"""
payload = self._prepare_submission_payload(request, transaction, mobile, valid_card_number, callback or self.default_callback)
response = requests.post(self.submission_url, data=payload)
data = response.json()
if response:
transaction.token = data['token']
transaction.save()
return redirect(f'https://pay.ir/pg/{transaction.token}')
raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
def create_and_submit(self, request, account, amount: int, mobile: str = None, valid_card_number: str = None, callback: str = None):
"""Creates a transaction object and submits the transaction to Pay.ir.
When called, the method submits the necessary information about the transaction to Pay.ir and returns a
HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an
error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.
:param request: The WSGIRequest object passed to the view.
:param account: Payer's account object. The account will be assigned to the transaction through a ForeignKey.
:param amount: The amount of the transaction in IRR. The amount has to be more than 1000.
:param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.
:param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.
:param callback: (Optional) Overrides the default callback of the gateway.
"""
transaction = Transaction(account=account, amount=amount)
transaction.save()
return self.submit(request, transaction, mobile, valid_card_number, callback)
def verify(self, transaction):
"""Verifies the transaction with Pay.ir.
When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to
the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value.
The boolean value would be True if the `verified` flag of the transaction was switched to True. If the
`verified` attribute of transaction object and the returned boolean value do not match, the user might be trying
to confirm a payment for a second time.
:param transaction: The transaction object corresponding to the specified token in request.GET.
"""
payload = {'api': self.api_key, 'token': transaction.token}
response = requests.post(self.verification_url, data=payload)
data = response.json()
if response:
if not transaction.verified:
transaction.gateway = self
transaction.verified = True
transaction.verified_at = timezone.now()
transaction.save()
return transaction, True
else:
return transaction, False
raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
def find_and_verify(self, token: str):
"""Finds a transaction with a matching token value and verifies it with Pay.ir.
When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to
the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value.
The boolean value would be True if the `verified` flag of the transaction was switched to True. If the
`verified` attribute of transaction object and the returned boolean value do not match, the user might be trying
to confirm a payment for a second time.
:param token: The token of the transaction, which can be found in request.GET. The method will look for a
transaction object with the same token and return it as the first argument.
"""
transaction = Transaction.objects.get(token=token)
return self.verify(transaction)
def __str__(self):
return self.label
class Transaction(models.Model):
account = models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name=_('Account'))
created = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name=_('Created'))
modified = models.DateTimeField(auto_now=True, verbose_name=_('Modified'))
amount = models.IntegerField(verbose_name=_('Amount (IRR)'))
description = models.CharField(max_length=255, null=True, blank=True, verbose_name=_('Description'))
gateway = models.ForeignKey(to=Gateway, on_delete=models.SET_NULL, null=True, blank=True, verbose_name=_('Gateway'))
token = models.TextField(null=True, blank=True, unique=True, verbose_name=_('Token'))
verified = models.BooleanField(default=False, verbose_name=_('Verified'))
verified_at = models.DateTimeField(null=True, blank=True, verbose_name=_('Verified At'))
class Meta:
ordering = ['-modified']
verbose_name = _('Transaction')
verbose_name_plural = _('Transactions')
def __str__(self):
return _('Transaction %(id)d') % {'id': self.id}
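A hypothetical wiring of these models into Django views ('payment-callback' is an assumed URL name pointing at the verification view):

from django.http import JsonResponse
from payir.models import Gateway

def start_payment(request):
    gateway = Gateway.objects.first()
    # On success this returns a redirect to https://pay.ir/pg/<token>.
    return gateway.create_and_submit(request, request.user, amount=10000,
                                     callback='payment-callback')

def payment_callback(request):
    gateway = Gateway.objects.first()
    transaction, verified_now = gateway.find_and_verify(request.GET['token'])
    return JsonResponse({'verified': verified_now, 'amount': transaction.amount})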
| 55.913669
| 190
| 0.702651
|
from django.db import models
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.shortcuts import redirect
from django.urls import reverse
from django.utils import timezone
import requests
from . import exceptions
class Gateway(models.Model):
label = models.CharField(max_length=255, verbose_name=_('Label'))
api_key = models.CharField(max_length=255, verbose_name=_('API Key'))
default_callback = models.CharField(max_length=255, null=True, blank=True, verbose_name=_('Redirect to'), help_text=_('Enter the path name for a view that will verify the transaction.'))
class Meta:
verbose_name = _('Gateway')
verbose_name_plural = _('Gateways')
submission_url = 'https://pay.ir/pg/send'
verification_url = 'https://pay.ir/pg/verify'
def _prepare_submission_payload(self, request, transaction, mobile, valid_card_number, callback):
if callback is None:
raise ValueError('You need to specify a path name as the callback for your transactions.')
return {
'api': self.api_key,
'amount': transaction.amount,
'redirect': request.build_absolute_uri(reverse(callback)),
'mobile': mobile,
'factorNumber': transaction.id,
'description': transaction.description,
'validCardNumber': valid_card_number
}
def submit(self, request, transaction, mobile: str = None, valid_card_number: str = None, callback: str = None):
payload = self._prepare_submission_payload(request, transaction, mobile, valid_card_number, callback or self.default_callback)
response = requests.post(self.submission_url, data=payload)
data = response.json()
if response:
transaction.token = data['token']
transaction.save()
return redirect(f'https://pay.ir/pg/{transaction.token}')
raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
def create_and_submit(self, request, account, amount: int, mobile: str = None, valid_card_number: str = None, callback: str = None):
transaction = Transaction(account=account, amount=amount)
transaction.save()
return self.submit(request, transaction, mobile, valid_card_number, callback)
def verify(self, transaction):
payload = {'api': self.api_key, 'token': transaction.token}
response = requests.post(self.verification_url, data=payload)
data = response.json()
if response:
if not transaction.verified:
transaction.gateway = self
transaction.verified = True
transaction.verified_at = timezone.now()
transaction.save()
return transaction, True
else:
return transaction, False
raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
def find_and_verify(self, token: str):
transaction = Transaction.objects.get(token=token)
return self.verify(transaction)
def __str__(self):
return self.label
class Transaction(models.Model):
account = models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name=_('Account'))
created = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name=_('Created'))
modified = models.DateTimeField(auto_now=True, verbose_name=_('Modified'))
amount = models.IntegerField(verbose_name=_('Amount (IRR)'))
description = models.CharField(max_length=255, null=True, blank=True, verbose_name=_('Description'))
gateway = models.ForeignKey(to=Gateway, on_delete=models.SET_NULL, null=True, blank=True, verbose_name=_('Gateway'))
token = models.TextField(null=True, blank=True, unique=True, verbose_name=_('Token'))
verified = models.BooleanField(default=False, verbose_name=_('Verified'))
verified_at = models.DateTimeField(null=True, blank=True, verbose_name=_('Verified At'))
class Meta:
ordering = ['-modified']
verbose_name = _('Transaction')
verbose_name_plural = _('Transactions')
def __str__(self):
return _('Transaction %(id)d') % {'id': self.id}
| true
| true
|
79035a03cb0b0c6594b1eb3fb61d98f3df969eaa
| 830
|
py
|
Python
|
lexicon/tests/providers/test_glesys.py
|
HelixEducation/lexicon
|
9941a61a3b208c5b35602432a75a814394e34875
|
[
"MIT"
] | null | null | null |
lexicon/tests/providers/test_glesys.py
|
HelixEducation/lexicon
|
9941a61a3b208c5b35602432a75a814394e34875
|
[
"MIT"
] | null | null | null |
lexicon/tests/providers/test_glesys.py
|
HelixEducation/lexicon
|
9941a61a3b208c5b35602432a75a814394e34875
|
[
"MIT"
] | 1
|
2020-07-13T21:45:08.000Z
|
2020-07-13T21:45:08.000Z
|
"""Integration tests for Glesys"""
from unittest import TestCase
import pytest
from lexicon.tests.providers.integration_tests import IntegrationTestsV1
# Hook into testing framework by inheriting unittest.TestCase and reuse
# the tests which *each and every* implementation of the interface must
# pass, by inheritance from define_tests.TheTests
# TODO: migrate to IntegrationTestsV2 and its extended test suite
class GlesysProviderTests(TestCase, IntegrationTestsV1):
"""TestCase for Glesys"""
provider_name = 'glesys'
domain = "capsulecd.com"
def _filter_headers(self):
return ['Authorization']
# TODO: enable the skipped tests
@pytest.mark.skip(reason="new test, missing recording")
def test_provider_when_calling_update_record_should_modify_record_name_specified(self):
return
| 34.583333
| 91
| 0.774699
|
from unittest import TestCase
import pytest
from lexicon.tests.providers.integration_tests import IntegrationTestsV1
class GlesysProviderTests(TestCase, IntegrationTestsV1):
provider_name = 'glesys'
domain = "capsulecd.com"
def _filter_headers(self):
return ['Authorization']
@pytest.mark.skip(reason="new test, missing recording")
def test_provider_when_calling_update_record_should_modify_record_name_specified(self):
return
| true
| true
|
79035ae7fc48491090144047e32494f56c714f32
| 888
|
py
|
Python
|
priestess.py
|
nvanbaak/dungeon-adventure-2
|
8bb3efbcf375baa149df85172b7d715d5a2930a8
|
[
"MIT"
] | null | null | null |
priestess.py
|
nvanbaak/dungeon-adventure-2
|
8bb3efbcf375baa149df85172b7d715d5a2930a8
|
[
"MIT"
] | null | null | null |
priestess.py
|
nvanbaak/dungeon-adventure-2
|
8bb3efbcf375baa149df85172b7d715d5a2930a8
|
[
"MIT"
] | null | null | null |
# name : Shoby Gnanasekaran
# net id: shoby
from dungeonchar import DungeonCharacter
from healable import Healable
from hero import Hero
class Priestess(Hero, Healable):
""" Priestess is a hero with it own statistics. The basic behaviour is same as the hero.
Special ability is to heal everytime after taking damage """
def __init__(self, name, model, **kwargs):
super().__init__(name = name, model = model, **kwargs)
super(DungeonCharacter, self).__init__(**kwargs)
def take_damage(self, dmg, source):
""" after taking damage, if the priestess is not dead, it heals itself"""
hp_before_attack = self.hp
super().take_damage(dmg, source)
if self._is_alive and hp_before_attack > self.hp and source != "pit":
heal_message = self.heal_itself()
self.model.announce(f"{self.name}: {heal_message}")
| 35.52
| 92
| 0.679054
|
from dungeonchar import DungeonCharacter
from healable import Healable
from hero import Hero
class Priestess(Hero, Healable):
def __init__(self, name, model, **kwargs):
super().__init__(name = name, model = model, **kwargs)
super(DungeonCharacter, self).__init__(**kwargs)
def take_damage(self, dmg, source):
hp_before_attack = self.hp
super().take_damage(dmg, source)
if self._is_alive and hp_before_attack > self.hp and source != "pit":
heal_message = self.heal_itself()
self.model.announce(f"{self.name}: {heal_message}")
| true
| true
|
79036004fea28c4dba4e834e26c516912938e6de
| 713
|
py
|
Python
|
web_service/config.py
|
celinekeisja/capstone
|
446201dc1aa60203b1f43995998846367bc18c7a
|
[
"MIT"
] | null | null | null |
web_service/config.py
|
celinekeisja/capstone
|
446201dc1aa60203b1f43995998846367bc18c7a
|
[
"MIT"
] | 4
|
2021-03-31T19:33:29.000Z
|
2021-12-13T20:52:17.000Z
|
web_service/config.py
|
celinekeisja/capstone
|
446201dc1aa60203b1f43995998846367bc18c7a
|
[
"MIT"
] | null | null | null |
import os
from connexion import App
from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy
basedir = os.path.abspath(os.path.dirname(__file__))
conn = App(__name__, specification_dir='./')
app = conn.app
postgres_url = 'postgres://postgres:docker@10.5.95.65:54320/web_service_db'
app.config["SQLALCHEMY_ECHO"] = True
app.config["SQLALCHEMY_DATABASE_URI"] = postgres_url
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["UPLOAD_FOLDER"] = basedir + os.sep + "web_service_files"
app.config["DATABASE"] = "web_service_db"
app.config["PORT"] = 5433
app.config["USERNAME"] = "postgres"
app.config["HOSTNAME"] = "10.5.95.65"
db = SQLAlchemy(app)
ma = Marshmallow(app)
| 25.464286
| 75
| 0.760168
|
import os
from connexion import App
from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy
basedir = os.path.abspath(os.path.dirname(__file__))
conn = App(__name__, specification_dir='./')
app = conn.app
postgres_url = 'postgres://postgres:docker@10.5.95.65:54320/web_service_db'
app.config["SQLALCHEMY_ECHO"] = True
app.config["SQLALCHEMY_DATABASE_URI"] = postgres_url
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["UPLOAD_FOLDER"] = basedir + os.sep + "web_service_files"
app.config["DATABASE"] = "web_service_db"
app.config["PORT"] = 5433
app.config["USERNAME"] = "postgres"
app.config["HOSTNAME"] = "10.5.95.65"
db = SQLAlchemy(app)
ma = Marshmallow(app)
| true
| true
|
790360736808ec71a85aa153b300c50a926d3ce6
| 1,107
|
py
|
Python
|
tests/unit/__init__.py
|
stefwalter/packit
|
d675018518ef200a06ea7636dd203100d872a772
|
[
"MIT"
] | 1
|
2020-12-28T18:00:22.000Z
|
2020-12-28T18:00:22.000Z
|
tests/unit/__init__.py
|
stefwalter/packit
|
d675018518ef200a06ea7636dd203100d872a772
|
[
"MIT"
] | 7
|
2020-12-28T19:57:35.000Z
|
2021-04-17T14:43:15.000Z
|
tests/unit/__init__.py
|
stefwalter/packit
|
d675018518ef200a06ea7636dd203100d872a772
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
| 50.318182
| 80
| 0.775971
| true
| true
|
|
7903619646d14bbb5481225fca4a3723bcdcbbbc
| 1,115
|
py
|
Python
|
web/addons/stock/__init__.py
|
diogocs1/comps
|
63df07f6cf21c41e4527c06e2d0499f23f4322e7
|
[
"Apache-2.0"
] | 1
|
2019-12-29T11:53:56.000Z
|
2019-12-29T11:53:56.000Z
|
odoo/addons/stock/__init__.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | null | null | null |
odoo/addons/stock/__init__.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | 3
|
2020-10-08T14:42:10.000Z
|
2022-01-28T14:12:29.000Z
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from stock import *
import partner
import product
import procurement
import report
import wizard
import res_config
import controllers
| 34.84375
| 78
| 0.627803
| true
| true
|
|
790361faaf2d5169e7e370d3622b4df1a156470f
| 1,714
|
py
|
Python
|
tests/enumeration/run.py
|
jonnyrocks/pyangbind
|
7a7c6df6ddad7cbec941800431840253b5e2f186
|
[
"Apache-2.0"
] | 176
|
2015-06-17T15:44:07.000Z
|
2022-03-18T01:16:19.000Z
|
tests/enumeration/run.py
|
jonnyrocks/pyangbind
|
7a7c6df6ddad7cbec941800431840253b5e2f186
|
[
"Apache-2.0"
] | 245
|
2015-05-29T07:04:13.000Z
|
2022-03-25T14:44:37.000Z
|
tests/enumeration/run.py
|
jonnyrocks/pyangbind
|
7a7c6df6ddad7cbec941800431840253b5e2f186
|
[
"Apache-2.0"
] | 118
|
2015-07-02T07:04:36.000Z
|
2022-03-31T20:32:38.000Z
|
#!/usr/bin/env python
import unittest
from tests.base import PyangBindTestCase
class EnumerationTests(PyangBindTestCase):
yang_files = ["enumeration.yang"]
def setUp(self):
self.enum_obj = self.bindings.enumeration()
def test_container_has_all_leafs(self):
for leaf in ["e", "f"]:
with self.subTest(leaf=leaf):
self.assertTrue(
hasattr(self.enum_obj.container, leaf), "Container does not contain enumeration %s" % leaf
)
def test_assign_to_enum(self):
self.enum_obj.container.e = "one"
self.assertEqual(
self.enum_obj.container.e,
"one",
"Enumeration value was not correctly set (%s)" % self.enum_obj.container.e,
)
def test_enum_does_not_allow_invalid_value(self):
allowed = True
try:
self.enum_obj.container.e = "twentyseven"
except ValueError:
allowed = False
self.assertFalse(
allowed, "Erroneous value was not caught by restriction handler (%s)" % self.enum_obj.container.e
)
def test_enum_default_value(self):
self.assertEqual(
self.enum_obj.container.f._default,
"c",
"Erroneous default value for 'f' (%s)" % self.enum_obj.container.f._default,
)
def test_static_enum_value(self):
self.enum_obj.container.e = "two"
self.assertEqual(
self.enum_obj.container.e.getValue(mapped=True),
42,
"Erroneously statically defined value returned (%s)" % self.enum_obj.container.e.getValue(mapped=True),
)
if __name__ == "__main__":
unittest.main()
| 30.070175
| 115
| 0.608518
|
import unittest
from tests.base import PyangBindTestCase
class EnumerationTests(PyangBindTestCase):
yang_files = ["enumeration.yang"]
def setUp(self):
self.enum_obj = self.bindings.enumeration()
def test_container_has_all_leafs(self):
for leaf in ["e", "f"]:
with self.subTest(leaf=leaf):
self.assertTrue(
hasattr(self.enum_obj.container, leaf), "Container does not contain enumeration %s" % leaf
)
def test_assign_to_enum(self):
self.enum_obj.container.e = "one"
self.assertEqual(
self.enum_obj.container.e,
"one",
"Enumeration value was not correctly set (%s)" % self.enum_obj.container.e,
)
def test_enum_does_not_allow_invalid_value(self):
allowed = True
try:
self.enum_obj.container.e = "twentyseven"
except ValueError:
allowed = False
self.assertFalse(
allowed, "Erroneous value was not caught by restriction handler (%s)" % self.enum_obj.container.e
)
def test_enum_default_value(self):
self.assertEqual(
self.enum_obj.container.f._default,
"c",
"Erroneous default value for 'f' (%s)" % self.enum_obj.container.f._default,
)
def test_static_enum_value(self):
self.enum_obj.container.e = "two"
self.assertEqual(
self.enum_obj.container.e.getValue(mapped=True),
42,
"Erroneously statically defined value returned (%s)" % self.enum_obj.container.e.getValue(mapped=True),
)
if __name__ == "__main__":
unittest.main()
| true
| true
|
7903639ba74efdc3a98e9a3023c99764629751c1
| 968
|
py
|
Python
|
flask_service/views.py
|
mwprog/atomist-flask-microservice
|
65a18a0f149bf30af3cb5f9eb0818aa784901ade
|
[
"Apache-2.0"
] | null | null | null |
flask_service/views.py
|
mwprog/atomist-flask-microservice
|
65a18a0f149bf30af3cb5f9eb0818aa784901ade
|
[
"Apache-2.0"
] | 6
|
2018-06-06T20:00:46.000Z
|
2018-06-08T14:19:55.000Z
|
flask_service/views.py
|
mwprog/atomist-flask-microservice
|
65a18a0f149bf30af3cb5f9eb0818aa784901ade
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from flask import Blueprint, jsonify
from flask_service.swagger import spec
__all__ = ['main_app']
main_app = Blueprint('main_app', __name__)
@main_app.route('/api')
def swagger():
"""
Responds with the OpenAPI specification for this application.
"""
return jsonify(spec.to_dict())
@main_app.route('/health')
def health():
"""
    Responds with the current service's health.
Could be used by the liveness probe of a Kubernetes cluster for instance.
"""
# put some logic here to decide if your app is doing well or not
# by default, we'll always return everything is okay!
return ""
@main_app.route('/status')
def status():
"""
    Responds with the current service's status.
Could be used by the readiness probe of a Kubernetes cluster.
"""
# put some logic here to decide if your app is doing well or not
# by default, we'll always return everything is okay!
return ""
| 24.2
| 77
| 0.677686
|
from flask import Blueprint, jsonify
from flask_service.swagger import spec
__all__ = ['main_app']
main_app = Blueprint('main_app', __name__)
@main_app.route('/api')
def swagger():
return jsonify(spec.to_dict())
@main_app.route('/health')
def health():
return ""
@main_app.route('/status')
def status():
# put some logic here to decide if your app is doing well or not
# by default, we'll always return everything is okay!
return ""
| true
| true
|
790364466785d40ee8c57322728dd080b8ae9ad1
| 22,582
|
py
|
Python
|
sgkit/io/bgen/bgen_reader.py
|
pystatgen/sgk
|
f39e1b1bc3b16d05c5043ab5d445076424dad229
|
[
"Apache-2.0"
] | 74
|
2020-06-16T18:08:24.000Z
|
2022-02-10T06:42:30.000Z
|
sgkit/io/bgen/bgen_reader.py
|
pystatgen/sgk
|
f39e1b1bc3b16d05c5043ab5d445076424dad229
|
[
"Apache-2.0"
] | 677
|
2020-06-18T15:57:33.000Z
|
2022-03-31T16:20:50.000Z
|
sgkit/io/bgen/bgen_reader.py
|
pystatgen/sgk
|
f39e1b1bc3b16d05c5043ab5d445076424dad229
|
[
"Apache-2.0"
] | 20
|
2020-06-22T13:40:10.000Z
|
2022-03-05T03:33:13.000Z
|
"""BGEN reader implementation (using bgen_reader)"""
import logging
import tempfile
import time
from pathlib import Path
from typing import (
Any,
Dict,
Hashable,
List,
Mapping,
MutableMapping,
Optional,
Tuple,
Union,
)
import dask
import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
import xarray as xr
import zarr
from cbgen import bgen_file, bgen_metafile
from rechunker import api as rechunker_api
from xarray import Dataset
from sgkit import create_genotype_dosage_dataset
from sgkit.io.utils import dataframe_to_dict, encode_contigs
from sgkit.typing import ArrayLike, DType, NDArray, PathType
logger = logging.getLogger(__name__)
GT_DATA_VARS = [
"call_genotype_probability",
"call_genotype_probability_mask",
"call_dosage",
"call_dosage_mask",
]
METAFILE_DTYPE = dict(
[
("id", "S"),
("rsid", "S"),
("chrom", "S"),
("pos", "int32"),
("a1", "S"),
("a2", "S"),
("offset", "int64"),
]
)
class BgenReader:
name = "bgen_reader"
def __init__(
self,
path: PathType,
metafile_path: Optional[PathType] = None,
dtype: DType = "float32",
) -> None:
self.path = Path(path)
self.metafile_path = (
Path(metafile_path) if metafile_path else self.path.with_suffix(".metafile")
)
with bgen_file(self.path) as bgen:
self.n_variants = bgen.nvariants
self.n_samples = bgen.nsamples
if not self.metafile_path.exists():
start = time.time()
logger.info(
f"Generating BGEN metafile for '{self.path}' (this may take a while)"
)
bgen.create_metafile(self.metafile_path, verbose=False)
stop = time.time()
logger.info(
f"BGEN metafile generation complete ({stop - start:.0f} seconds)"
)
with bgen_metafile(self.metafile_path) as mf:
assert self.n_variants == mf.nvariants
self.npartitions = mf.npartitions
self.partition_size = mf.partition_size
self.shape = (self.n_variants, self.n_samples, 3)
self.dtype = np.dtype(dtype)
self.precision = 64 if self.dtype.itemsize >= 8 else 32
self.ndim = 3
def __getitem__(self, idx: Any) -> NDArray:
if not isinstance(idx, tuple):
raise IndexError(f"Indexer must be tuple (received {type(idx)})")
if len(idx) != self.ndim:
raise IndexError(
f"Indexer must have {self.ndim} items (received {len(idx)} slices)"
)
if not all(isinstance(i, slice) or isinstance(i, int) for i in idx):
raise IndexError(
f"Indexer must contain only slices or ints (received types {[type(i) for i in idx]})"
)
# Determine which dims should have unit size in result
squeeze_dims = tuple(i for i in range(len(idx)) if isinstance(idx[i], int))
# Convert all indexers to slices
idx = tuple(slice(i, i + 1) if isinstance(i, int) else i for i in idx)
if idx[0].start == idx[0].stop:
return np.empty((0,) * self.ndim, dtype=self.dtype)
# Determine start and end partitions that correspond to the
# given variant dimension indexer
start_partition = idx[0].start // self.partition_size
start_partition_offset = idx[0].start % self.partition_size
end_partition = (idx[0].stop - 1) // self.partition_size
end_partition_offset = (idx[0].stop - 1) % self.partition_size
# Create a list of all offsets into the underlying file at which
# data for each variant begins
all_vaddr = []
with bgen_metafile(self.metafile_path) as mf:
for i in range(start_partition, end_partition + 1):
partition = mf.read_partition(i)
start_offset = start_partition_offset if i == start_partition else 0
end_offset = (
end_partition_offset + 1
if i == end_partition
else self.partition_size
)
vaddr = partition.variants.offset
all_vaddr.extend(vaddr[start_offset:end_offset].tolist())
# Read the probabilities for each variant, apply indexer for
# samples dimension to give probabilities for all genotypes,
# and then apply final genotype dimension indexer
with bgen_file(self.path) as bgen:
res = None
for i, vaddr in enumerate(all_vaddr):
probs = bgen.read_probability(vaddr, precision=self.precision)[idx[1]]
assert len(probs.shape) == 2 and probs.shape[1] == 3
if res is None:
res = np.zeros((len(all_vaddr), len(probs), 3), dtype=self.dtype)
res[i] = probs
res = res[..., idx[2]] # type: ignore[index]
return np.squeeze(res, axis=squeeze_dims)
def _split_alleles(allele_ids: bytes) -> List[bytes]:
alleles = allele_ids.split(b",")
if len(alleles) != 2:
raise NotImplementedError(
f"Bgen reads only supported for biallelic variants (found non-biallelic variant '{str(allele_ids)}')"
)
return alleles
def _read_metafile_partition(path: Path, partition: int) -> pd.DataFrame:
with bgen_metafile(path) as mf:
part = mf.read_partition(partition)
v = part.variants
allele_ids = np.array([_split_alleles(aid) for aid in v.allele_ids])
data = {
"id": v.id,
"rsid": v.rsid,
"chrom": v.chromosome,
"pos": v.position,
"a1": allele_ids[:, 0],
"a2": allele_ids[:, 1],
"offset": v.offset,
}
return pd.DataFrame(data).astype(METAFILE_DTYPE)
def read_metafile(path: PathType) -> dd.DataFrame:
"""Read cbgen metafile containing partitioned variant info"""
with bgen_metafile(path) as mf:
divisions = [mf.partition_size * i for i in range(mf.npartitions)] + [
mf.nvariants - 1
]
dfs = [
dask.delayed(_read_metafile_partition)(path, i)
for i in range(mf.npartitions)
]
meta = dd.utils.make_meta(METAFILE_DTYPE)
return dd.from_delayed(dfs, meta=meta, divisions=divisions)
def read_samples(path: PathType) -> pd.DataFrame:
"""Read BGEN .sample file"""
df = pd.read_csv(path, sep=" ", skiprows=[1], usecols=[0])
df.columns = ["sample_id"]
return df
def read_bgen(
path: PathType,
metafile_path: Optional[PathType] = None,
sample_path: Optional[PathType] = None,
chunks: Union[str, int, Tuple[int, int, int]] = "auto",
lock: bool = False,
persist: bool = True,
contig_dtype: DType = "str",
gp_dtype: DType = "float32",
) -> Dataset:
"""Read BGEN dataset.
Loads a single BGEN dataset as dask arrays within a Dataset
from a ``.bgen`` file.
Parameters
----------
path
Path to BGEN file.
metafile_path
Path to companion index file used to determine BGEN byte offsets.
Defaults to ``path`` + ".metafile" if not provided.
This file is necessary for reading BGEN genotype probabilities and it will be
generated the first time the file is read if it does not already exist.
If it needs to be created, it can make the first call to this function
much slower than subsequent calls.
sample_path
Path to ``.sample`` file, by default None. This is used to fetch sample identifiers
and when provided it is preferred over sample identifiers embedded in the ``.bgen`` file.
chunks
Chunk size for genotype probability data (3 dimensions),
by default "auto".
lock
Whether or not to synchronize concurrent reads of
file blocks, by default False. This is passed through to
[dask.array.from_array](https://docs.dask.org/en/latest/array-api.html#dask.array.from_array).
persist
Whether or not to persist variant information in memory, by default True.
This is an important performance consideration as the metadata file for this data will
be read multiple times when False.
contig_dtype
Data type for contig names, by default "str".
This may also be an integer type (e.g. "int"), but will fail if any of the contig names
cannot be converted to integers.
gp_dtype
Data type for genotype probabilities, by default "float32".
Warnings
--------
Only bi-allelic, diploid BGEN files are currently supported.
Returns
-------
A dataset containing the following variables:
- :data:`sgkit.variables.variant_id_spec` (variants)
- :data:`sgkit.variables.variant_contig_spec` (variants)
- :data:`sgkit.variables.variant_position_spec` (variants)
- :data:`sgkit.variables.variant_allele_spec` (variants)
- :data:`sgkit.variables.sample_id_spec` (samples)
- :data:`sgkit.variables.call_dosage_spec` (variants, samples)
- :data:`sgkit.variables.call_dosage_mask_spec` (variants, samples)
- :data:`sgkit.variables.call_genotype_probability_spec` (variants, samples, genotypes)
- :data:`sgkit.variables.call_genotype_probability_mask_spec` (variants, samples, genotypes)
"""
if isinstance(chunks, tuple) and len(chunks) != 3:
raise ValueError(f"`chunks` must be tuple with 3 items, not {chunks}")
if not np.issubdtype(gp_dtype, np.floating):
raise ValueError(
f"`gp_dtype` must be a floating point data type, not {gp_dtype}"
)
if not np.issubdtype(contig_dtype, np.integer) and np.dtype(
contig_dtype
).kind not in {"U", "S"}:
raise ValueError(
f"`contig_dtype` must be of string or int type, not {contig_dtype}"
)
path = Path(path)
sample_path = Path(sample_path) if sample_path else path.with_suffix(".sample")
if sample_path.exists():
sample_id = read_samples(sample_path).sample_id.values.astype("U")
else:
sample_id = _default_sample_ids(path)
bgen_reader = BgenReader(path, metafile_path=metafile_path, dtype=gp_dtype)
df = read_metafile(bgen_reader.metafile_path)
if persist:
df = df.persist()
arrs = dataframe_to_dict(df, METAFILE_DTYPE)
variant_id = arrs["id"]
variant_contig: ArrayLike = arrs["chrom"].astype(contig_dtype)
variant_contig, variant_contig_names = encode_contigs(variant_contig)
variant_contig_names = list(variant_contig_names)
variant_position = arrs["pos"]
variant_allele = da.hstack((arrs["a1"][:, np.newaxis], arrs["a2"][:, np.newaxis]))
call_genotype_probability = da.from_array(
bgen_reader,
chunks=chunks,
lock=lock,
fancy=False,
asarray=False,
name=f"{bgen_reader.name}:read_bgen:{path}",
)
call_dosage = _to_dosage(call_genotype_probability)
ds: Dataset = create_genotype_dosage_dataset(
variant_contig_names=variant_contig_names,
variant_contig=variant_contig,
variant_position=variant_position,
variant_allele=variant_allele,
sample_id=sample_id,
call_dosage=call_dosage,
call_genotype_probability=call_genotype_probability,
variant_id=variant_id,
)
return ds
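# Illustrative usage sketch (editorial addition, not part of the original module);
# "example.bgen" is a placeholder path rather than a file shipped with sgkit.
def _example_read_bgen() -> Dataset:
    ds = read_bgen("example.bgen", chunks=(100, -1, -1), gp_dtype="float32")
    # call_genotype_probability is laid out as (variants, samples, genotypes=3)
    return ds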
def _default_sample_ids(path: PathType) -> ArrayLike:
"""Fetch or generate sample ids"""
with bgen_file(path) as bgen:
if bgen.contain_samples:
return bgen.read_samples()
else:
return np.char.add(b"sample_", np.arange(bgen.nsamples).astype("S")) # type: ignore[no-untyped-call]
def _to_dosage(probs: ArrayLike) -> ArrayLike:
"""Calculate the dosage from genotype likelihoods (probabilities)"""
assert (
probs.shape[-1] == 3
), f"Expecting genotype (trailing) dimension of size 3, got array of shape {probs.shape}"
return probs[..., 1] + 2 * probs[..., 2]
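# Worked example (editorial addition, illustrative only): for genotype probabilities
# [P(hom ref), P(het), P(hom alt)] = [0.1, 0.2, 0.7] the expected alternate-allele
# dosage is 0.2 + 2 * 0.7 = 1.6.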
########################
# Rechunking Functions #
########################
def encode_variables(
ds: Dataset,
chunk_length: int,
chunk_width: int,
compressor: Optional[Any] = zarr.Blosc(cname="zstd", clevel=7, shuffle=2),
probability_dtype: Optional[Any] = "uint8",
) -> Dict[Hashable, Dict[str, Any]]:
encoding = {}
for v in ds:
e = {}
if compressor is not None:
e.update({"compressor": compressor})
if v in GT_DATA_VARS:
e.update({"chunks": (chunk_length, chunk_width) + ds[v].shape[2:]})
if probability_dtype is not None and v == "call_genotype_probability":
dtype = np.dtype(probability_dtype)
# Xarray will decode into float32 so any int greater than
# 16 bits will cause overflow/underflow
# See https://en.wikipedia.org/wiki/Floating-point_arithmetic#Internal_representation
# *bits precision column for single precision floats
if dtype not in [np.uint8, np.uint16]: # type: ignore[comparison-overlap]
raise ValueError(
"Probability integer dtype invalid, must "
f"be uint8 or uint16 not {probability_dtype}"
)
divisor = np.iinfo(dtype).max - 1
e.update(
{
"dtype": probability_dtype,
"add_offset": -1.0 / divisor,
"scale_factor": 1.0 / divisor,
"_FillValue": 0,
}
)
if e:
encoding[v] = e
return encoding
def pack_variables(ds: Dataset) -> Dataset:
# Remove dosage as it is unnecessary and should be redefined
# based on encoded probabilities later (w/ reduced precision)
ds = ds.drop_vars(["call_dosage", "call_dosage_mask"], errors="ignore")
# Remove homozygous reference GP and redefine mask
gp = ds["call_genotype_probability"][..., 1:]
gp_mask = ds["call_genotype_probability_mask"].any(dim="genotypes")
ds = ds.drop_vars(["call_genotype_probability", "call_genotype_probability_mask"])
ds = ds.assign(call_genotype_probability=gp, call_genotype_probability_mask=gp_mask)
return ds
def unpack_variables(ds: Dataset, dtype: DType = "float32") -> Dataset:
# Restore homozygous reference GP
gp = ds["call_genotype_probability"].astype(dtype)
if gp.sizes["genotypes"] != 2:
raise ValueError(
"Expecting variable 'call_genotype_probability' to have genotypes "
f"dimension of size 2 (received sizes = {dict(gp.sizes)})"
)
ds = ds.drop_vars("call_genotype_probability")
ds["call_genotype_probability"] = xr.concat(
[1 - gp.sum(dim="genotypes", skipna=False), gp], dim="genotypes"
)
# Restore dosage
ds["call_dosage"] = gp[..., 0] + 2 * gp[..., 1]
ds["call_dosage_mask"] = ds["call_genotype_probability_mask"]
ds["call_genotype_probability_mask"] = ds[
"call_genotype_probability_mask"
].broadcast_like(ds["call_genotype_probability"])
return ds
def rechunk_bgen(
ds: Dataset,
output: Union[PathType, MutableMapping[str, bytes]],
*,
chunk_length: int = 10_000,
chunk_width: int = 1_000,
compressor: Optional[Any] = zarr.Blosc(cname="zstd", clevel=7, shuffle=2),
probability_dtype: Optional[DType] = "uint8",
max_mem: str = "4GB",
pack: bool = True,
tempdir: Optional[PathType] = None,
) -> Dataset:
"""Rechunk BGEN dataset as Zarr.
    This function uses the rechunker package (https://rechunker.readthedocs.io/en/latest/)
    to rechunk certain fields in a provided Dataset for better downstream performance.
Depending on the system memory available (and the `max_mem` setting) this
rechunking may occur without the need of any intermediate data store. Otherwise,
approximately as much disk space is required as was needed to store the original
BGEN data. Experiments show that this Zarr representation is ~20% larger even
with all available optimizations and fairly aggressive compression (i.e. the
default `clevel` 7).
Note that this function is not evaluated lazily. The rechunking algorithm
will run inline so calls to it may be slow. The resulting Dataset is
generated based on the final, serialized Zarr data.
Parameters
----------
ds
Dataset to rechunk, typically the result from `read_bgen`.
output
Zarr store or path to directory in file system.
chunk_length
Length (number of variants) of chunks in which data are stored, by default 10_000.
chunk_width
Width (number of samples) to use when storing chunks in output, by default 1_000.
compressor
Zarr compressor, no compression is used when set as None.
probability_dtype
Data type used to encode genotype probabilities, must be either uint8 or uint16.
Setting this parameter results in a loss of precision. If None, probabilities
will not be altered when stored.
max_mem
The amount of memory (in bytes) that workers are allowed to use. A string
(e.g. 100MB) can also be used.
pack
Whether or not to optimize variable representations by removing unnecessary
dimensions and elements. This includes storing 2 genotypes instead of 3, omitting
dosage and collapsing the genotype probability mask to 2 dimensions. All of
the above are restored in the resulting Dataset at the expense of extra
computations on read.
tempdir
Temporary directory where intermediate files are stored. The default None means
use the system default temporary directory.
Warnings
--------
    This function is only applicable to diploid, bi-allelic BGEN datasets.
Returns
-------
Dataset
The rechunked dataset.
"""
if isinstance(output, Path):
output = str(output)
chunk_length = min(chunk_length, ds.dims["variants"])
chunk_width = min(chunk_width, ds.dims["samples"])
if pack:
ds = pack_variables(ds)
encoding = encode_variables(
ds,
chunk_length=chunk_length,
chunk_width=chunk_width,
compressor=compressor,
probability_dtype=probability_dtype,
)
target_chunks = {
var: encoding[var]["chunks"] for var in encoding if "chunks" in encoding[var]
}
target_options = {
var: {k: v for k, v in encoding[var].items() if k != "chunks"}
for var in encoding
}
with tempfile.TemporaryDirectory(
prefix="bgen_to_zarr_", suffix=".zarr", dir=tempdir
) as tmpdir:
rechunked = rechunker_api.rechunk(
ds,
max_mem=max_mem,
target_chunks=target_chunks,
target_store=output,
target_options=target_options,
temp_store=tmpdir,
executor="dask",
)
rechunked.execute()
zarr.consolidate_metadata(output)
ds: Dataset = xr.open_zarr(output, concat_characters=False) # type: ignore[no-untyped-call]
if pack:
ds = unpack_variables(ds)
return ds
def bgen_to_zarr(
input: PathType,
output: Union[PathType, MutableMapping[str, bytes]],
region: Optional[Mapping[Hashable, Any]] = None,
chunk_length: int = 10_000,
chunk_width: int = 1_000,
temp_chunk_length: int = 100,
compressor: Optional[Any] = zarr.Blosc(cname="zstd", clevel=7, shuffle=2),
probability_dtype: Optional[DType] = "uint8",
max_mem: str = "4GB",
pack: bool = True,
tempdir: Optional[PathType] = None,
) -> Dataset:
"""Convert a BGEN file to a Zarr on-disk store.
This function is a convenience for calling :func:`read_bgen` followed by
:func:`rechunk_bgen`.
Parameters
----------
input
Path to local BGEN dataset.
output
Zarr store or path to directory in file system.
region
Indexers on dataset dimensions used to define a subset of data to convert.
Must be None or a dict with keys matching dimension names and values
equal to integers or slice objects. This is passed directly to `Dataset.isel`
so it has the same semantics.
chunk_length
Length (number of variants) of chunks in which data are stored, by default 10_000.
chunk_width
Width (number of samples) to use when storing chunks in output, by default 1_000.
temp_chunk_length
Length of chunks used in raw BGEN read, by default 100. This defines the vertical
chunking (i.e. in the variants dimension) used when reading the raw data and because
there is no horizontal chunking at this phase (i.e. in the samples dimension), this
value should be much smaller than the target `chunk_length`.
compressor
Zarr compressor, by default Blosc + zstd with compression level 7. No compression
is used when set as None.
probability_dtype
Data type used to encode genotype probabilities, must be either uint8 or uint16.
Setting this parameter results in a loss of precision. If None, probabilities
will not be altered when stored.
max_mem
The amount of memory (in bytes) that workers are allowed to use. A string
(e.g. 100MB) can also be used.
pack
Whether or not to optimize variable representations by removing unnecessary
dimensions and elements. This includes storing 2 genotypes instead of 3, omitting
dosage and collapsing the genotype probability mask to 2 dimensions. All of
the above are restored in the resulting Dataset at the expense of extra
computations on read.
tempdir
Temporary directory where intermediate files are stored. The default None means
use the system default temporary directory.
Warnings
--------
    This function is only applicable to diploid, bi-allelic BGEN datasets.
Returns
-------
Dataset
The rechunked dataset.
"""
ds = read_bgen(input, chunks=(temp_chunk_length, -1, -1))
if region is not None:
ds = ds.isel(indexers=region)
return rechunk_bgen(
ds,
output,
chunk_length=chunk_length,
chunk_width=chunk_width,
compressor=compressor,
probability_dtype=probability_dtype,
max_mem=max_mem,
pack=pack,
tempdir=tempdir,
)
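# Illustrative end-to-end sketch (editorial addition, not part of the original module);
# "example.bgen" and "example.zarr" are placeholder paths.
def _example_bgen_to_zarr() -> Dataset:
    # Convert a BGEN file to a rechunked Zarr store and reopen it as a Dataset.
    return bgen_to_zarr(
        "example.bgen",
        "example.zarr",
        chunk_length=10_000,
        chunk_width=1_000,
        temp_chunk_length=100,
    )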
| 36.959083
| 113
| 0.64299
|
import logging
import tempfile
import time
from pathlib import Path
from typing import (
Any,
Dict,
Hashable,
List,
Mapping,
MutableMapping,
Optional,
Tuple,
Union,
)
import dask
import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
import xarray as xr
import zarr
from cbgen import bgen_file, bgen_metafile
from rechunker import api as rechunker_api
from xarray import Dataset
from sgkit import create_genotype_dosage_dataset
from sgkit.io.utils import dataframe_to_dict, encode_contigs
from sgkit.typing import ArrayLike, DType, NDArray, PathType
logger = logging.getLogger(__name__)
GT_DATA_VARS = [
"call_genotype_probability",
"call_genotype_probability_mask",
"call_dosage",
"call_dosage_mask",
]
METAFILE_DTYPE = dict(
[
("id", "S"),
("rsid", "S"),
("chrom", "S"),
("pos", "int32"),
("a1", "S"),
("a2", "S"),
("offset", "int64"),
]
)
class BgenReader:
name = "bgen_reader"
def __init__(
self,
path: PathType,
metafile_path: Optional[PathType] = None,
dtype: DType = "float32",
) -> None:
self.path = Path(path)
self.metafile_path = (
Path(metafile_path) if metafile_path else self.path.with_suffix(".metafile")
)
with bgen_file(self.path) as bgen:
self.n_variants = bgen.nvariants
self.n_samples = bgen.nsamples
if not self.metafile_path.exists():
start = time.time()
logger.info(
f"Generating BGEN metafile for '{self.path}' (this may take a while)"
)
bgen.create_metafile(self.metafile_path, verbose=False)
stop = time.time()
logger.info(
f"BGEN metafile generation complete ({stop - start:.0f} seconds)"
)
with bgen_metafile(self.metafile_path) as mf:
assert self.n_variants == mf.nvariants
self.npartitions = mf.npartitions
self.partition_size = mf.partition_size
self.shape = (self.n_variants, self.n_samples, 3)
self.dtype = np.dtype(dtype)
self.precision = 64 if self.dtype.itemsize >= 8 else 32
self.ndim = 3
def __getitem__(self, idx: Any) -> NDArray:
if not isinstance(idx, tuple):
raise IndexError(f"Indexer must be tuple (received {type(idx)})")
if len(idx) != self.ndim:
raise IndexError(
f"Indexer must have {self.ndim} items (received {len(idx)} slices)"
)
if not all(isinstance(i, slice) or isinstance(i, int) for i in idx):
raise IndexError(
f"Indexer must contain only slices or ints (received types {[type(i) for i in idx]})"
)
squeeze_dims = tuple(i for i in range(len(idx)) if isinstance(idx[i], int))
idx = tuple(slice(i, i + 1) if isinstance(i, int) else i for i in idx)
if idx[0].start == idx[0].stop:
return np.empty((0,) * self.ndim, dtype=self.dtype)
start_partition = idx[0].start // self.partition_size
start_partition_offset = idx[0].start % self.partition_size
end_partition = (idx[0].stop - 1) // self.partition_size
end_partition_offset = (idx[0].stop - 1) % self.partition_size
all_vaddr = []
with bgen_metafile(self.metafile_path) as mf:
for i in range(start_partition, end_partition + 1):
partition = mf.read_partition(i)
start_offset = start_partition_offset if i == start_partition else 0
end_offset = (
end_partition_offset + 1
if i == end_partition
else self.partition_size
)
vaddr = partition.variants.offset
all_vaddr.extend(vaddr[start_offset:end_offset].tolist())
with bgen_file(self.path) as bgen:
res = None
for i, vaddr in enumerate(all_vaddr):
probs = bgen.read_probability(vaddr, precision=self.precision)[idx[1]]
assert len(probs.shape) == 2 and probs.shape[1] == 3
if res is None:
res = np.zeros((len(all_vaddr), len(probs), 3), dtype=self.dtype)
res[i] = probs
res = res[..., idx[2]]
return np.squeeze(res, axis=squeeze_dims)
def _split_alleles(allele_ids: bytes) -> List[bytes]:
alleles = allele_ids.split(b",")
if len(alleles) != 2:
raise NotImplementedError(
f"Bgen reads only supported for biallelic variants (found non-biallelic variant '{str(allele_ids)}')"
)
return alleles
def _read_metafile_partition(path: Path, partition: int) -> pd.DataFrame:
with bgen_metafile(path) as mf:
part = mf.read_partition(partition)
v = part.variants
allele_ids = np.array([_split_alleles(aid) for aid in v.allele_ids])
data = {
"id": v.id,
"rsid": v.rsid,
"chrom": v.chromosome,
"pos": v.position,
"a1": allele_ids[:, 0],
"a2": allele_ids[:, 1],
"offset": v.offset,
}
return pd.DataFrame(data).astype(METAFILE_DTYPE)
def read_metafile(path: PathType) -> dd.DataFrame:
with bgen_metafile(path) as mf:
divisions = [mf.partition_size * i for i in range(mf.npartitions)] + [
mf.nvariants - 1
]
dfs = [
dask.delayed(_read_metafile_partition)(path, i)
for i in range(mf.npartitions)
]
meta = dd.utils.make_meta(METAFILE_DTYPE)
return dd.from_delayed(dfs, meta=meta, divisions=divisions)
def read_samples(path: PathType) -> pd.DataFrame:
df = pd.read_csv(path, sep=" ", skiprows=[1], usecols=[0])
df.columns = ["sample_id"]
return df
def read_bgen(
path: PathType,
metafile_path: Optional[PathType] = None,
sample_path: Optional[PathType] = None,
chunks: Union[str, int, Tuple[int, int, int]] = "auto",
lock: bool = False,
persist: bool = True,
contig_dtype: DType = "str",
gp_dtype: DType = "float32",
) -> Dataset:
if isinstance(chunks, tuple) and len(chunks) != 3:
raise ValueError(f"`chunks` must be tuple with 3 items, not {chunks}")
if not np.issubdtype(gp_dtype, np.floating):
raise ValueError(
f"`gp_dtype` must be a floating point data type, not {gp_dtype}"
)
if not np.issubdtype(contig_dtype, np.integer) and np.dtype(
contig_dtype
).kind not in {"U", "S"}:
raise ValueError(
f"`contig_dtype` must be of string or int type, not {contig_dtype}"
)
path = Path(path)
sample_path = Path(sample_path) if sample_path else path.with_suffix(".sample")
if sample_path.exists():
sample_id = read_samples(sample_path).sample_id.values.astype("U")
else:
sample_id = _default_sample_ids(path)
bgen_reader = BgenReader(path, metafile_path=metafile_path, dtype=gp_dtype)
df = read_metafile(bgen_reader.metafile_path)
if persist:
df = df.persist()
arrs = dataframe_to_dict(df, METAFILE_DTYPE)
variant_id = arrs["id"]
variant_contig: ArrayLike = arrs["chrom"].astype(contig_dtype)
variant_contig, variant_contig_names = encode_contigs(variant_contig)
variant_contig_names = list(variant_contig_names)
variant_position = arrs["pos"]
variant_allele = da.hstack((arrs["a1"][:, np.newaxis], arrs["a2"][:, np.newaxis]))
call_genotype_probability = da.from_array(
bgen_reader,
chunks=chunks,
lock=lock,
fancy=False,
asarray=False,
name=f"{bgen_reader.name}:read_bgen:{path}",
)
call_dosage = _to_dosage(call_genotype_probability)
ds: Dataset = create_genotype_dosage_dataset(
variant_contig_names=variant_contig_names,
variant_contig=variant_contig,
variant_position=variant_position,
variant_allele=variant_allele,
sample_id=sample_id,
call_dosage=call_dosage,
call_genotype_probability=call_genotype_probability,
variant_id=variant_id,
)
return ds
def _default_sample_ids(path: PathType) -> ArrayLike:
with bgen_file(path) as bgen:
if bgen.contain_samples:
return bgen.read_samples()
else:
return np.char.add(b"sample_", np.arange(bgen.nsamples).astype("S"))
def _to_dosage(probs: ArrayLike) -> ArrayLike:
assert (
probs.shape[-1] == 3
), f"Expecting genotype (trailing) dimension of size 3, got array of shape {probs.shape}"
return probs[..., 1] + 2 * probs[..., 2]
genotype_probability":
dtype = np.dtype(probability_dtype)
if dtype not in [np.uint8, np.uint16]:
raise ValueError(
"Probability integer dtype invalid, must "
f"be uint8 or uint16 not {probability_dtype}"
)
divisor = np.iinfo(dtype).max - 1
e.update(
{
"dtype": probability_dtype,
"add_offset": -1.0 / divisor,
"scale_factor": 1.0 / divisor,
"_FillValue": 0,
}
)
if e:
encoding[v] = e
return encoding
def pack_variables(ds: Dataset) -> Dataset:
ds = ds.drop_vars(["call_dosage", "call_dosage_mask"], errors="ignore")
gp = ds["call_genotype_probability"][..., 1:]
gp_mask = ds["call_genotype_probability_mask"].any(dim="genotypes")
ds = ds.drop_vars(["call_genotype_probability", "call_genotype_probability_mask"])
ds = ds.assign(call_genotype_probability=gp, call_genotype_probability_mask=gp_mask)
return ds
def unpack_variables(ds: Dataset, dtype: DType = "float32") -> Dataset:
gp = ds["call_genotype_probability"].astype(dtype)
if gp.sizes["genotypes"] != 2:
raise ValueError(
"Expecting variable 'call_genotype_probability' to have genotypes "
f"dimension of size 2 (received sizes = {dict(gp.sizes)})"
)
ds = ds.drop_vars("call_genotype_probability")
ds["call_genotype_probability"] = xr.concat(
[1 - gp.sum(dim="genotypes", skipna=False), gp], dim="genotypes"
)
ds["call_dosage"] = gp[..., 0] + 2 * gp[..., 1]
ds["call_dosage_mask"] = ds["call_genotype_probability_mask"]
ds["call_genotype_probability_mask"] = ds[
"call_genotype_probability_mask"
].broadcast_like(ds["call_genotype_probability"])
return ds
def rechunk_bgen(
ds: Dataset,
output: Union[PathType, MutableMapping[str, bytes]],
*,
chunk_length: int = 10_000,
chunk_width: int = 1_000,
compressor: Optional[Any] = zarr.Blosc(cname="zstd", clevel=7, shuffle=2),
probability_dtype: Optional[DType] = "uint8",
max_mem: str = "4GB",
pack: bool = True,
tempdir: Optional[PathType] = None,
) -> Dataset:
if isinstance(output, Path):
output = str(output)
chunk_length = min(chunk_length, ds.dims["variants"])
chunk_width = min(chunk_width, ds.dims["samples"])
if pack:
ds = pack_variables(ds)
encoding = encode_variables(
ds,
chunk_length=chunk_length,
chunk_width=chunk_width,
compressor=compressor,
probability_dtype=probability_dtype,
)
target_chunks = {
var: encoding[var]["chunks"] for var in encoding if "chunks" in encoding[var]
}
target_options = {
var: {k: v for k, v in encoding[var].items() if k != "chunks"}
for var in encoding
}
with tempfile.TemporaryDirectory(
prefix="bgen_to_zarr_", suffix=".zarr", dir=tempdir
) as tmpdir:
rechunked = rechunker_api.rechunk(
ds,
max_mem=max_mem,
target_chunks=target_chunks,
target_store=output,
target_options=target_options,
temp_store=tmpdir,
executor="dask",
)
rechunked.execute()
zarr.consolidate_metadata(output)
ds: Dataset = xr.open_zarr(output, concat_characters=False)
if pack:
ds = unpack_variables(ds)
return ds
def bgen_to_zarr(
input: PathType,
output: Union[PathType, MutableMapping[str, bytes]],
region: Optional[Mapping[Hashable, Any]] = None,
chunk_length: int = 10_000,
chunk_width: int = 1_000,
temp_chunk_length: int = 100,
compressor: Optional[Any] = zarr.Blosc(cname="zstd", clevel=7, shuffle=2),
probability_dtype: Optional[DType] = "uint8",
max_mem: str = "4GB",
pack: bool = True,
tempdir: Optional[PathType] = None,
) -> Dataset:
ds = read_bgen(input, chunks=(temp_chunk_length, -1, -1))
if region is not None:
ds = ds.isel(indexers=region)
return rechunk_bgen(
ds,
output,
chunk_length=chunk_length,
chunk_width=chunk_width,
compressor=compressor,
probability_dtype=probability_dtype,
max_mem=max_mem,
pack=pack,
tempdir=tempdir,
)
| true
| true
|
790364b2b7522f5e871c2f5f6190986de71a651e
| 516
|
py
|
Python
|
core/migrations/0058_auto_20200421_2342.py
|
ArthurGorgonio/suggestclasses
|
7e6ce79cca6cd92ed8a38b12707f900f019508c8
|
[
"MIT"
] | null | null | null |
core/migrations/0058_auto_20200421_2342.py
|
ArthurGorgonio/suggestclasses
|
7e6ce79cca6cd92ed8a38b12707f900f019508c8
|
[
"MIT"
] | null | null | null |
core/migrations/0058_auto_20200421_2342.py
|
ArthurGorgonio/suggestclasses
|
7e6ce79cca6cd92ed8a38b12707f900f019508c8
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.5 on 2020-04-22 02:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0057_sugestaoturma_horarios'),
]
operations = [
migrations.RemoveField(
model_name='sugestaoturma',
name='horarios',
),
migrations.AddField(
model_name='sugestaoturma',
name='horarios',
field=models.ManyToManyField(to='core.Horario'),
),
]
| 22.434783
| 60
| 0.585271
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0057_sugestaoturma_horarios'),
]
operations = [
migrations.RemoveField(
model_name='sugestaoturma',
name='horarios',
),
migrations.AddField(
model_name='sugestaoturma',
name='horarios',
field=models.ManyToManyField(to='core.Horario'),
),
]
| true
| true
|
790365b158d207c9ebf6d939cfdb6c52a67232f5
| 681
|
py
|
Python
|
setup.py
|
bjoekeldude/edu_python_mini_lib
|
88250f145d3a97ea196f6be833bd61c102979f05
|
[
"MIT"
] | null | null | null |
setup.py
|
bjoekeldude/edu_python_mini_lib
|
88250f145d3a97ea196f6be833bd61c102979f05
|
[
"MIT"
] | null | null | null |
setup.py
|
bjoekeldude/edu_python_mini_lib
|
88250f145d3a97ea196f6be833bd61c102979f05
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
VERSION = '0.0.1'
DESCRIPTION = 'edu-lib'
LONG_DESCRIPTION = 'Library for learning the basic structure.'
setup(
name="mylibrary",
version=VERSION,
author="Stephan Bökelmann",
author_email="sb@gruppe.ai",
scripts=[],
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
packages=find_packages(),
install_requires=[],
url="",
keywords=['python', 'debugging'],
classifiers= [
"Intended Audience :: Education",
"Programming Language :: Python :: 3",
"Operating System :: POSIX",
]
)
| 26.192308
| 59
| 0.581498
|
from setuptools import setup, find_packages
VERSION = '0.0.1'
DESCRIPTION = 'edu-lib'
LONG_DESCRIPTION = 'Library for learning the basic structure.'
setup(
name="mylibrary",
version=VERSION,
author="Stephan Bökelmann",
author_email="sb@gruppe.ai",
scripts=[],
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
packages=find_packages(),
install_requires=[],
url="",
keywords=['python', 'debugging'],
classifiers= [
"Intended Audience :: Education",
"Programming Language :: Python :: 3",
"Operating System :: POSIX",
]
)
| true
| true
|
790365ece16e164a9628b1112873cb5cd411fbaa
| 162
|
py
|
Python
|
tests/functional/test_tweens.py
|
kevinjalbert/h
|
0f260bf59847f27eff720eeb3c3b2468571412b2
|
[
"BSD-2-Clause"
] | 1
|
2020-06-19T01:49:39.000Z
|
2020-06-19T01:49:39.000Z
|
tests/functional/test_tweens.py
|
kevinjalbert/h
|
0f260bf59847f27eff720eeb3c3b2468571412b2
|
[
"BSD-2-Clause"
] | 5
|
2019-10-31T14:23:18.000Z
|
2019-11-15T19:24:27.000Z
|
tests/functional/test_tweens.py
|
liquidinvestigations/hypothesis-h
|
2eebc0b20823fc5bc42a8e8c33551a6d448ad6ba
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
class TestInvalidPathTweenFactory:
def test_it_400s_if_the_requested_path_isnt_utf8(self, app):
app.get("/%c5", status=400)
| 23.142857
| 64
| 0.697531
|
class TestInvalidPathTweenFactory:
def test_it_400s_if_the_requested_path_isnt_utf8(self, app):
app.get("/%c5", status=400)
| true
| true
|
79036680f5e4af3db294cdc0dec88cb9d0ded8ce
| 3,563
|
py
|
Python
|
monitor.py
|
mvelten/office-weather
|
9e6f8da90f12dcb5a947da930477ce1ffc3b163d
|
[
"MIT"
] | null | null | null |
monitor.py
|
mvelten/office-weather
|
9e6f8da90f12dcb5a947da930477ce1ffc3b163d
|
[
"MIT"
] | null | null | null |
monitor.py
|
mvelten/office-weather
|
9e6f8da90f12dcb5a947da930477ce1ffc3b163d
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python
# based on code by henryk ploetz
# https://hackaday.io/project/5301-reverse-engineering-a-low-cost-usb-co-monitor/log/17909-all-your-base-are-belong-to-us
# and the wooga office weather project
# https://blog.wooga.com/woogas-office-weather-wow-67e24a5338
import os, sys, fcntl, time, socket
from prometheus_client import start_http_server, Gauge, Summary, Counter
import requests
def callback_function(error, result):
if error:
print(error)
return
print(result)
def hd(d):
return " ".join("%02X" % e for e in d)
def now():
return int(time.time())
# Create a metric to track time spent and requests made.
decrypt_time = Summary('decrypt_time_seconds', 'Time spent decrypting')
# Decorate function with metric.
@decrypt_time.time()
def decrypt(key, data):
cstate = [0x48, 0x74, 0x65, 0x6D, 0x70, 0x39, 0x39, 0x65]
shuffle = [2, 4, 0, 7, 1, 6, 5, 3]
phase1 = [0] * 8
for i, o in enumerate(shuffle):
phase1[o] = data[i]
phase2 = [0] * 8
for i in range(8):
phase2[i] = phase1[i] ^ key[i]
phase3 = [0] * 8
for i in range(8):
phase3[i] = ( (phase2[i] >> 3) | (phase2[ (i-1+8)%8 ] << 5) ) & 0xff
ctmp = [0] * 8
for i in range(8):
ctmp[i] = ( (cstate[i] >> 4) | (cstate[i]<<4) ) & 0xff
out = [0] * 8
for i in range(8):
out[i] = (0x100 + phase3[i] - ctmp[i]) & 0xff
return out
if __name__ == "__main__":
"""main"""
# use lock on socket to indicate that script is already running
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
## Create an abstract socket, by prefixing it with null.
s.bind('\0postconnect_gateway_notify_lock')
except socket.error, e:
# if script is already running just exit silently
sys.exit(0)
key = [0xc4, 0xc6, 0xc0, 0x92, 0x40, 0x23, 0xdc, 0x96]
fp = open(sys.argv[1], "a+b", 0)
HIDIOCSFEATURE_9 = 0xC0094806
set_report = "\x00" + "".join(chr(e) for e in key)
fcntl.ioctl(fp, HIDIOCSFEATURE_9, set_report)
values = {}
stamp = now()
notified = False
# define Gauge metrice for temp and co2
co2_metric = Gauge('co2_value', 'Current CO_2 Value from sensor')
temperature_metric = Gauge('temperature_value', 'Current Temperature Value from sensor')
# define loop counter
loop_counter = Counter('loops_total', 'Number of loops to query the sensor for values')
# Start up the server to expose the metrics.
start_http_server(8000)
while True:
loop_counter.inc()
data = list(ord(e) for e in fp.read(8))
decrypted = decrypt(key, data)
if decrypted[4] != 0x0d or (sum(decrypted[:3]) & 0xff) != decrypted[3]:
print hd(data), " => ", hd(decrypted), "Checksum error"
else:
op = decrypted[0]
val = decrypted[1] << 8 | decrypted[2]
values[op] = val
if (0x50 in values) and (0x42 in values):
co2 = values[0x50]
tmp = (values[0x42]/16.0-273.15)
# check if it's a sensible value
# (i.e. within the measuring range plus some margin)
if (co2 > 5000 or co2 < 0):
continue
if now() - stamp > 10:
print "TMP %3.1f" % (tmp)
temperature_metric.set(tmp)
print "CO2 %4i" % (co2)
co2_metric.set(co2)
print ">>>"
stamp = now()
| 30.452991
| 121
| 0.575639
|
import os, sys, fcntl, time, socket
from prometheus_client import start_http_server, Gauge, Summary, Counter
import requests
def callback_function(error, result):
if error:
print(error)
return
print(result)
def hd(d):
return " ".join("%02X" % e for e in d)
def now():
return int(time.time())
decrypt_time = Summary('decrypt_time_seconds', 'Time spent decrypting')
@decrypt_time.time()
def decrypt(key, data):
cstate = [0x48, 0x74, 0x65, 0x6D, 0x70, 0x39, 0x39, 0x65]
shuffle = [2, 4, 0, 7, 1, 6, 5, 3]
phase1 = [0] * 8
for i, o in enumerate(shuffle):
phase1[o] = data[i]
phase2 = [0] * 8
for i in range(8):
phase2[i] = phase1[i] ^ key[i]
phase3 = [0] * 8
for i in range(8):
phase3[i] = ( (phase2[i] >> 3) | (phase2[ (i-1+8)%8 ] << 5) ) & 0xff
ctmp = [0] * 8
for i in range(8):
ctmp[i] = ( (cstate[i] >> 4) | (cstate[i]<<4) ) & 0xff
out = [0] * 8
for i in range(8):
out[i] = (0x100 + phase3[i] - ctmp[i]) & 0xff
return out
if __name__ == "__main__":
"""main"""
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
except socket.error, e:
sys.exit(0)
key = [0xc4, 0xc6, 0xc0, 0x92, 0x40, 0x23, 0xdc, 0x96]
fp = open(sys.argv[1], "a+b", 0)
HIDIOCSFEATURE_9 = 0xC0094806
set_report = "\x00" + "".join(chr(e) for e in key)
fcntl.ioctl(fp, HIDIOCSFEATURE_9, set_report)
values = {}
stamp = now()
notified = False
co2_metric = Gauge('co2_value', 'Current CO_2 Value from sensor')
temperature_metric = Gauge('temperature_value', 'Current Temperature Value from sensor')
loop_counter = Counter('loops_total', 'Number of loops to query the sensor for values')
start_http_server(8000)
while True:
loop_counter.inc()
data = list(ord(e) for e in fp.read(8))
decrypted = decrypt(key, data)
if decrypted[4] != 0x0d or (sum(decrypted[:3]) & 0xff) != decrypted[3]:
print hd(data), " => ", hd(decrypted), "Checksum error"
else:
op = decrypted[0]
val = decrypted[1] << 8 | decrypted[2]
values[op] = val
if (0x50 in values) and (0x42 in values):
co2 = values[0x50]
tmp = (values[0x42]/16.0-273.15)
# (i.e. within the measuring range plus some margin)
if (co2 > 5000 or co2 < 0):
continue
if now() - stamp > 10:
print "TMP %3.1f" % (tmp)
temperature_metric.set(tmp)
print "CO2 %4i" % (co2)
co2_metric.set(co2)
print ">>>"
stamp = now()
| false
| true
|
790367d4b5a5c50d54a33a4c20347f5a8e4d547f
| 4,611
|
py
|
Python
|
Concordance/condordance_utils.py
|
erikvanmulligen/etransafe-heatmap
|
effba453d661f2feaa756640a730483fa41e37fc
|
[
"MIT"
] | null | null | null |
Concordance/condordance_utils.py
|
erikvanmulligen/etransafe-heatmap
|
effba453d661f2feaa756640a730483fa41e37fc
|
[
"MIT"
] | 1
|
2021-02-11T14:59:37.000Z
|
2021-02-11T14:59:37.000Z
|
Concordance/condordance_utils.py
|
erikvanmulligen/etransafe-heatmap
|
effba453d661f2feaa756640a730483fa41e37fc
|
[
"MIT"
] | null | null | null |
import mysql.connector
import json
import os
import requests
def getAllFindings(host, database, user, password, table, where):
db = mysql.connector.connect(host=host, database=database, user=user, password=password)
cursor = db.cursor()
cursor.execute("SELECT distinct findingCode, specimenOrganCode FROM " + table + " " + where)
return cursor.fetchall()
def getDrugs(api, filename):
if filename is None:
drugs = getDrugsMapping(api)
else:
if os.path.isfile(filename):
with open(filename, 'r') as drug_file:
drugs = json.loads(drug_file.read())
else:
drugs = getDrugsMapping(api)
with open(filename, 'w') as drug_file:
drug_file.write(json.dumps(drugs))
return drugs
def getDrugsMapping(api):
result = {}
clinicalCompounds = getClinicalCompounds(api)
preclinicalCompounds = getPreclinicalCompounds(api)
# iterate over the clinical and preclinical compounds and match them om inchiKey
for clinicalCompound in clinicalCompounds:
for preclinicalCompound in preclinicalCompounds:
if (clinicalCompound['inchiKey'] is not None) and (clinicalCompound['inchiKey'] == preclinicalCompound['inchiKey']):
inchiKey = clinicalCompound['inchiKey']
if inchiKey not in result:
result[inchiKey] = {
'inchiKey': inchiKey,
'clinicalName': clinicalCompound['name'],
'preclinicalName': preclinicalCompound['name']
}
result[inchiKey][preclinicalCompound['source']] = preclinicalCompound['findingIds']
result[inchiKey][clinicalCompound['source']] = clinicalCompound['findingIds']
return result
def getClinicalCompounds(api):
    ct_compounds = api.ClinicalTrials().getAllCompounds()
    for ct_compound in ct_compounds:
        ct_compound['source'] = 'ClinicalTrials'
    ml_compounds = api.Medline().getAllCompounds()
    for ml_compound in ml_compounds:
        ml_compound['source'] = 'Medline'
    fa_compounds = api.Faers().getAllCompounds()
    for fa_compound in fa_compounds:
        fa_compound['source'] = 'Faers'
    dm_compounds = api.DailyMed().getAllCompounds()
    for dm_compound in dm_compounds:
        dm_compound['source'] = 'DailyMed'
return ct_compounds + ml_compounds + fa_compounds + dm_compounds
def getPreclinicalCompounds(api):
et_compounds = api.eToxSys().getAllCompounds()
for et_compound in et_compounds:
et_compound['source'] = 'eToxSys'
return et_compounds
def getFindingsByIds(api, service, findingIds):
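    """Fetch FINDING records from the given service in chunks of 500 ids."""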
result = []
query = {
"filter": {
"criteria": [
[
{
"field": {
"dataClassKey": "FINDING",
"name": "id"
},
"primitiveType": "Integer",
"comparisonOperator": "IN",
"values": None
},
]
]
},
"selectedFields": [
{
"dataClassKey": "FINDING",
"names": [
"id",
"specimenOrgan", "specimenOrganCode", "specimenOrganVocabulary",
"findingIdentifier", "finding", "findingCode", "findingVocabulary", "findingType",
"severity", "observation", "frequency",
"dose", "doseUnit",
"timepoint", "timepointUnit",
"treatmentRelated",
"compoundId",
"studyId",
"createdDate", "modifiedDate", "sex"
]
}
],
"offset": 0,
"limit": 500
}
    for offset in range(0, len(findingIds), 500):
        query['filter']['criteria'][0][0]['values'] = [{'value': findingId} for findingId in findingIds[offset:offset + 500]]
        for attempt in range(2):  # at most one retry per chunk, after re-authentication
            r = requests.post(service.endpoint + 'query', verify=False, headers={"Authorization": f"Bearer {api.get_token()}"}, json=query, timeout=None)
            if r.status_code == 200:
                response = json.loads(r.text)
                for record in response['resultData']['data']:
                    record['FINDING']['source'] = response['origin']
                    result.append(record['FINDING'])
                break
            elif r.status_code == 401:
                # The token has expired: reconnect and retry this chunk rather
                # than silently dropping it.
                api.reconnect()
            else:
                break
return result
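# Usage sketch (hypothetical host, credentials, and ids are assumptions, not
# part of this module): resolve a batch of finding ids against eToxSys.
#
#     findings = getAllFindings('localhost', 'etransafe', 'user', 'secret',
#                               'findings', '')
#     records = getFindingsByIds(api, api.eToxSys(), [101, 102, 103])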
| 35.744186
| 149
| 0.561483
|
| true
| true
|
7903683c5afe13c10cb0b8d29c63cf46b5c93d8b
| 433
|
py
|
Python
|
kafka-python/ConsumerAuth.py
|
pengfei99/KafkaPyClient
|
b18b361aedec9b58eef27c1d6f97346a64a1f154
|
[
"Apache-2.0"
] | null | null | null |
kafka-python/ConsumerAuth.py
|
pengfei99/KafkaPyClient
|
b18b361aedec9b58eef27c1d6f97346a64a1f154
|
[
"Apache-2.0"
] | null | null | null |
kafka-python/ConsumerAuth.py
|
pengfei99/KafkaPyClient
|
b18b361aedec9b58eef27c1d6f97346a64a1f154
|
[
"Apache-2.0"
] | null | null | null |
from kafka import KafkaConsumer
KAFKA_SERVER_URL = 'localhost:9092'
LOGIN = "bob"
PWD = "bob-secret"
TOPIC = "test-topic"
GROUP_ID = 'bob-group'
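# Authenticate with SASL/PLAIN over an unencrypted connection; the group id
# lets the broker track this consumer's committed offsets.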
consumer = KafkaConsumer(TOPIC, group_id=GROUP_ID, bootstrap_servers=KAFKA_SERVER_URL,
security_protocol="SASL_PLAINTEXT",
sasl_mechanism='PLAIN', sasl_plain_username=LOGIN, sasl_plain_password=PWD)
for msg in consumer:
print(msg)
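# A matching producer sketch (hypothetical, for illustration only): it assumes
# the same broker, credentials, and topic as the consumer above.
#
#     from kafka import KafkaProducer
#     producer = KafkaProducer(bootstrap_servers=KAFKA_SERVER_URL,
#                              security_protocol="SASL_PLAINTEXT",
#                              sasl_mechanism='PLAIN',
#                              sasl_plain_username=LOGIN,
#                              sasl_plain_password=PWD)
#     producer.send(TOPIC, b'hello')
#     producer.flush()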
| 28.866667
| 100
| 0.69746
|
| true
| true
|
790369cc05bead275aea27c7f3664bac057fd5a2
| 1,220
|
py
|
Python
|
conftest.py
|
bennylope/django-simple-auth
|
4b2acbc4bb4d0a958895235ca36b9c371853bc6e
|
[
"BSD-2-Clause"
] | 1
|
2018-05-18T07:42:35.000Z
|
2018-05-18T07:42:35.000Z
|
conftest.py
|
bennylope/django-simple-auth
|
4b2acbc4bb4d0a958895235ca36b9c371853bc6e
|
[
"BSD-2-Clause"
] | 2
|
2017-03-16T18:02:50.000Z
|
2018-01-02T17:43:18.000Z
|
conftest.py
|
bennylope/django-simple-auth
|
4b2acbc4bb4d0a958895235ca36b9c371853bc6e
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Configuration file for py.test
"""
import django
def pytest_configure():
from django.conf import settings
settings.configure(
DEBUG=True,
USE_TZ=True,
USE_I18N=True,
ROOT_URLCONF="tests.urls",
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "test.sqlite3",
}
},
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.admin",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"simple_auth",
],
MIDDLEWARE=[
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"simple_auth.middleware.SimpleAuthMiddleware",
],
        TEMPLATES=[
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
},
],
SITE_ID=1,
)
django.setup()
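# A minimal test sketch (hypothetical, e.g. in tests/test_smoke.py) that this
# configuration supports:
#
#     def test_settings_configured():
#         from django.conf import settings
#         assert settings.SITE_ID == 1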
| 27.111111
| 77
| 0.536066
|
| true
| true
|